repo | file | code | file_length | avg_line_length | max_line_length | extension_type
stringlengths 2-99 | stringlengths 13-225 | stringlengths 0-18.3M | int64 0-18.3M | float64 0-1.36M | int64 0-4.26M | stringclasses 1 value
---|---|---|---|---|---|---
rlmeta | rlmeta-main/tests/data/segment_tree_test.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import unittest
from math import prod
import numpy as np
import torch
from rlmeta.data import SumSegmentTree
from tests.test_utils import TestCaseBase
class SumSegmentTreeTest(TestCaseBase):
def setUp(self) -> None:
self.size = 100
self.data = torch.randn(self.size)
self.segment_tree = SumSegmentTree(self.size, dtype=np.float32)
self.segment_tree[torch.arange(self.size)] = self.data
self.query_size = (2, 3, 4)
def test_at(self) -> None:
index = torch.randint(self.size, self.query_size)
value = self.segment_tree[index]
self.assert_tensor_equal(value, self.data[index])
value = self.segment_tree.at(index)
self.assert_tensor_equal(value, self.data[index])
value = self.segment_tree[index.numpy()]
self.assert_tensor_equal(value, self.data[index].numpy())
value = self.segment_tree.at(index.numpy())
self.assert_tensor_equal(value, self.data[index].numpy())
def test_update(self) -> None:
weights = torch.ones(self.size)
index = weights.multinomial(prod(self.query_size), replacement=False)
index = index.view(self.query_size)
origin_value = self.segment_tree[index]
value = np.random.randn()
self.segment_tree[index] = value
self.assert_tensor_equal(self.segment_tree[index],
torch.full(self.query_size, value))
self.segment_tree[index] = origin_value
value = np.random.randn()
self.segment_tree.update(index, value)
self.assert_tensor_equal(self.segment_tree[index],
torch.full(self.query_size, value))
self.segment_tree[index] = origin_value
value = torch.randn(self.query_size)
self.segment_tree[index] = value
self.assert_tensor_equal(self.segment_tree[index], value)
self.segment_tree[index] = origin_value
value = torch.randn(self.query_size)
self.segment_tree.update(index, value)
self.assert_tensor_equal(self.segment_tree[index], value)
self.segment_tree[index] = origin_value
def test_masked_update(self) -> None:
weights = torch.ones(self.size)
index = weights.multinomial(prod(self.query_size), replacement=False)
index = index.view(self.query_size)
origin_value = self.segment_tree[index]
mask = torch.randint(2, size=self.query_size, dtype=torch.bool)
value = torch.randn(self.query_size)
self.segment_tree.update(index, value, mask)
self.assert_tensor_equal(self.segment_tree[index],
torch.where(mask, value, origin_value))
self.segment_tree[index] = origin_value
def test_query(self) -> None:
a = torch.randint(self.size, self.query_size)
b = torch.randint(self.size, self.query_size)
l = torch.minimum(a, b)
r = torch.maximum(a, b)
value = self.segment_tree.query(l, r)
l_list = l.view(-1).tolist()
r_list = r.view(-1).tolist()
ret = []
for (x, y) in zip(l_list, r_list):
ret.append(self.data[x:y].sum())
ret = torch.tensor(ret).view(self.query_size)
self.assert_tensor_close(value, ret, rtol=1e-6, atol=1e-6)
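    # Note: query(l, r) sums over the half-open interval [l, r), matching the
    # Python slice self.data[x:y] used to build the expected values above.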
def test_pickle(self) -> None:
s = pickle.dumps(self.segment_tree)
t = pickle.loads(s)
self.assert_tensor_equal(t[torch.arange(self.size)], self.data)
for _ in range(10):
l = np.random.randint(self.size)
r = np.random.randint(self.size)
if l > r:
l, r = r, l
ret = t.query(l, r)
ans = self.data[l:r].sum().item()
self.assertAlmostEqual(ret, ans, places=5)
if __name__ == "__main__":
unittest.main()
| 4,036 | 35.044643 | 77 | py |
rlmeta | rlmeta-main/tests/data/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 179 | 35 | 65 | py |
rlmeta | rlmeta-main/tests/ops/discounted_return_test.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Union
import torch
import rlmeta.ops as ops
from tests.test_utils import TestCaseBase
class DiscountReturnTest(TestCaseBase):
def setUp(self) -> None:
self.rtol = 1e-6
self.atol = 1e-6
def test_discounted_return_with_scalar_gamma(self) -> None:
n = 100
gamma = torch.rand(1).item()
reward = torch.randn(n)
g = ops.discounted_return(reward, gamma)
expected_g = self._discounted_return(reward, gamma)
self.assert_tensor_close(g, expected_g, rtol=self.rtol, atol=self.atol)
reward = torch.randn(n, 1)
g = ops.discounted_return(reward, gamma)
expected_g = self._discounted_return(reward, gamma)
self.assert_tensor_close(g, expected_g, rtol=self.rtol, atol=self.atol)
def test_discounted_return_with_tensor_gamma(self) -> None:
n = 200
reward = torch.randn(n)
gamma = torch.rand(1)
g = ops.discounted_return(reward, gamma)
expected_g = self._discounted_return(reward, gamma)
self.assert_tensor_close(g, expected_g, rtol=self.rtol, atol=self.atol)
reward = torch.randn(n, 1)
gamma = torch.rand(n, 1)
g = ops.discounted_return(reward, gamma)
expected_g = self._discounted_return(reward, gamma)
self.assert_tensor_close(g, expected_g, rtol=self.rtol, atol=self.atol)
def _discounted_return(self, reward: torch.Tensor,
gamma: Union[float, torch.Tensor]) -> torch.Tensor:
n = reward.size(0)
g = torch.zeros(1)
ret = []
for i in range(n - 1, -1, -1):
if isinstance(gamma, float):
gamma_i = gamma
elif gamma.numel() == 1:
gamma_i = gamma.item()
else:
gamma_i = gamma[i].item()
g = reward[i] + gamma_i * g
ret.append(g)
ret = torch.stack(tuple(reversed(ret)))
if reward.dim() == 1:
ret.squeeze_(-1)
return ret
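# The reference above checks the standard discounted-return recurrence for a
# reward sequence indexed 0..n-1:
#     G_t = r_t + gamma_t * G_{t+1},  with G_n = 0.
# ops.discounted_return is expected to agree with this backward accumulation.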
if __name__ == "__main__":
unittest.main()
| 2,278 | 29.797297 | 79 | py |
rlmeta | rlmeta-main/tests/ops/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 179 | 35 | 65 | py |
rlmeta | rlmeta-main/tests/ops/generalized_advantage_estimation_test.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Optional, Union
import torch
import rlmeta.ops as ops
from tests.test_utils import TestCaseBase
class GeneralizedAdvantageEstimationTest(TestCaseBase):
def setUp(self) -> None:
self.rtol = 1e-6
self.atol = 1e-6
def test_gae_with_scalar_parameter(self) -> None:
n = 100
gamma = torch.rand(1).item()
gae_lambda = torch.rand(1).item()
reward = torch.randn(n)
value = torch.randn(n)
gae = ops.generalized_advantage_estimation(reward, value, gamma,
gae_lambda)
expected_gae = self._gae(reward, value, gamma, gae_lambda)
self.assert_tensor_close(gae,
expected_gae,
rtol=self.rtol,
atol=self.atol)
reward = torch.randn(n, 1)
value = torch.randn(n, 1)
last_v = torch.randn(1)
gae = ops.generalized_advantage_estimation(reward, value, gamma,
gae_lambda, last_v)
expected_gae = self._gae(reward, value, gamma, gae_lambda, last_v)
self.assert_tensor_close(gae,
expected_gae,
rtol=self.rtol,
atol=self.atol)
def test_gae_with_tensor_parameter(self) -> None:
n = 200
reward = torch.randn(n)
value = torch.randn(n)
gamma = torch.rand(1)
gae_lambda = torch.rand(1)
gae = ops.generalized_advantage_estimation(reward, value, gamma,
gae_lambda)
expected_gae = self._gae(reward, value, gamma, gae_lambda)
self.assert_tensor_close(gae,
expected_gae,
rtol=self.rtol,
atol=self.atol)
reward = torch.randn(n, 1)
value = torch.randn(n, 1)
gamma = torch.rand(n, 1)
gae_lambda = torch.rand(n, 1)
last_v = torch.randn(1)
gae = ops.generalized_advantage_estimation(reward, value, gamma,
gae_lambda, last_v)
expected_gae = self._gae(reward, value, gamma, gae_lambda, last_v)
self.assert_tensor_close(gae,
expected_gae,
rtol=self.rtol,
atol=self.atol)
def _gae(self,
reward: torch.Tensor,
value: torch.Tensor,
gamma: Union[float, torch.Tensor],
gae_lambda: Union[float, torch.Tensor],
last_v: Optional[torch.Tensor] = None) -> torch.Tensor:
n = reward.size(0)
v = torch.zeros(1) if last_v is None else last_v
adv = torch.zeros(1)
gae = []
for i in range(n - 1, -1, -1):
if isinstance(gamma, float):
gamma_i = gamma
elif gamma.numel() == 1:
gamma_i = gamma.item()
else:
gamma_i = gamma[i].item()
if isinstance(gae_lambda, float):
lambda_i = gae_lambda
elif gae_lambda.numel() == 1:
lambda_i = gae_lambda.item()
else:
lambda_i = gae_lambda[i].item()
delta = reward[i] + gamma_i * v - value[i]
v = value[i]
adv = delta + gamma_i * lambda_i * adv
gae.append(adv)
gae = torch.stack(tuple(reversed(gae)))
if reward.dim() == 1:
gae.squeeze_(-1)
return gae
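# The reference above implements the usual GAE recursion, with per-step
# parameters gamma_t and lambda_t (scalar parameters broadcast trivially):
#     delta_t = r_t + gamma_t * V_{t+1} - V_t
#     A_t     = delta_t + gamma_t * lambda_t * A_{t+1}
# where V_n is taken from last_v (or 0 when last_v is None) and A_n = 0.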
if __name__ == "__main__":
unittest.main()
| 3,929 | 33.173913 | 74 | py |
rlmeta | rlmeta-main/docs/source/conf.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'RLMeta'
copyright = '2021, Facebook AI Research'
author = 'Facebook AI Research'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 2,078 | 35.473684 | 79 | py |
rlmeta | rlmeta-main/rlmeta/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 179 | 35 | 65 | py |
rlmeta | rlmeta-main/rlmeta/core/callbacks.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from rlmeta.core.types import Action, TimeStep
# The EpisodeCallbacks class is adapted from RLLib's DefaultCallbacks
# https://github.com/ray-project/ray/blob/f9173a189023ccf4b4b09cf1533c628da13d000b/rllib/algorithms/callbacks.py#L37
#
# It was released under the Apache License, Version 2.0 (the "License"),
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class EpisodeCallbacks:
"""Callbacks class for custom episode metrics.
"""
def __init__(self) -> None:
self._custom_metrics = {}
@property
def custom_metrics(self) -> Dict[str, Any]:
return self._custom_metrics
@custom_metrics.setter
def custom_metrics(self, metrics: Dict[str, Any]) -> None:
self._custom_metrics = metrics
def reset(self) -> None:
self._custom_metrics.clear()
def on_episode_start(self, index: int) -> None:
pass
def on_episode_init(self, index: int, timestep: TimeStep) -> None:
pass
def on_episode_step(self, index: int, step: int, action: Action,
timestep: TimeStep) -> None:
pass
def on_episode_end(self, index: int) -> None:
pass
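# A minimal sketch of a custom subclass; "action_count" is a hypothetical
# metric name used only to illustrate how custom_metrics is populated and
# later merged into the loop's per-episode metrics.
class CountingCallbacks(EpisodeCallbacks):

    def on_episode_start(self, index: int) -> None:
        self._num_steps = 0

    def on_episode_step(self, index: int, step: int, action: Action,
                        timestep: TimeStep) -> None:
        self._num_steps += 1

    def on_episode_end(self, index: int) -> None:
        # Values stored here are picked up via metrics.update(...) in the loop.
        self.custom_metrics["action_count"] = float(self._num_steps)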
| 1,865 | 31.172414 | 116 | py |
rlmeta | rlmeta-main/rlmeta/core/loop.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import asyncio
import copy
import logging
import time
from typing import Dict, List, NoReturn, Optional, Sequence, Union
from rich.console import Console
import torch
import torch.multiprocessing as mp
import moolib
import rlmeta.core.remote as remote
import rlmeta.utils.asyncio_utils as asyncio_utils
import rlmeta.utils.moolib_utils as moolib_utils
from rlmeta.agents.agent import Agent, AgentFactory
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.controller import Controller, ControllerLike, Phase
from rlmeta.core.launchable import Launchable
from rlmeta.envs.env import Env, EnvFactory
console = Console()
class Loop(abc.ABC):
@abc.abstractmethod
def run(self, num_episodes: Optional[int] = None) -> None:
"""
"""
class AsyncLoop(Loop, Launchable):
def __init__(self,
env_factory: EnvFactory,
agent_factory: AgentFactory,
controller: ControllerLike,
running_phase: Phase,
should_update: bool = False,
num_rollouts: int = 1,
index: int = 0,
index_offset: Optional[int] = None,
seed: Optional[int] = None,
episode_callbacks: Optional[EpisodeCallbacks] = None) -> None:
self._running_phase = running_phase
self._should_update = should_update
self._index = index
self._num_rollouts = num_rollouts
if index_offset is None:
self._index_offset = index * num_rollouts
else:
self._index_offset = index_offset
self._seed = seed
self._env_factory = env_factory
self._agent_factory = agent_factory
self._envs = []
self._agents = []
self._controller = controller
self._loop = None
self._tasks = []
self._running = False
self._episode_callbacks = episode_callbacks
@property
def running_phase(self) -> Phase:
return self._running_phase
@property
def should_update(self) -> bool:
return self._should_update
@property
def num_rollouts(self) -> int:
return self._num_rollouts
@property
def index(self) -> int:
return self._index
@property
def index_offset(self) -> int:
return self._index_offset
@property
def seed(self) -> Optional[int]:
return self._seed
@property
def running(self) -> bool:
return self._running
@running.setter
def running(self, running: bool) -> None:
self._running = running
def init_launching(self) -> None:
pass
def init_execution(self) -> None:
for i in range(self._num_rollouts):
env = self._env_factory(self.index_offset + i)
if self.seed is not None:
env.reset(seed=self.seed + self.index_offset + i)
self._envs.append(env)
for i in range(self._num_rollouts):
agent = self._agent_factory(self.index_offset + i)
agent.connect()
# if self.seed is not None:
# agent.seed(self.seed + self.index_offset + i)
self._agents.append(agent)
for obj_name in dir(self):
obj = getattr(self, obj_name)
if isinstance(obj, remote.Remote):
obj.name = moolib_utils.expend_name_by_index(
obj.name, self.index)
obj.connect()
for obj_name in dir(self):
obj = getattr(self, obj_name)
if isinstance(obj, Launchable):
obj.init_execution()
def run(self) -> NoReturn:
console.log(f"Starting async loop with: {self._controller}")
self._loop = asyncio.get_event_loop()
self._tasks.append(
asyncio_utils.create_task(self._loop, self._check_phase()))
for i, (env, agent) in enumerate(zip(self._envs, self._agents)):
index = self.index_offset + i
task = asyncio_utils.create_task(
self._loop,
self._run_loop(index, env, agent,
copy.deepcopy(self._episode_callbacks)))
self._tasks.append(task)
try:
self._loop.run_forever()
except Exception as e:
logging.error(e)
raise e
finally:
for task in self._tasks:
task.cancel()
self._loop.stop()
async def _check_phase(self) -> NoReturn:
while True:
cur_phase = await self._controller.async_phase()
self._running = ((cur_phase &
self._running_phase) == self._running_phase)
await asyncio.sleep(1)
async def _run_loop(
self,
index: int,
env: Env,
agent: Agent,
episode_callbacks: Optional[EpisodeCallbacks] = None) -> NoReturn:
while True:
while not self.running:
await asyncio.sleep(1)
stats = await self._run_episode(index, env, agent,
episode_callbacks)
if self.running and stats is not None:
await self._controller.async_add_episode(
self._running_phase, stats)
    # The method _run_episode is adapted from Acme's EnvironmentLoop.run_episode:

# https://github.com/deepmind/acme/blob/df961057bcd2e1436d5f894ebced62d694225034/acme/environment_loop.py#L76
#
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
async def _run_episode(
self,
index: int,
env: Env,
agent: Agent,
episode_callbacks: Optional[EpisodeCallbacks] = None
) -> Optional[Dict[str, float]]:
episode_length = 0
episode_return = 0.0
start_time = time.perf_counter()
if episode_callbacks is not None:
episode_callbacks.reset()
episode_callbacks.on_episode_start(index)
timestep = env.reset()
await agent.async_observe_init(timestep)
if episode_callbacks is not None:
episode_callbacks.on_episode_init(index, timestep)
while not timestep.terminated and not timestep.truncated:
if not self.running:
return None
action = await agent.async_act(timestep)
timestep = env.step(action)
await agent.async_observe(action, timestep)
if self.should_update:
await agent.async_update()
episode_length += 1
episode_return += timestep.reward
if episode_callbacks is not None:
episode_callbacks.on_episode_step(index, episode_length - 1,
action, timestep)
episode_time = time.perf_counter() - start_time
steps_per_second = episode_length / episode_time
if episode_callbacks is not None:
episode_callbacks.on_episode_end(index)
metrics = {
"episode_length": float(episode_length),
"episode_return": episode_return,
"episode_time/s": episode_time,
"steps_per_second": steps_per_second,
}
if episode_callbacks is not None:
metrics.update(episode_callbacks.custom_metrics)
return metrics
class ParallelLoop(Loop):
def __init__(self,
env_factory: EnvFactory,
agent_factory: AgentFactory,
controller: Union[Controller, remote.Remote],
running_phase: Phase,
should_update: bool = False,
num_rollouts: int = 1,
num_workers: Optional[int] = None,
index: int = 0,
index_offset: Optional[int] = None,
seed: Optional[int] = None,
episode_callbacks: Optional[EpisodeCallbacks] = None) -> None:
self._running_phase = running_phase
self._should_update = should_update
self._index = index
self._num_rollouts = num_rollouts
self._num_workers = min(mp.cpu_count(), self._num_rollouts)
if num_workers is not None:
self._num_workers = min(self._num_workers, num_workers)
if index_offset is None:
self._index_offset = index * num_rollouts
else:
self._index_offset = index_offset
self._env_factory = env_factory
self._agent_factory = agent_factory
self._controller = controller
self._seed = seed
self._episode_callbacks = episode_callbacks
self._workloads = self._compute_workloads()
self._async_loops = []
self._processes = []
index_offset = self._index_offset
for i, workload in enumerate(self._workloads):
loop = AsyncLoop(self._env_factory, self._agent_factory,
self._controller, self._running_phase,
self._should_update, workload, i, index_offset,
self._seed, self._episode_callbacks)
self._async_loops.append(loop)
index_offset += workload
@property
def running_phase(self) -> Phase:
return self._running_phase
@property
def should_update(self) -> bool:
return self._should_update
@property
def num_rollouts(self) -> int:
return self._num_rollouts
@property
def num_workers(self) -> int:
return self._num_workers
@property
def index(self) -> int:
return self._index
@property
def index_offset(self) -> int:
return self._index_offset
@property
def seed(self) -> Optional[int]:
return self._seed
def run(self) -> NoReturn:
self.start()
self.join()
def start(self) -> None:
processes = []
for loop in self._async_loops:
loop.init_launching()
process = mp.Process(target=self._run_async_loop, args=(loop,))
processes.append(process)
for process in processes:
process.start()
self._processes = processes
def join(self) -> None:
for process in self._processes:
process.join()
def terminate(self) -> None:
for process in self._processes:
process.terminate()
def _compute_workloads(self) -> List[int]:
workload = self.num_rollouts // self.num_workers
r = self.num_rollouts % self.num_workers
workloads = [workload + 1] * r + [workload] * (self.num_workers - r)
return workloads
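    # For example, num_rollouts=10 with num_workers=4 yields [3, 3, 2, 2]:
    # the first (num_rollouts % num_workers) workers each take one extra
    # rollout so the workloads sum to num_rollouts.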
def _run_async_loop(self, loop: AsyncLoop) -> NoReturn:
if loop.seed is not None:
torch.manual_seed(loop.seed + loop.index_offset)
loop.init_execution()
loop.run()
class LoopList:
def __init__(self, loops: Optional[Sequence[Loop]] = None) -> None:
self._loops = []
if loops is not None:
self._loops.extend(loops)
@property
def loops(self) -> List[Loop]:
return self._loops
def append(self, loop: Loop) -> None:
self.loops.append(loop)
def extend(self, loops: Union[LoopList, Sequence[Loop]]) -> None:
if isinstance(loops, LoopList):
self.loops.extend(loops.loops)
else:
self.loops.extend(loops)
def start(self) -> None:
for loop in self.loops:
loop.start()
def join(self) -> None:
for loop in self.loops:
loop.join()
def terminate(self) -> None:
for loop in self.loops:
loop.terminate()
LoopLike = Union[Loop, LoopList]
| 12,555 | 30.949109 | 113 | py |
rlmeta | rlmeta-main/rlmeta/core/launchable.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
class Launchable(abc.ABC):
@abc.abstractmethod
def init_launching(self) -> None:
"""
"""
@abc.abstractmethod
def init_execution(self) -> None:
"""
"""
| 394 | 18.75 | 65 | py |
rlmeta | rlmeta-main/rlmeta/core/server.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import asyncio
import logging
from typing import Any, Callable, List, NoReturn, Optional, Sequence, Union
import torch
import torch.multiprocessing as mp
from rich.console import Console
import moolib
import rlmeta.utils.asyncio_utils as asyncio_utils
from rlmeta.core.launchable import Launchable
from rlmeta.core.remote import Remotable
console = Console()
class Server(Launchable):
def __init__(self, name: str, addr: str, timeout: float = 60) -> None:
self._name = name
self._addr = addr
self._timeout = timeout
self._services = []
self._process = None
self._server = None
self._loop = None
self._tasks = None
def __repr__(self):
return f'Server(name={self._name} addr={self._addr})'
@property
def name(self) -> str:
return self._name
@property
def addr(self) -> str:
return self._addr
@property
def timeout(self) -> float:
return self._timeout
def add_service(self, service: Union[Remotable,
Sequence[Remotable]]) -> None:
if isinstance(service, (list, tuple)):
self._services.extend(service)
else:
self._services.append(service)
def start(self) -> None:
self.init_launching()
self._process = mp.Process(target=self.run)
self._process.start()
def join(self) -> None:
self._process.join()
def terminate(self) -> None:
if self._process is not None:
self._process.terminate()
def run(self) -> NoReturn:
self.init_execution()
self._start_services()
def init_launching(self) -> None:
for service in self._services:
if isinstance(service, Launchable):
service.init_launching()
def init_execution(self) -> None:
for service in self._services:
if isinstance(service, Launchable):
service.init_execution()
self._server = moolib.Rpc()
self._server.set_name(self._name)
self._server.set_timeout(self._timeout)
console.log(f"Server={self.name} listening to {self._addr}")
try:
self._server.listen(self._addr)
except:
console.log(f"ERROR on listen({self._addr}) from: server={self}")
raise
def _start_services(self) -> NoReturn:
self._loop = asyncio.get_event_loop()
self._tasks = []
console.log(f"Server={self.name} starting services: {self._services}")
for service in self._services:
for method in service.remote_methods:
method_impl = getattr(service, method)
batch_size = getattr(method_impl, "__batch_size__", None)
self._add_server_task(service.remote_method_name(method),
method_impl, batch_size)
try:
if not self._loop.is_running():
self._loop.run_forever()
except Exception as e:
logging.error(e)
raise
finally:
for task in self._tasks:
task.cancel()
self._loop.stop()
self._loop.close()
console.log(f"Server={self.name} services started")
def _add_server_task(self, func_name: str, func_impl: Callable[..., Any],
batch_size: Optional[int]) -> None:
if batch_size is None:
que = self._server.define_queue(func_name)
else:
que = self._server.define_queue(func_name,
batch_size=batch_size,
dynamic_batching=True)
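        # A batched queue lets moolib coalesce concurrent calls into a single
        # batched invocation (dynamic batching); with batch_size=None each
        # request is served individually.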
task = asyncio_utils.create_task(self._loop,
self._async_process(que, func_impl))
self._tasks.append(task)
async def _async_process(self, que: moolib.Queue,
func: Callable[..., Any]) -> None:
try:
while True:
ret_cb, args, kwargs = await que
ret = func(*args, **kwargs)
ret_cb(ret)
except asyncio.CancelledError:
pass
except Exception as e:
logging.error(e)
raise e
class ServerList:
def __init__(self, servers: Optional[Sequence[Server]] = None) -> None:
self._servers = []
if servers is not None:
self._servers.extend(servers)
def __getitem__(self, index: int) -> Server:
return self._servers[index]
@property
def servers(self) -> List[Server]:
return self._servers
def append(self, server: Server) -> None:
self.servers.append(server)
def extend(self, servers: Union[ServerList, Sequence[Server]]) -> None:
if isinstance(servers, ServerList):
self.servers.extend(servers.servers)
else:
self.servers.extend(servers)
def start(self) -> None:
for server in self.servers:
server.start()
def join(self) -> None:
for server in self.servers:
server.join()
def terminate(self) -> None:
for server in self.servers:
server.terminate()
ServerLike = Union[Server, ServerList]
| 5,518 | 28.994565 | 78 | py |
rlmeta | rlmeta-main/rlmeta/core/model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import functools
import random
from enum import IntEnum
from typing import (Any, Awaitable, Callable, Dict, Optional, Sequence, Tuple,
Union)
from rich.console import Console
import numpy as np
import torch
import torch.nn as nn
import rlmeta.core.remote as remote
import rlmeta.ops as rlmeta_ops
import rlmeta.utils.data_utils as data_utils
import rlmeta.utils.nested_utils as nested_utils
import rlmeta.utils.random_utils as random_utils
from rlmeta.core.launchable import Launchable
from rlmeta.core.server import Server
from rlmeta.core.types import NestedTensor
from rlmeta.samplers import UniformSampler
from rlmeta.storage.circular_buffer import CircularBuffer
console = Console()
class ModelVersion(IntEnum):
# Use negative values for latest version flag to avoid conflict with real
# version.
LATEST = -0x7FFFFFFF
STABLE = -1
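# When used as an index into RemotableModelPool's release history, STABLE (-1)
# resolves to the most recently released snapshot (assuming Python-style
# negative indexing in CircularBuffer); LATEST is a pure sentinel for the live
# training model (see model() below).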
class RemotableModel(nn.Module, remote.Remotable):
def __init__(self, identifier: Optional[str] = None) -> None:
nn.Module.__init__(self)
remote.Remotable.__init__(self, identifier)
@property
def device(self) -> torch.device:
return next(self.parameters()).device
def init_model(self) -> None:
pass
class RemotableModelPool(remote.Remotable, Launchable):
def __init__(self,
model: RemotableModel,
capacity: int = 0,
seed: Optional[int] = None,
identifier: Optional[str] = None) -> None:
super().__init__(identifier)
self._model = model
self._capacity = capacity
self._seed = seed
if self._capacity > 0:
self._history = CircularBuffer(self._capacity)
@property
def capacity(self) -> int:
return self._capacity
@property
def seed(self) -> Optional[int]:
return self._seed
def init_launching(self) -> None:
self._model.share_memory()
def init_execution(self) -> None:
self._bind()
if self._seed is not None:
random_utils.manual_seed(self._seed)
self._model.init_model()
console.log(self._model)
def model(self, version: int = ModelVersion.LATEST) -> nn.Module:
return (self._model if version == ModelVersion.LATEST else
self._history[version][1])
@remote.remote_method(batch_size=None)
def pull(self,
version: int = ModelVersion.LATEST) -> Dict[str, torch.Tensor]:
state_dict = self.model(version).state_dict()
state_dict = nested_utils.map_nested(lambda x: x.cpu(), state_dict)
return state_dict
@remote.remote_method(batch_size=None)
def push(self, state_dict: Dict[str, torch.Tensor]) -> None:
# Move state_dict to device before loading.
# https://github.com/pytorch/pytorch/issues/34880
device = self._model.device
state_dict = nested_utils.map_nested(lambda x: x.to(device), state_dict)
self._model.load_state_dict(state_dict)
@remote.remote_method(batch_size=None)
def release(self) -> None:
if self._capacity > 0:
self._history.append(copy.deepcopy(self._model))
@remote.remote_method(batch_size=None)
def sample_model(self) -> int:
if self._capacity == 0:
return ModelVersion.LATEST
else:
return np.random.randint(len(self._history))
def _bind(self) -> None:
for method in self._model.remote_methods:
batch_size = getattr(getattr(self._model, method), "__batch_size__",
None)
method_name, method_impl = self._wrap_remote_method(
method, batch_size)
self.__remote_methods__.append(method_name)
setattr(self, method_name, method_impl)
for i in range(self._capacity):
method_name, method_impl = self._wrap_remote_method(
method, batch_size, i)
self.__remote_methods__.append(method_name)
setattr(self, method_name, method_impl)
method_name, method_impl = self._wrap_remote_method(
method, batch_size, -i - 1)
setattr(self, method_name, method_impl)
self.__remote_methods__.append(method_name)
def _wrap_remote_method(
self,
method: str,
batch_size: Optional[int] = None,
            version: int = ModelVersion.LATEST) -> Tuple[str, Callable[..., Any]]:
method_name = method
if version != ModelVersion.LATEST:
method_name += f"[{version}]"
method_impl = functools.partial(self._dispatch_model_call, version,
method)
setattr(method_impl, "__remote__", True)
if batch_size is not None:
setattr(method_impl, "__batch_size__", batch_size)
return method_name, method_impl
def _dispatch_model_call(self, version: int, method: str, *args,
**kwargs) -> Any:
model = self.model(version)
device = model.device
args = nested_utils.map_nested(lambda x: x.to(device), args)
kwargs = nested_utils.map_nested(lambda x: x.to(device), kwargs)
ret = getattr(model, method)(*args, **kwargs)
ret = nested_utils.map_nested(lambda x: x.cpu(), ret)
return ret
class RemoteModel(remote.Remote):
def __init__(self,
target: RemotableModel,
server_name: str,
server_addr: str,
name: Optional[str] = None,
version: int = ModelVersion.LATEST,
timeout: float = 60) -> None:
super().__init__(target, server_name, server_addr, name, timeout)
self._version = version
@property
def version(self) -> int:
return self._version
@version.setter
def version(self, version: int) -> None:
self._version = version
def sample_model(self,
num_samples: int = 1,
replacement: bool = False) -> torch.Tensor:
return self.client.sync(self.server_name,
self.remote_method_name("sample_model"),
num_samples, replacement)
async def async_sample_model(self,
num_samples: int = 1,
replacement: bool = False) -> torch.Tensor:
return await self.client.async_(self.server_name,
self.remote_method_name("sample_model"),
num_samples, replacement)
def _bind(self) -> None:
for method in self._remote_methods:
method_name = self.remote_method_name(method)
self._client_methods[method] = functools.partial(
self._remote_model_call, method_name)
self._client_methods["async_" + method] = functools.partial(
self._async_remote_model_call, method_name)
def _remote_model_call(self, method: str, *args, **kwargs) -> Any:
method_name = method
if self._version != ModelVersion.LATEST:
method_name += f"[{self._version}]"
return self.client.sync(self.server_name, method_name, *args, **kwargs)
def _async_remote_model_call(self, method: str, *args,
**kwargs) -> Awaitable:
method_name = method
if self._version != ModelVersion.LATEST:
method_name += f"[{self._version}]"
return self.client.async_(self.server_name, method_name, *args,
**kwargs)
class DownstreamModel(remote.Remote):
def __init__(self,
model: nn.Module,
server_name: str,
server_addr: str,
name: Optional[str] = None,
timeout: float = 60) -> None:
self._wrapped = model
self._reset(server_name, server_addr, name, timeout)
# TODO: Find a better way to implement this
def __getattribute__(self, attr: str) -> Any:
try:
return object.__getattribute__(self, attr)
except AttributeError:
return getattr(object.__getattribute__(self, "_wrapped"), attr)
@property
def wrapped(self) -> nn.Module:
return self._wrapped
def __call__(self, *args, **kwargs) -> Any:
return self.wrapped(*args, **kwargs)
def pull(self, version: int = ModelVersion.LATEST) -> None:
state_dict = self.client.sync(self.server_name,
self.remote_method_name("pull"), version)
self.wrapped.load_state_dict(state_dict)
async def async_pull(self, version: int = ModelVersion.LATEST) -> None:
state_dict = await self.client.async_(self.server_name,
self.remote_method_name("pull"),
version)
self.wrapped.load_state_dict(state_dict)
def push(self) -> None:
state_dict = self.wrapped.state_dict()
state_dict = nested_utils.map_nested(lambda x: x.cpu(), state_dict)
self.client.sync(self.server_name, self.remote_method_name("push"),
state_dict)
async def async_push(self) -> None:
state_dict = self.wrapped.state_dict()
state_dict = nested_utils.map_nested(lambda x: x.cpu(), state_dict)
await self.client.async_(self.server_name,
self.remote_method_name("push"), state_dict)
def release(self) -> None:
self.client.sync(self.server_name, self.remote_method_name("release"))
async def async_release(self) -> None:
await self.client.async_(self.server_name,
self.remote_method_name("release"))
def sample_model(self,
num_samples: int = 1,
replacement: bool = False) -> torch.Tensor:
return self.client.sync(self.server_name,
self.remote_method_name("sample_model"),
num_samples, replacement)
async def async_sample_model(self,
num_samples: int = 1,
replacement: bool = False) -> torch.Tensor:
return await self.client.async_(self.server_name,
self.remote_method_name("sample_model"),
num_samples, replacement)
def _bind(self) -> None:
pass
ModelLike = Union[nn.Module, RemotableModel, RemoteModel, DownstreamModel,
remote.Remote]
def make_remote_model(model: Union[RemotableModel, RemotableModelPool],
server: Server,
name: Optional[str] = None,
version: int = ModelVersion.LATEST,
timeout: float = 60) -> RemoteModel:
if isinstance(model, RemotableModelPool):
model = model.model()
return RemoteModel(model, server.name, server.addr, name, version, timeout)
def wrap_downstream_model(model: RemotableModel,
server: Server,
name: Optional[str] = None,
timeout: float = 60) -> DownstreamModel:
return DownstreamModel(model, server.name, server.addr, name, timeout)
| 11,670 | 35.358255 | 80 | py |
rlmeta | rlmeta-main/rlmeta/core/types.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from typing import Any, NamedTuple, Optional, Union
Tensor = Union[np.ndarray, torch.Tensor]
# NestedTensor is adapted from Acme's NestedTensor
# https://github.com/deepmind/acme/blob/df961057bcd2e1436d5f894ebced62d694225034/acme/types.py#L23
#
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NestedTensor = Any
# TimeStep is inspired by dm_env's TimeStep:
# https://github.com/deepmind/dm_env/blob/abee135a07cc8e684173586dc8a20e696bbd40fb/dm_env/_environment.py#L25
#
# Copyright 2019 The dm_env Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TimeStep(NamedTuple):
observation: Any
reward: Optional[float] = None
terminated: bool = False
truncated: bool = False
info: Optional[Any] = None
class Action(NamedTuple):
action: Any
info: Optional[Any] = None
| 2,108 | 33.57377 | 109 | py |
rlmeta | rlmeta-main/rlmeta/core/controller.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from enum import IntFlag
from typing import Dict, Optional, Union
import rlmeta.core.remote as remote
from rlmeta.utils.stats_dict import StatsDict
class Phase(IntFlag):
NONE = 0
TRAIN = 1
EVAL = 2
BOTH = 3
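# Phase values form a bitmask, so one loop can subscribe to several phases:
#
#     running_phase = Phase.TRAIN | Phase.EVAL   # == Phase.BOTH
#     (Phase.BOTH & Phase.TRAIN) == Phase.TRAIN  # True
#
# AsyncLoop._check_phase uses exactly this containment test to decide whether
# a loop should be running.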
class Controller(remote.Remotable):
@dataclass
class PhaseStatus:
limit: Optional[int] = None
count: int = 0
        stats: StatsDict = field(default_factory=StatsDict)
def __init__(self, identifier: Optional[str] = None) -> None:
super().__init__(identifier)
self._phase = Phase.NONE
self._status = [
Controller.PhaseStatus(limit=None, count=0, stats=StatsDict())
for _ in range(len(Phase))
]
def __repr__(self):
return f"Controller(phase={self._phase})"
@remote.remote_method(batch_size=None)
def reset(self) -> None:
self._phase = Phase.NONE
for status in self._status:
status.limit = None
status.count = 0
status.stats.clear()
@remote.remote_method(batch_size=None)
def phase(self) -> Phase:
return self._phase
@remote.remote_method(batch_size=None)
def set_phase(self, phase: Phase) -> None:
self._phase = phase
@remote.remote_method(batch_size=None)
def reset_phase(self, phase: Phase, limit: Optional[int] = None) -> None:
status = self._status[phase]
status.limit = limit
status.count = 0
status.stats.reset()
@remote.remote_method(batch_size=None)
def count(self, phase: Phase) -> int:
return self._status[phase].count
@remote.remote_method(batch_size=None)
def stats(self, phase: Phase) -> StatsDict:
return self._status[phase].stats
@remote.remote_method(batch_size=None)
def add_episode(self, phase: Phase, stats: Dict[str, float]) -> None:
status = self._status[phase]
if status.limit is None or status.count < status.limit:
status.count += 1
status.stats.extend(stats)
ControllerLike = Union[Controller, remote.Remote]
| 2,275 | 27.098765 | 77 | py |
rlmeta | rlmeta-main/rlmeta/core/rescalers.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from rlmeta.utils.running_stats import RunningMoments, RunningRMS
class Rescaler(nn.Module, abc.ABC):
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.rescale(x)
def reset(self) -> None:
pass
def update(self, x: torch.Tensor) -> None:
pass
@abc.abstractmethod
def rescale(self, x: torch.Tensor) -> torch.Tensor:
"""
Do rescale for the input tensor.
"""
@abc.abstractmethod
def recover(self, x: torch.Tensor) -> torch.Tensor:
"""
Undo rescale for the input tensor.
"""
class IdentityRescaler(Rescaler):
def rescale(self, x: torch.Tensor) -> torch.Tensor:
return x
def recover(self, x: torch.Tensor) -> torch.Tensor:
return x
class RMSRescaler(Rescaler):
def __init__(self,
size: Union[int, Tuple[int]],
eps: float = 1e-8,
dtype: torch.dtype = torch.float64) -> None:
super().__init__()
self._size = size
self._eps = eps
self._running_rms = RunningRMS(size, dtype=dtype)
@property
def size(self) -> Union[int, Tuple[int]]:
return self._size
@property
def eps(self) -> float:
return self._eps
def reset(self) -> None:
self._running_rms.reset()
def update(self, x: torch.Tensor) -> None:
self._running_rms.update(x)
def rescale(self, x: torch.Tensor) -> torch.Tensor:
return (x * self._running_rms.rrms(self._eps)).to(x.dtype)
def recover(self, x: torch.Tensor) -> torch.Tensor:
return (x * self._running_rms.rms(self._eps)).to(x.dtype)
class MomentsRescaler(Rescaler):
def __init__(self,
size: Union[int, Tuple[int]],
ddof: int = 0,
eps: float = 1e-8,
dtype: torch.dtype = torch.float64) -> None:
super().__init__()
self._size = size
self._ddof = ddof
self._eps = eps
self._running_moments = RunningMoments(size, dtype=dtype)
@property
def size(self) -> Union[int, Tuple[int]]:
return self._size
@property
def ddof(self) -> int:
return self._ddof
@property
def eps(self) -> float:
return self._eps
def reset(self) -> None:
self._running_moments.reset()
def update(self, x: torch.Tensor) -> None:
self._running_moments.update(x)
def rescale(self, x: torch.Tensor) -> torch.Tensor:
return x if self._running_moments.count() <= 1 else (
(x - self._running_moments.mean()) *
self._running_moments.rstd(self._ddof, self._eps)).to(x.dtype)
def recover(self, x: torch.Tensor) -> torch.Tensor:
return x if self._running_moments.count() <= 1 else (
(x * self._running_moments.std(self._ddof, self._eps)) +
self._running_moments.mean()).to(x.dtype)
class StdRescaler(Rescaler):
def __init__(self,
size: Union[int, Tuple[int]],
ddof: int = 0,
eps: float = 1e-8,
dtype: torch.dtype = torch.float64) -> None:
super().__init__()
self._size = size
self._ddof = ddof
self._eps = eps
self._running_moments = RunningMoments(size, dtype=dtype)
@property
def size(self) -> Union[int, Tuple[int]]:
return self._size
@property
def ddof(self) -> int:
return self._ddof
@property
def eps(self) -> float:
return self._eps
def reset(self) -> None:
self._running_moments.reset()
def update(self, x: torch.Tensor) -> None:
self._running_moments.update(x)
def rescale(self, x: torch.Tensor) -> torch.Tensor:
return x if self._running_moments.count() <= 1 else (
x * self._running_moments.rstd(self._ddof, self._eps)).to(x.dtype)
def recover(self, x: torch.Tensor) -> torch.Tensor:
return x if self._running_moments.count() <= 1 else (
x * self._running_moments.std(self._ddof, self._eps)).to(x.dtype)
class SignedHyperbolicRescaler(Rescaler):
"""
Transformed Bellman Operator in https://arxiv.org/abs/1805.11593.
"""
def __init__(self, eps: float = 1e-3) -> None:
super().__init__()
self._eps = eps
@property
def eps(self) -> float:
return self._eps
def rescale(self, x: torch.Tensor) -> torch.Tensor:
return x.sign() * ((x.abs() + 1.0).sqrt() - 1.0) + self.eps * x
def recover(self, x: torch.Tensor) -> torch.Tensor:
if self._eps == 0.0:
return x.sign() * (x.square() + 2.0 * x.abs())
r = ((1.0 + 4.0 * self.eps *
(x.abs() + 1.0 + self.eps)).sqrt() - 1.0) / (2.0 * self.eps)
return x.sign() * (r.square() - 1.0)
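# rescale and recover are exact inverses up to floating point error: rescale
# computes h(x) = sign(x) * (sqrt(|x| + 1) - 1) + eps * x and recover solves
# the resulting quadratic for x, so for example:
#
#     r = SignedHyperbolicRescaler()
#     x = torch.randn(8)
#     torch.allclose(r.recover(r.rescale(x)), x, atol=1e-5)  # expected True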
| 5,106 | 26.605405 | 78 | py |
rlmeta | rlmeta-main/rlmeta/core/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 179 | 35 | 65 | py |
rlmeta | rlmeta-main/rlmeta/core/replay_buffer.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import time
import logging
from typing import Callable, Optional, Sequence, Tuple, Union
from rich.console import Console
import numpy as np
import torch
import rlmeta.core.remote as remote
import rlmeta.utils.data_utils as data_utils
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.core.launchable import Launchable
from rlmeta.core.server import Server
from rlmeta.core.types import Tensor, NestedTensor
from rlmeta.storage import Storage
from rlmeta.samplers import Sampler
console = Console()
# The design of ReplayBuffer is inspired by DeepMind's Reverb project.
#
# https://github.com/deepmind/reverb
#
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IndexType = Union[int, Tensor]
KeyType = Union[int, Tensor]
ValueType = Union[NestedTensor, Sequence[NestedTensor]]
class ReplayBuffer(remote.Remotable, Launchable):
def __init__(self,
storage: Storage,
sampler: Sampler,
identifier: Optional[str] = None) -> None:
remote.Remotable.__init__(self, identifier)
self._storage = storage
self._sampler = sampler
def __len__(self) -> int:
return len(self._storage)
def __getitem__(self, index: IndexType) -> Tuple[KeyType, ValueType]:
return self._storage.at(index)
@property
def capacity(self) -> int:
return self._storage.capacity
@property
def size(self) -> int:
return self._storage.size
def init_launching(self) -> None:
pass
def init_execution(self) -> None:
pass
@remote.remote_method(batch_size=None)
def info(self) -> Tuple[int, int]:
return self.size, self.capacity
@remote.remote_method(batch_size=None)
def reset(self) -> None:
self._storage.reset()
self._sampler.reset()
@remote.remote_method(batch_size=None)
def clear(self) -> None:
self._storage.clear()
self._sampler.reset()
@remote.remote_method(batch_size=None)
def at(self, index: IndexType) -> Tuple[KeyType, ValueType]:
return self._storage.at(index)
@remote.remote_method(batch_size=None)
def get(self, key: KeyType) -> ValueType:
return self._storage.get(key)
@remote.remote_method(batch_size=None)
def append(self, data: NestedTensor, priority: float = 1.0) -> int:
new_key, old_key = self._storage.append(data)
self._sampler.insert(new_key, priority)
if old_key is not None:
self._sampler.delete(old_key)
return new_key
@remote.remote_method(batch_size=None)
def extend(self,
data: Sequence[NestedTensor],
priorities: Union[float, Tensor] = 1.0,
stacked: bool = False) -> torch.Tensor:
new_keys, old_keys = self._storage.extend(data, stacked)
if isinstance(priorities, torch.Tensor):
priorities = priorities.numpy().astype(np.float64)
elif isinstance(priorities, np.ndarray):
priorities = priorities.astype(np.float64)
self._sampler.insert(new_keys, priorities)
self._sampler.delete(old_keys)
return torch.from_numpy(new_keys)
@remote.remote_method(batch_size=None)
def sample(
self,
num_samples: int,
replacement: bool = False
) -> Tuple[torch.Tensor, NestedTensor, torch.Tensor]:
keys, probabilities = self._sampler.sample(num_samples, replacement)
values = self._storage.get(keys)
return torch.from_numpy(keys), values, torch.from_numpy(probabilities)
@remote.remote_method(batch_size=None)
def update(self, key: Union[int, Tensor], priority: Union[float,
Tensor]) -> None:
if isinstance(key, torch.Tensor):
key = key.numpy()
if isinstance(priority, torch.Tensor):
priority = priority.numpy().astype(np.float64)
elif isinstance(priority, np.ndarray):
priority = priority.astype(np.float64)
self._sampler.update(key, priority)
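# A minimal local usage sketch, assuming the CircularBuffer storage and
# UniformSampler shipped with rlmeta:
#
#     from rlmeta.samplers import UniformSampler
#     from rlmeta.storage.circular_buffer import CircularBuffer
#
#     buffer = ReplayBuffer(CircularBuffer(1000), UniformSampler())
#     buffer.extend([{"obs": torch.randn(4)} for _ in range(32)])
#     keys, values, probs = buffer.sample(8)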
class RemoteReplayBuffer(remote.Remote):
def __init__(self,
target: ReplayBuffer,
server_name: str,
server_addr: str,
name: Optional[str] = None,
prefetch: int = 0,
timeout: float = 60) -> None:
super().__init__(target, server_name, server_addr, name, timeout)
self._prefetch = prefetch
self._futures = collections.deque()
self._server_name = server_name
self._server_addr = server_addr
def __repr__(self):
return (f"RemoteReplayBuffer(server_name={self._server_name}, " +
f"server_addr={self._server_addr})")
@property
def prefetch(self) -> Optional[int]:
return self._prefetch
def sample(
self,
num_samples: int,
replacement: bool = False
) -> Union[NestedTensor, Tuple[NestedTensor, torch.Tensor, torch.Tensor,
torch.Tensor]]:
if len(self._futures) > 0:
ret = self._futures.popleft().result()
else:
ret = self.client.sync(self.server_name,
self.remote_method_name("sample"),
num_samples, replacement)
while len(self._futures) < self.prefetch:
fut = self.client.async_(self.server_name,
self.remote_method_name("sample"),
num_samples, replacement)
self._futures.append(fut)
return ret
async def async_sample(
self,
num_samples: int,
replacement: bool = False
) -> Union[NestedTensor, Tuple[NestedTensor, torch.Tensor, torch.Tensor,
torch.Tensor]]:
if len(self._futures) > 0:
ret = await self._futures.popleft()
else:
ret = await self.client.async_(self.server_name,
self.remote_method_name("sample"),
num_samples, replacement)
while len(self._futures) < self.prefetch:
fut = self.client.async_(self.server_name,
self.remote_method_name("sample"),
num_samples, replacement)
self._futures.append(fut)
return ret
def warm_up(self, learning_starts: Optional[int] = None) -> None:
size, capacity = self.info()
target_size = capacity
if learning_starts is not None:
target_size = min(target_size, learning_starts)
width = len(str(capacity)) + 1
while size < target_size:
time.sleep(1)
size, capacity = self.info()
console.log("Warming up replay buffer: " +
f"[{size: {width}d} / {capacity} ]")
ReplayBufferLike = Union[ReplayBuffer, RemoteReplayBuffer]
def make_remote_replay_buffer(target: ReplayBuffer,
server: Server,
name: Optional[str] = None,
prefetch: int = 0,
timeout: float = 60):
return RemoteReplayBuffer(target, server.name, server.addr, name, prefetch,
timeout)
| 8,167 | 33.464135 | 79 | py |
rlmeta | rlmeta-main/rlmeta/core/remote.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import functools
from typing import Any, Callable, List, Optional
import moolib
from rlmeta.core.launchable import Launchable
from rlmeta.utils.moolib_utils import generate_random_name
class RemotableMeta(abc.ABCMeta):
def __new__(cls, name, bases, attrs):
remote_methods = set(attrs.get("__remote_methods__", []))
for base in bases:
remote_methods.update(getattr(base, "__remote_methods__", []))
for method in attrs.values():
if getattr(method, "__remote__", False):
remote_methods.add(method.__name__)
attrs["__remote_methods__"] = list(remote_methods)
return super().__new__(cls, name, bases, attrs)
class Remotable(abc.ABC, metaclass=RemotableMeta):
def __init__(self, identifier: Optional[str] = None):
self._identifier = identifier
@property
def remote_methods(self) -> List[str]:
return getattr(self, "__remote_methods__", [])
@property
def identifier(self) -> Optional[str]:
return self._identifier
def remote_method_name(self, method: str) -> str:
return method if self._identifier is None else (self._identifier +
"::" + method)
class Remote:
def __init__(self,
target: Remotable,
server_name: str,
server_addr: str,
name: Optional[str] = None,
timeout: float = 60) -> None:
self._target_repr = repr(target)
self._server_name = server_name
self._server_addr = server_addr
self._remote_methods = target.remote_methods
self._identifier = target.identifier
self._reset(server_name, server_addr, name, timeout)
self._client_methods = {}
# TODO: Find a better way to implement this
def __getattribute__(self, attr: str) -> Any:
try:
return object.__getattribute__(self, attr)
except AttributeError:
ret = object.__getattribute__(self, "_client_methods").get(attr)
if ret is not None:
return ret
raise
def __repr__(self):
return (f"Remote(target={self._target_repr} " +
f"server_name={self._server_name} " +
f"server_addr={self._server_addr})")
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, name: str) -> None:
self._name = name
if self._client is not None:
self._client.set_name(name)
@property
def server_name(self) -> str:
return self._server_name
@property
def server_addr(self) -> str:
return self._server_addr
@property
def client(self) -> Optional[moolib.Client]:
return self._client
@property
def connected(self) -> bool:
return self._connected
@property
def identifier(self) -> Optional[str]:
return self._identifier
def remote_method_name(self, method: str) -> str:
return method if self._identifier is None else (self._identifier +
"::" + method)
def connect(self) -> None:
if self._connected:
return
self._client = moolib.Rpc()
self._client.set_name(self._name)
self._client.set_timeout(self._timeout)
self._client.connect(self._server_addr)
self._bind()
self._connected = True
def _reset(self,
server_name: str,
server_addr: str,
name: Optional[str] = None,
timeout: float = 60) -> None:
if name is None:
name = generate_random_name()
self._server_name = server_name
self._server_addr = server_addr
self._name = name
self._timeout = timeout
self._client = None
self._connected = False
def _bind(self) -> None:
for method in self._remote_methods:
method_name = self.remote_method_name(method)
self._client_methods[method] = functools.partial(
self.client.sync, self.server_name, method_name)
self._client_methods["async_" + method] = functools.partial(
self.client.async_, self.server_name, method_name)
def remote_method(batch_size: Optional[int] = None) -> Callable[..., Any]:
def remote_method_impl(func: Callable[..., Any]) -> Callable[..., Any]:
setattr(func, "__remote__", True)
setattr(func, "__batch_size__", batch_size)
return func
return remote_method_impl
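# Usage sketch: decorating a method on a Remotable exposes it through a
# connected Remote client, which auto-binds both sync and async_ variants.
# The Counter class below is hypothetical; Controller and ReplayBuffer are
# real in-tree examples.
#
#     class Counter(Remotable):
#
#         @remote_method(batch_size=None)
#         def add(self, n: int) -> int:
#             return n + 1
#
#     # After Remote(...).connect():  remote.add(1)  or  await remote.async_add(1)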
| 4,865 | 29.993631 | 76 | py |
rlmeta | rlmeta-main/rlmeta/envs/gym_wrapper.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
import numpy as np
import gym
from gym.wrappers.frame_stack import LazyFrames
from gym.wrappers.step_api_compatibility import StepAPICompatibility
import rlmeta.utils.data_utils as data_utils
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.core.types import Action, TimeStep
from rlmeta.core.types import Tensor, NestedTensor
from rlmeta.envs.env import Env
class GymWrapper(Env):
def __init__(self,
env: gym.Env,
observation_fn: Optional[Callable[..., Tensor]] = None,
old_step_api: bool = False) -> None:
super(GymWrapper, self).__init__()
self._env = StepAPICompatibility(
env, output_truncation_bool=True) if old_step_api else env
self._action_space = self._env.action_space
self._observation_space = self._env.observation_space
self._reward_range = self._env.reward_range
self._metadata = self._env.metadata
self._old_step_api = old_step_api
if observation_fn is not None:
self._observation_fn = observation_fn
else:
self._observation_fn = data_utils.to_torch
@property
def env(self):
return self._env
@property
def action_space(self):
return self._action_space
@property
def observation_space(self):
return self._observation_space
@property
def reward_range(self):
return self._reward_range
@property
def metadata(self):
return self._metadata
def reset(self, *args, seed: Optional[int] = None, **kwargs) -> TimeStep:
# TODO: Clean up this function when most envs are fully migrated to the
# new OpenAI Gym API.
if self._old_step_api:
if seed is not None:
self._env.seed(seed)
obs = self._env.reset(*args, **kwargs)
info = None
else:
obs, info = self._env.reset(*args, seed=seed, **kwargs)
obs = nested_utils.map_nested(
lambda x: self._observation_fn(
np.asarray(x) if isinstance(x, LazyFrames) else x), obs)
return TimeStep(obs, info=info)
def step(self, action: Action) -> TimeStep:
act = action.action
if not isinstance(act, int):
act = act.item()
obs, reward, terminated, truncated, info = self._env.step(act)
obs = nested_utils.map_nested(
lambda x: self._observation_fn(
np.asarray(x) if isinstance(x, LazyFrames) else x), obs)
return TimeStep(obs, reward, terminated, truncated, info)
def close(self) -> None:
self._env.close()
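# Minimal usage sketch (an illustrative assumption, not part of the library;
# requires a gym version with the 5-tuple step API): wrap CartPole and run a
# single random step.
if __name__ == "__main__":
    _env = GymWrapper(gym.make("CartPole-v1"))
    _timestep = _env.reset(seed=0)
    _action = Action(_env.action_space.sample(), info=None)
    print(_env.step(_action))
    _env.close()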
| 2,873 | 30.582418 | 79 | py |
rlmeta | rlmeta-main/rlmeta/envs/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 179 | 35 | 65 | py |
rlmeta | rlmeta-main/rlmeta/envs/atari_wrapper.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import numpy as np
import gym
from gym.wrappers.atari_preprocessing import AtariPreprocessing
from gym.wrappers.frame_stack import FrameStack
from gym.wrappers.time_limit import TimeLimit
from gym.wrappers.transform_reward import TransformReward
from rlmeta.envs.env import Env, EnvFactory
from rlmeta.envs.gym_wrapper import GymWrapper
def make_atari_env(
game: str,
mode: Optional[int] = None,
difficulty: Optional[int] = None,
repeat_action_probability: float = 0.25, # sticky actions
full_action_space: bool = False,
max_num_frames_per_episode: Optional[int] = None,
render_mode: Optional[str] = None,
noop_max: int = 30,
frame_skip: int = 4,
screen_size: int = 84,
terminal_on_life_loss: bool = False,
grayscale_obs: bool = True,
grayscale_newaxis: bool = False,
scale_obs: bool = False,
clip_rewards: bool = False,
frame_stack: Optional[int] = 4) -> Env:
game = "ALE/" + game + "-v5"
env = gym.make(
game,
mode=mode,
difficulty=difficulty,
obs_type="rgb", # Convert to grayscale in AtariPreprocessing
frameskip=1, # NoFrameskip, max and skip in AtariPreprocessing
repeat_action_probability=repeat_action_probability,
full_action_space=full_action_space,
max_num_frames_per_episode=max_num_frames_per_episode,
render_mode=render_mode)
env = AtariPreprocessing(env,
noop_max=noop_max,
frame_skip=frame_skip,
screen_size=screen_size,
terminal_on_life_loss=terminal_on_life_loss,
grayscale_obs=grayscale_obs,
grayscale_newaxis=grayscale_newaxis,
scale_obs=scale_obs)
if clip_rewards:
        # Clip rewards, not observations, when clip_rewards is requested.
        env = TransformReward(env, np.sign)
if frame_stack is not None:
env = FrameStack(env, frame_stack)
return GymWrapper(env)
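# Usage sketch (illustrative; assumes ale-py and the Atari ROMs are
# installed):
#
#   env = make_atari_env("Pong", clip_rewards=True)
#   timestep = env.reset(seed=0)
#   print(timestep.observation.shape)  # expected: torch.Size([4, 84, 84])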
class AtariWrapperFactory(EnvFactory):
def __init__(
self,
game: str,
mode: Optional[int] = None,
difficulty: Optional[int] = None,
repeat_action_probability: float = 0.0, # v4
full_action_space: bool = False,
max_num_frames_per_episode: Optional[int] = None,
render_mode: Optional[str] = None,
noop_max: int = 30,
frame_skip: int = 4,
screen_size: int = 84,
terminal_on_life_loss: bool = False,
grayscale_obs: bool = True,
grayscale_newaxis: bool = False,
scale_obs: bool = False,
clip_rewards: bool = False,
frame_stack: Optional[int] = 4) -> None:
# AtariEnv args.
self._game = game
self._mode = mode
self._difficulty = difficulty
self._repeat_action_probability = repeat_action_probability
self._full_action_space = full_action_space
self._max_num_frames_per_episode = max_num_frames_per_episode
self._render_mode = render_mode
# AtariPreprocessing args.
self._noop_max = noop_max
self._frame_skip = frame_skip
self._screen_size = screen_size
self._terminal_on_life_loss = terminal_on_life_loss
self._grayscale_obs = grayscale_obs
self._grayscale_newaxis = grayscale_newaxis
self._scale_obs = scale_obs
# Wrappers args.
self._clip_rewards = clip_rewards
self._frame_stack = frame_stack
def __call__(self, index: int, *args, **kwargs) -> Env:
return make_atari_env(
self._game, self._mode, self._difficulty,
self._repeat_action_probability, self._full_action_space,
self._max_num_frames_per_episode, self._render_mode, self._noop_max,
self._frame_skip, self._screen_size, self._terminal_on_life_loss,
self._grayscale_obs, self._grayscale_newaxis, self._scale_obs,
self._clip_rewards, self._frame_stack)
| 4,344 | 35.822034 | 80 | py |
rlmeta | rlmeta-main/rlmeta/envs/env.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Optional, Type
from rlmeta.core.types import Action, TimeStep
from rlmeta.core.types import NestedTensor
class Env(abc.ABC):
@abc.abstractmethod
def reset(self, *args, seed: Optional[int] = None, **kwargs) -> TimeStep:
"""
Reset env.
"""
@abc.abstractmethod
def step(self, action: Action) -> TimeStep:
"""
Single env step.
"""
@abc.abstractmethod
def close(self) -> None:
"""
Release resources.
"""
class EnvFactory:
def __init__(self, cls: Type[Env], *args, **kwargs) -> None:
self._cls = cls
self._args = args
self._kwargs = kwargs
def __call__(self, index: int) -> Env:
return self._cls(*self._args, **self._kwargs)
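# Usage sketch (hypothetical "MyEnv" for illustration): the factory captures
# the constructor arguments once so each rollout worker can build its own
# Env instance by index.
#
#   factory = EnvFactory(MyEnv, difficulty=1)
#   env0 = factory(0)
#   env1 = factory(1)  # independent instance built with the same arguments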
| 976 | 21.204545 | 77 | py |
rlmeta | rlmeta-main/rlmeta/models/actor_critic.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sequence, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlmeta.models.utils import MLP
class DiscreteActorCriticHead(nn.Module):
def __init__(self, input_size: int, hidden_sizes: Sequence[int],
num_actions: int) -> None:
super().__init__()
self._input_size = input_size
self._hidden_sizes = hidden_sizes
self._num_actions = num_actions
self._mlp_p = MLP(input_size, [*hidden_sizes, num_actions])
self._mlp_v = MLP(input_size, [*hidden_sizes, 1])
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
p = self._mlp_p(x)
logpi = F.log_softmax(p, dim=-1)
v = self._mlp_v(x)
return logpi, v
class DiscreteActorCriticRNDHead(nn.Module):
def __init__(self, input_size: int, hidden_sizes: Sequence[int],
num_actions: int) -> None:
super().__init__()
self._input_size = input_size
self._hidden_sizes = hidden_sizes
self._num_actions = num_actions
self._mlp_p = MLP(input_size, [*hidden_sizes, num_actions])
self._mlp_ext_v = MLP(input_size, [*hidden_sizes, 1])
self._mlp_int_v = MLP(input_size, [*hidden_sizes, 1])
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
p = self._mlp_p(x)
logpi = F.log_softmax(p, dim=-1)
ext_v = self._mlp_ext_v(x)
int_v = self._mlp_int_v(x)
return logpi, ext_v, int_v
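# Shape sketch (illustrative assumption): with batched 512-d features, the
# heads emit per-action log-probabilities and one (or, for the RND head, two)
# value estimates.
if __name__ == "__main__":
    _x = torch.randn(8, 512)
    _logpi, _v = DiscreteActorCriticHead(512, [256], 6)(_x)
    print(_logpi.shape, _v.shape)  # torch.Size([8, 6]) torch.Size([8, 1])
    _logpi, _ev, _iv = DiscreteActorCriticRNDHead(512, [256], 6)(_x)
    print(_logpi.shape, _ev.shape, _iv.shape)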
| 1,699 | 32.333333 | 76 | py |
rlmeta | rlmeta-main/rlmeta/models/utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sequence
import torch
import torch.nn as nn
# The MLP class is inspired from the MLP class in DeepMind's haiku lib.
# https://github.com/deepmind/dm-haiku/blob/6f2769e8c8dd35b3fc0e66905c877debea7d525f/haiku/_src/nets/mlp.py#L38
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MLP(nn.Module):
def __init__(self,
input_size: int,
hidden_sizes: Sequence[int],
activate_last: bool = False) -> None:
super().__init__()
self._input_size = input_size
self._hidden_sizes = hidden_sizes
        self._activate_last = activate_last
        prev_size = input_size
        hidden_sizes = list(hidden_sizes)  # Copy so the caller's sequence is not mutated by pop().
        last_size = hidden_sizes.pop()
layers = []
for hidden_size in hidden_sizes:
layers.append(nn.Linear(prev_size, hidden_size))
layers.append(nn.ReLU())
prev_size = hidden_size
layers.append(nn.Linear(prev_size, last_size))
if activate_last:
layers.append(nn.ReLU())
self._layers = nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._layers(x)
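# Note on the hidden_sizes convention above (illustrative): the last entry is
# the output size, so MLP(4, [32, 32, 2]) builds Linear(4, 32) -> ReLU ->
# Linear(32, 32) -> ReLU -> Linear(32, 2), with a trailing ReLU appended only
# when activate_last=True.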
class ResidualBlock(nn.Module):
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: int = 3) -> None:
super().__init__()
self._in_channels = in_channels
self._out_channels = out_channels
self._kernel_size = kernel_size
layers = []
layers.append(nn.ReLU())
layers.append(
nn.Conv2d(in_channels,
out_channels,
kernel_size=kernel_size,
padding="same"))
layers.append(nn.ReLU())
layers.append(
nn.Conv2d(out_channels,
out_channels,
kernel_size=kernel_size,
padding="same"))
self._layers = nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x + self._layers(x)
| 2,810 | 32.464286 | 111 | py |
rlmeta | rlmeta-main/rlmeta/models/dqn.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sequence
import torch
import torch.nn as nn
from rlmeta.models.utils import MLP
class DQNHead(nn.Module):
def __init__(self, input_size: int, hidden_sizes: Sequence[int],
num_actions: int) -> None:
super().__init__()
self._input_size = input_size
self._hidden_sizes = hidden_sizes
self._num_actions = num_actions
self._mlp = MLP(input_size, [*hidden_sizes, num_actions])
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._mlp(x)
class DuelingDQNHead(nn.Module):
def __init__(self, input_size: int, hidden_sizes: Sequence[int],
num_actions: int) -> None:
super().__init__()
self._input_size = input_size
self._hidden_sizes = hidden_sizes
self._num_actions = num_actions
self._mlp_a = MLP(input_size, [*hidden_sizes, num_actions])
self._mlp_v = MLP(input_size, [*hidden_sizes, 1])
def forward(self, x: torch.Tensor) -> torch.Tensor:
a = self._mlp_a(x)
v = self._mlp_v(x)
return v + a - a.mean(dim=-1, keepdim=True)
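# Shape sketch (illustrative assumption): both heads map batched features to
# one Q value per action; the dueling head combines its streams as
# Q(s, a) = V(s) + A(s, a) - mean_a A(s, a) (Wang et al., 2016).
if __name__ == "__main__":
    _x = torch.randn(2, 16)
    print(DQNHead(16, [32], 4)(_x).shape)         # torch.Size([2, 4])
    print(DuelingDQNHead(16, [32], 4)(_x).shape)  # torch.Size([2, 4])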
| 1,306 | 29.395349 | 68 | py |
rlmeta | rlmeta-main/rlmeta/models/atari.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from rlmeta.models.utils import ResidualBlock
class NatureCNNBackbone(nn.Module):
def __init__(self) -> None:
super().__init__()
layers = []
layers.append(nn.Conv2d(4, 32, kernel_size=8, stride=4))
layers.append(nn.ReLU())
layers.append(nn.Conv2d(32, 64, kernel_size=4, stride=2))
layers.append(nn.ReLU())
layers.append(nn.Conv2d(64, 64, kernel_size=3, stride=1))
layers.append(nn.ReLU())
layers.append(nn.Flatten())
self._layers = nn.Sequential(*layers)
@property
def output_size(self) -> int:
return 3136
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._layers(x)
class ImpalaCNNBackbone(nn.Module):
def __init__(self) -> None:
super().__init__()
layers = []
layers.append(self._conv_block(4, 16))
layers.append(self._conv_block(16, 32))
layers.append(self._conv_block(32, 32))
layers.append(nn.ReLU())
layers.append(nn.Flatten())
self._layers = nn.Sequential(*layers)
@property
def output_size(self) -> int:
return 3872
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._layers(x)
def _conv_block(self, in_channels: int, out_channels: int) -> nn.Module:
layers = []
layers.append(
nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding="same"))
layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
layers.append(ResidualBlock(out_channels, out_channels))
layers.append(ResidualBlock(out_channels, out_channels))
return nn.Sequential(*layers)
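# Shape check (illustrative assumption, 84x84 stacked-grayscale input): the
# Nature CNN flattens to 64 * 7 * 7 = 3136 features and the IMPALA CNN to
# 32 * 11 * 11 = 3872, matching the output_size properties above.
if __name__ == "__main__":
    _x = torch.randn(1, 4, 84, 84)
    print(NatureCNNBackbone()(_x).shape)  # torch.Size([1, 3136])
    print(ImpalaCNNBackbone()(_x).shape)  # torch.Size([1, 3872])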
| 1,990 | 28.716418 | 76 | py |
rlmeta | rlmeta-main/rlmeta/agents/agent.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import asyncio
import copy
from concurrent.futures import Future
from typing import Any, Optional, Type, Union
import rlmeta.core.remote as remote
import rlmeta.utils.moolib_utils as moolib_utils
from rlmeta.core.types import Action, TimeStep
from rlmeta.core.types import NestedTensor
from rlmeta.utils.stats_dict import StatsDict
# Agent class is adapted from Acme's Agent API design.
# The async_ APIs are added to be used in our AsyncLoops.
# https://github.com/deepmind/acme/blob/6cf4f656762d71f5a903cba39fc96d7e3bfa3672/acme/agents/agent.py
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Agent(abc.ABC):
def reset(self) -> None:
pass
def act(self, timestep: TimeStep) -> Action:
"""
Act function.
"""
pass
@abc.abstractmethod
async def async_act(self, timestep: TimeStep) -> Action:
"""
Async version of act function.
"""
def observe_init(self, timestep: TimeStep) -> None:
"""
Observe function for initial timestep from Env.
"""
pass
@abc.abstractmethod
async def async_observe_init(self, timestep: TimeStep) -> None:
"""
        Async version of observe function for initial timestep from Env.
"""
def observe(self, action: Action, next_timestep: TimeStep) -> None:
"""
Observe function for action and next timestep.
"""
pass
@abc.abstractmethod
async def async_observe(self, action: Action,
next_timestep: TimeStep) -> None:
"""
Async version of observe function for action and next timestep.
"""
def update(self) -> None:
"""
Update function after each step.
"""
pass
@abc.abstractmethod
async def async_update(self) -> None:
"""
Async version of update function after each step.
"""
def connect(self) -> None:
for obj_name in dir(self):
obj = getattr(self, obj_name)
if isinstance(obj, remote.Remote):
obj.connect()
def train(self,
num_steps: int,
keep_evaluation_loops: bool = False) -> StatsDict:
pass
def eval(self,
num_episodes: int,
keep_training_loops: bool = False,
non_blocking: bool = False) -> Union[StatsDict, Future]:
pass
class AgentFactory:
def __init__(self, cls: Type[Agent], *args, **kwargs) -> None:
self._cls = cls
self._args = args
self._kwargs = kwargs
def __call__(self, index: int) -> Agent:
args = []
kwargs = {}
for x in self._args:
args.append(self._make_arg(x, index))
for k, v in self._kwargs.items():
kwargs[k] = self._make_arg(v, index)
return self._cls(*args, **kwargs)
def _make_arg(self, arg: Any, index: int) -> Any:
if isinstance(arg, remote.Remote):
arg = copy.deepcopy(arg)
arg.name = moolib_utils.expend_name_by_index(arg.name, index)
return arg
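# Usage sketch (hypothetical): the factory deep-copies every Remote argument
# and extends its client name with the loop index, so each rollout loop gets
# its own uniquely named connection to the shared servers.
#
#   factory = AgentFactory(PPOAgent, model, replay_buffer=replay_buffer)
#   agent0 = factory(0)
#   agent1 = factory(1)  # same servers, distinct client names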
| 3,867 | 28.30303 | 101 | py |
rlmeta | rlmeta-main/rlmeta/agents/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 179 | 35 | 65 | py |
rlmeta | rlmeta-main/rlmeta/agents/ppo/ppo_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Tuple
import torch
import torch.nn as nn
from rlmeta.core.model import RemotableModel
class PPOModel(RemotableModel):
@abc.abstractmethod
def forward(self, obs: torch.Tensor, *args,
**kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Forward function for PPO model.
Args:
obs: A torch.Tensor for observation.
Returns:
            A tuple of PyTorch tensors containing (logpi, v).
            logpi: The log probability for each action.
            v: The value of the current state.
"""
@abc.abstractmethod
def act(self, obs: torch.Tensor, deterministic_policy: torch.Tensor, *args,
**kwargs) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Act function will be called remotely by the agent.
This function should upload the input to the device and download the
output to cpu.
Args:
obs: A torch.Tensor for observation.
deterministic_policy: A torch.Tensor for whether to use
deterministic_policy.
Returns:
            A tuple of PyTorch tensors containing (action, logpi, v).
            action: The final action selected by the model.
            logpi: The log probability for each action.
v: The value of the current state.
"""
| 1,555 | 28.358491 | 79 | py |
rlmeta | rlmeta-main/rlmeta/agents/ppo/ppo_rnd_agent.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Dict, List, Optional, Sequence
import torch
import torch.nn as nn
import rlmeta.utils.data_utils as data_utils
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.controller import ControllerLike
from rlmeta.core.model import ModelLike
from rlmeta.core.replay_buffer import ReplayBufferLike
from rlmeta.core.rescalers import StdRescaler
from rlmeta.core.types import Action, TimeStep
from rlmeta.core.types import Tensor, NestedTensor
class PPORNDAgent(PPOAgent):
def __init__(
self,
model: ModelLike,
deterministic_policy: bool = False,
replay_buffer: Optional[ReplayBufferLike] = None,
controller: Optional[ControllerLike] = None,
optimizer: Optional[torch.optim.Optimizer] = None,
batch_size: int = 128,
max_grad_norm: float = 1.0,
gamma: float = 0.99,
gae_lambda: float = 0.95,
ratio_clipping_eps: float = 0.2,
value_clipping_eps: Optional[float] = 0.2,
intrinsic_advantage_coeff: float = 0.5,
vf_loss_coeff: float = 0.5,
entropy_coeff: float = 0.01,
rescale_reward: bool = True,
max_abs_reward: float = 10.0,
normalize_advantage: bool = True,
learning_starts: Optional[int] = None,
model_push_period: int = 10,
local_batch_size: int = 1024,
collate_fn: Optional[Callable[[Sequence[NestedTensor]],
NestedTensor]] = None
) -> None:
super().__init__(model, deterministic_policy, replay_buffer, controller,
optimizer, batch_size, max_grad_norm, gamma,
gae_lambda, ratio_clipping_eps, value_clipping_eps,
vf_loss_coeff, entropy_coeff, rescale_reward,
max_abs_reward, normalize_advantage, learning_starts,
model_push_period, local_batch_size)
self._intrinsic_advantage_coeff = intrinsic_advantage_coeff
self._reward_rescaler = None
self._ext_reward_rescaler = StdRescaler(
size=1) if rescale_reward else None
self._int_reward_rescaler = StdRescaler(
size=1) if rescale_reward else None
self._collate_fn = torch.stack if collate_fn is None else collate_fn
def act(self, timestep: TimeStep) -> Action:
obs = timestep.observation
action, logpi, ext_v, int_v = self._model.act(
obs, self._deterministic_policy)
return Action(action,
info={
"logpi": logpi,
"ext_v": ext_v,
"int_v": int_v,
})
async def async_act(self, timestep: TimeStep) -> Action:
obs = timestep.observation
action, logpi, ext_v, int_v = await self._model.async_act(
obs, self._deterministic_policy)
return Action(action,
info={
"logpi": logpi,
"ext_v": ext_v,
"int_v": int_v,
})
def observe(self, action: Action, next_timestep: TimeStep) -> None:
if self._replay_buffer is None:
return
act, info = action
obs, reward, terminated, truncated, _ = next_timestep
cur = self._trajectory[-1]
cur["reward"] = reward
cur["action"] = act
cur["logpi"] = info["logpi"]
cur["ext_v"] = info["ext_v"]
cur["int_v"] = info["int_v"]
self._trajectory.append({
"obs": obs,
"terminated": terminated,
"truncated": truncated,
})
async def async_observe(self, action: Action,
next_timestep: TimeStep) -> None:
self.observe(action, next_timestep)
def update(self) -> None:
if not self._trajectory:
return
last_step = self._trajectory[-1]
done = last_step["terminated"] or last_step["truncated"]
if self._replay_buffer is None or not done:
return
last_step["reward"] = 0.0
last_ext_v = torch.zeros(1)
last_int_v = torch.zeros(1)
if last_step["truncated"]:
# TODO: Find a better way to compute last_v.
_, _, last_ext_v, last_int_v = self._model.act(
last_step["obs"], self._deterministic_policy)
last_step["ext_v"] = last_ext_v
last_step["int_v"] = last_int_v
replay = self._make_replay()
self._send_replay(replay)
self._trajectory.clear()
async def async_update(self) -> None:
if not self._trajectory:
return
last_step = self._trajectory[-1]
done = last_step["terminated"] or last_step["truncated"]
if self._replay_buffer is None or not done:
return
last_step["reward"] = 0.0
last_ext_v = torch.zeros(1)
last_int_v = torch.zeros(1)
if last_step["truncated"]:
# TODO: Find a better way to compute last_v.
_, _, last_ext_v, last_int_v = await self._model.async_act(
last_step["obs"], self._deterministic_policy)
last_step["ext_v"] = last_ext_v
last_step["int_v"] = last_int_v
        replay = await self._async_make_replay()
await self._async_send_replay(replay)
self._trajectory.clear()
def _make_replay(self) -> List[NestedTensor]:
next_obs = [
self._trajectory[i]["obs"] for i in range(1, len(self._trajectory))
]
int_rewards = self._compute_intrinsic_reward(next_obs)
return self._make_replay_impl(int_rewards)
async def _async_make_replay(self) -> List[NestedTensor]:
next_obs = [
self._trajectory[i]["obs"] for i in range(1, len(self._trajectory))
]
int_rewards = await self._async_compute_intrinsic_reward(next_obs)
return self._make_replay_impl(int_rewards)
def _make_replay_impl(
self,
intrinsic_rewards: Sequence[NestedTensor]) -> List[NestedTensor]:
ext_adv, ext_ret = self._compute_gae_and_return(
[x["ext_v"] for x in self._trajectory],
[x["reward"] for x in self._trajectory], self._ext_reward_rescaler)
int_adv, int_ret = self._compute_gae_and_return(
[x["int_v"] for x in self._trajectory], intrinsic_rewards,
self._int_reward_rescaler)
self._trajectory.pop()
for cur, ext_a, ext_r, int_a, int_r in zip(self._trajectory, ext_adv,
ext_ret, int_adv, int_ret):
cur["ext_gae"] = ext_a
cur["ext_ret"] = ext_r
cur["int_gae"] = int_a
cur["int_ret"] = int_r
cur.pop("reward")
cur.pop("terminated")
cur.pop("truncated")
return self._trajectory
def _compute_intrinsic_reward(
self, next_obs: Sequence[NestedTensor]) -> List[torch.Tensor]:
int_rewards = []
n = len(next_obs)
next_obs = nested_utils.collate_nested(self._collate_fn, next_obs)
for i in range(0, n, self._local_batch_size):
batch = nested_utils.map_nested(
lambda x, i=i: x[i:i + self._local_batch_size], next_obs)
cur_rewards = self._model.intrinsic_reward(batch)
int_rewards.extend(torch.unbind(cur_rewards))
int_rewards.append(torch.zeros(1)) # Padding for last step.
return int_rewards
    async def _async_compute_intrinsic_reward(
            self, next_obs: Sequence[NestedTensor]) -> List[torch.Tensor]:
        int_rewards = []
        n = len(next_obs)
        next_obs = nested_utils.collate_nested(self._collate_fn, next_obs)
        for i in range(0, n, self._local_batch_size):
            batch = nested_utils.map_nested(
                lambda x, i=i: x[i:i + self._local_batch_size], next_obs)
cur_rewards = await self._model.async_intrinsic_reward(batch)
int_rewards.extend(torch.unbind(cur_rewards))
        int_rewards.append(torch.zeros(1))  # Padding for last step.
return int_rewards
def _train_step(self, batch: NestedTensor) -> Dict[str, float]:
batch = nested_utils.map_nested(lambda x: x.to(self._model.device),
batch)
self._optimizer.zero_grad()
obs = batch["obs"]
act = batch["action"]
ext_adv = batch["ext_gae"]
ext_ret = batch["ext_ret"]
int_adv = batch["int_gae"]
int_ret = batch["int_ret"]
behavior_logpi = batch["logpi"]
behavior_ext_v = batch["ext_v"]
behavior_int_v = batch["int_v"]
logpi, ext_v, int_v = self._model_forward(obs)
adv = ext_adv + self._intrinsic_advantage_coeff * int_adv
policy_loss, ratio = self._policy_loss(logpi.gather(dim=-1, index=act),
behavior_logpi, adv)
ext_value_loss = self._value_loss(ext_ret, ext_v, behavior_ext_v)
int_value_loss = self._value_loss(int_ret, int_v, behavior_int_v)
value_loss = ext_value_loss + int_value_loss
entropy = self._entropy(logpi)
rnd_loss = self._rnd_loss(obs)
loss = policy_loss + (self._vf_loss_coeff * value_loss) - (
self._entropy_coeff * entropy) + rnd_loss
loss.backward()
grad_norm = nn.utils.clip_grad_norm_(self._model.parameters(),
self._max_grad_norm)
self._optimizer.step()
return {
"ext_return": ext_ret.detach().mean().item(),
"int_return": int_ret.detach().mean().item(),
"policy_ratio": ratio.detach().mean().item(),
"policy_loss": policy_loss.detach().mean().item(),
"ext_value_loss": ext_value_loss.detach().mean().item(),
"int_value_loss": int_value_loss.detach().mean().item(),
"value_loss": value_loss.detach().mean().item(),
"entropy": entropy.detach().mean().item(),
"rnd_loss": rnd_loss.detach().mean().item(),
"loss": loss.detach().mean().item(),
"grad_norm": grad_norm.detach().mean().item(),
}
def _rnd_loss(self, next_obs: torch.Tensor) -> torch.Tensor:
return self._model.rnd_loss(next_obs)
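# Note (illustrative): following Random Network Distillation (Burda et al.,
# 2018), the intrinsic reward is the prediction error of a trainable network
# against a fixed, randomly initialized target network. Minimizing rnd_loss
# on visited observations shrinks that error, and hence the exploration
# bonus, for familiar states.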
| 10,632 | 39.276515 | 80 | py |
rlmeta | rlmeta-main/rlmeta/agents/ppo/ppo_rnd_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Tuple
import torch
import torch.nn as nn
from rlmeta.core.model import RemotableModel
class PPORNDModel(RemotableModel):
@abc.abstractmethod
def forward(self, obs: torch.Tensor, *args,
**kwargs) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
        Forward function for PPO RND model.
        Args:
            obs: A torch.Tensor for observation.
        Returns:
            A tuple of PyTorch tensors containing (logpi, ext_v, int_v).
            logpi: The log probability for each action.
            ext_v: The extrinsic value of the current state.
            int_v: The intrinsic value of the current state.
"""
@abc.abstractmethod
def act(
self, obs: torch.Tensor, deterministic_policy: torch.Tensor, *args,
**kwargs
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Act function will be called remotely by the agent.
This function should upload the input to the device and download the
output to cpu.
Args:
obs: A torch.Tensor for observation.
deterministic_policy: A torch.Tensor for whether to use
deterministic_policy.
Returns:
            A tuple of PyTorch tensors containing (action, logpi, ext_v, int_v).
            action: The final action selected by the model.
            logpi: The log probability for each action.
ext_v: The extrinsic value of the current state.
int_v: The intrinsic value of the current state.
"""
@abc.abstractmethod
def intrinsic_reward(self, obs: torch.Tensor) -> torch.Tensor:
"""
"""
@abc.abstractmethod
def rnd_loss(self, obs: torch.Tensor) -> torch.Tensor:
"""
"""
| 1,906 | 27.893939 | 78 | py |
rlmeta | rlmeta-main/rlmeta/agents/ppo/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.agents.ppo.ppo_rnd_agent import PPORNDAgent
from rlmeta.agents.ppo.ppo_model import PPOModel
from rlmeta.agents.ppo.ppo_rnd_model import PPORNDModel
__all__ = [
"PPOAgent",
"PPORNDAgent",
"PPOModel",
"PPORNDModel",
]
| 475 | 27 | 65 | py |
rlmeta | rlmeta-main/rlmeta/agents/ppo/ppo_agent.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from rich.console import Console
from rich.progress import track
import rlmeta.utils.data_utils as data_utils
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.agent import Agent
from rlmeta.core.controller import Controller, ControllerLike, Phase
from rlmeta.core.model import ModelLike
from rlmeta.core.replay_buffer import ReplayBufferLike
from rlmeta.core.rescalers import Rescaler, StdRescaler
from rlmeta.core.types import Action, TimeStep
from rlmeta.core.types import Tensor, NestedTensor
from rlmeta.utils.stats_dict import StatsDict
console = Console()
class PPOAgent(Agent):
def __init__(self,
model: ModelLike,
deterministic_policy: bool = False,
replay_buffer: Optional[ReplayBufferLike] = None,
controller: Optional[ControllerLike] = None,
optimizer: Optional[torch.optim.Optimizer] = None,
batch_size: int = 512,
max_grad_norm: float = 1.0,
gamma: float = 0.99,
gae_lambda: float = 0.95,
ratio_clipping_eps: float = 0.2,
value_clipping_eps: Optional[float] = 0.2,
vf_loss_coeff: float = 0.5,
entropy_coeff: float = 0.01,
rescale_reward: bool = True,
max_abs_reward: float = 10.0,
normalize_advantage: bool = True,
learning_starts: Optional[int] = None,
model_push_period: int = 10,
local_batch_size: int = 1024) -> None:
super().__init__()
self._model = model
self._deterministic_policy = torch.tensor([deterministic_policy])
self._replay_buffer = replay_buffer
self._controller = controller
self._optimizer = optimizer
self._batch_size = batch_size
self._max_grad_norm = max_grad_norm
self._gamma = gamma
self._gae_lambda = gae_lambda
self._ratio_clipping_eps = ratio_clipping_eps
self._value_clipping_eps = value_clipping_eps
self._vf_loss_coeff = vf_loss_coeff
self._entropy_coeff = entropy_coeff
self._rescale_reward = rescale_reward
self._max_abs_reward = max_abs_reward
self._reward_rescaler = StdRescaler(size=1) if rescale_reward else None
self._normalize_advantage = normalize_advantage
self._learning_starts = learning_starts
self._model_push_period = model_push_period
self._local_batch_size = local_batch_size
self._trajectory = []
self._step_counter = 0
self._eval_executor = None
def reset(self) -> None:
self._step_counter = 0
def act(self, timestep: TimeStep) -> Action:
obs = timestep.observation
action, logpi, v = self._model.act(obs, self._deterministic_policy)
return Action(action, info={"logpi": logpi, "v": v})
async def async_act(self, timestep: TimeStep) -> Action:
obs = timestep.observation
action, logpi, v = await self._model.async_act(
obs, self._deterministic_policy)
return Action(action, info={"logpi": logpi, "v": v})
def observe_init(self, timestep: TimeStep) -> None:
if self._replay_buffer is None:
return
obs, _, terminated, truncated, _ = timestep
if terminated or truncated:
self._trajectory.clear()
else:
self._trajectory = [{
"obs": obs,
"terminated": terminated,
"truncated": truncated,
}]
async def async_observe_init(self, timestep: TimeStep) -> None:
self.observe_init(timestep)
def observe(self, action: Action, next_timestep: TimeStep) -> None:
if self._replay_buffer is None:
return
act, info = action
obs, reward, terminated, truncated, _ = next_timestep
cur = self._trajectory[-1]
cur["action"] = act
cur["logpi"] = info["logpi"]
cur["v"] = info["v"]
cur["reward"] = reward
self._trajectory.append({
"obs": obs,
"terminated": terminated,
"truncated": truncated,
})
async def async_observe(self, action: Action,
next_timestep: TimeStep) -> None:
self.observe(action, next_timestep)
def update(self) -> None:
if not self._trajectory:
return
last_step = self._trajectory[-1]
done = last_step["terminated"] or last_step["truncated"]
if self._replay_buffer is None or not done:
return
last_step["reward"] = 0.0
last_v = torch.zeros(1)
if last_step["truncated"]:
# TODO: Find a better way to compute last_v.
_, _, last_v = self._model.act(last_step["obs"],
self._deterministic_policy)
last_step["v"] = last_v
replay = self._make_replay()
self._send_replay(replay)
self._trajectory.clear()
async def async_update(self) -> None:
if not self._trajectory:
return
last_step = self._trajectory[-1]
done = last_step["terminated"] or last_step["truncated"]
if self._replay_buffer is None or not done:
return
last_step["reward"] = 0.0
last_v = torch.zeros(1)
if last_step["truncated"]:
# TODO: Find a better way to compute last_v.
_, _, last_v = await self._model.async_act(
last_step["obs"], self._deterministic_policy)
last_step["v"] = last_v
replay = self._make_replay()
await self._async_send_replay(replay)
self._trajectory.clear()
def train(self,
num_steps: int,
keep_evaluation_loops: bool = False) -> StatsDict:
phase = self._controller.phase()
if keep_evaluation_loops:
self._controller.set_phase(Phase.TRAIN | phase)
else:
self._controller.set_phase(Phase.TRAIN)
self._replay_buffer.warm_up(self._learning_starts)
stats = StatsDict()
console.log(f"Training for num_steps = {num_steps}")
for _ in track(range(num_steps), description="Training..."):
t0 = time.perf_counter()
_, batch, _ = self._replay_buffer.sample(self._batch_size)
t1 = time.perf_counter()
step_stats = self._train_step(batch)
t2 = time.perf_counter()
time_stats = {
"sample_data_time/ms": (t1 - t0) * 1000.0,
"batch_learn_time/ms": (t2 - t1) * 1000.0,
}
stats.extend(step_stats)
stats.extend(time_stats)
self._step_counter += 1
if self._step_counter % self._model_push_period == 0:
self._model.push()
# Release current model to stable.
self._model.push()
self._model.release()
episode_stats = self._controller.stats(Phase.TRAIN)
stats.update(episode_stats)
self._controller.reset_phase(Phase.TRAIN)
return stats
def eval(self,
num_episodes: Optional[int] = None,
keep_training_loops: bool = False,
non_blocking: bool = False) -> Union[StatsDict, Future]:
if not non_blocking:
return self._eval(num_episodes, keep_training_loops)
if self._eval_executor is None:
self._eval_executor = ThreadPoolExecutor(max_workers=1)
return self._eval_executor.submit(self._eval, num_episodes,
keep_training_loops)
def _make_replay(self) -> List[NestedTensor]:
adv, ret = self._compute_gae_and_return(
[x["v"] for x in self._trajectory],
[x["reward"] for x in self._trajectory], self._reward_rescaler)
self._trajectory.pop()
for cur, a, r in zip(self._trajectory, adv, ret):
cur["gae"] = a
cur["ret"] = r
cur.pop("reward")
cur.pop("terminated")
cur.pop("truncated")
return self._trajectory
def _send_replay(self, replay: List[NestedTensor]) -> None:
batch = []
while replay:
batch.append(replay.pop())
if len(batch) >= self._local_batch_size:
self._replay_buffer.extend(batch)
batch.clear()
if batch:
self._replay_buffer.extend(batch)
batch.clear()
async def _async_send_replay(self, replay: List[NestedTensor]) -> None:
batch = []
while replay:
batch.append(replay.pop())
if len(batch) >= self._local_batch_size:
await self._replay_buffer.async_extend(batch)
batch.clear()
if batch:
await self._replay_buffer.async_extend(batch)
batch.clear()
def _compute_gae_and_return(
self,
val: Sequence[Union[float, torch.Tensor]],
rew: Sequence[Union[float, torch.Tensor]],
reward_rescaler: Optional[Rescaler] = None
) -> Tuple[Iterable[torch.Tensor], Iterable[torch.Tensor]]:
n = len(val)
v = val[-1]
g = torch.zeros(1)
gae = torch.zeros(1)
adv = []
ret = []
for i in range(n - 2, -1, -1):
value, reward = val[i], rew[i]
if not isinstance(reward, torch.Tensor):
reward = torch.tensor([reward], dtype=torch.float32)
if reward_rescaler is not None:
g = reward + self._gamma * g
reward_rescaler.update(g)
reward = reward_rescaler.rescale(reward)
if self._max_abs_reward is not None:
reward.clamp_(-self._max_abs_reward, self._max_abs_reward)
delta = reward + self._gamma * v - value
v = value
gae = delta + self._gamma * self._gae_lambda * gae
adv.append(gae)
ret.append(gae + v)
return reversed(adv), reversed(ret)
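    # The backward recursion above implements GAE (Schulman et al., 2016):
    #     delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
    #     A_t = delta_t + gamma * lambda * A_{t+1},  with A_T = 0
    #     ret_t = A_t + V(s_t)
    # where V(s_T) comes from the trajectory's final step (zero when the
    # episode terminated). The optional rescaler divides rewards by the
    # running std of the discounted return g_t = r_t + gamma * g_{t+1}.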
def _train_step(self, batch: NestedTensor) -> Dict[str, float]:
device = self._model.device
batch = nested_utils.map_nested(lambda x: x.to(device), batch)
self._optimizer.zero_grad()
obs = batch["obs"]
act = batch["action"]
adv = batch["gae"]
ret = batch["ret"]
behavior_logpi = batch["logpi"]
behavior_v = batch["v"]
logpi, v = self._model_forward(obs)
policy_loss, ratio = self._policy_loss(logpi.gather(dim=-1, index=act),
behavior_logpi, adv)
value_loss = self._value_loss(ret, v, behavior_v)
entropy = self._entropy(logpi)
loss = policy_loss + (self._vf_loss_coeff *
value_loss) - (self._entropy_coeff * entropy)
loss.backward()
grad_norm = nn.utils.clip_grad_norm_(self._model.parameters(),
self._max_grad_norm)
self._optimizer.step()
return {
"return": ret.detach().mean().item(),
"policy_ratio": ratio.detach().mean().item(),
"policy_loss": policy_loss.detach().mean().item(),
"value_loss": value_loss.detach().mean().item(),
"entropy": entropy.detach().mean().item(),
"loss": loss.detach().mean().item(),
"grad_norm": grad_norm.detach().mean().item(),
}
def _model_forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, ...]:
return self._model(obs)
def _policy_loss(self, logpi: torch.Tensor, behavior_logpi: torch.Tensor,
adv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
if self._normalize_advantage:
            std, mean = torch.std_mean(adv, unbiased=False)
            # Small epsilon guards against division by zero when all batch
            # advantages are equal.
            adv = (adv - mean) / (std + 1e-8)
ratio = (logpi - behavior_logpi).exp()
clipped_ratio = ratio.clamp(1.0 - self._ratio_clipping_eps,
1.0 + self._ratio_clipping_eps)
surr1 = ratio * adv
surr2 = clipped_ratio * adv
policy_loss = -torch.min(surr1, surr2).mean()
return policy_loss, ratio
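    # The clipped surrogate above is the PPO objective (Schulman et al.,
    # 2017):
    #     L = -E[min(rho_t * A_t, clip(rho_t, 1 - eps, 1 + eps) * A_t)]
    # where rho_t = pi(a_t | s_t) / pi_behavior(a_t | s_t) is recovered from
    # the stored log-probabilities.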
def _value_loss(self,
ret: torch.Tensor,
v: torch.Tensor,
behavior_v: Optional[torch.Tensor] = None) -> torch.Tensor:
if self._value_clipping_eps is None:
return F.mse_loss(v, ret)
clipped_v = behavior_v + torch.clamp(
v - behavior_v, -self._value_clipping_eps, self._value_clipping_eps)
vf1 = F.mse_loss(v, ret, reduction="none")
vf2 = F.mse_loss(clipped_v, ret, reduction="none")
return torch.max(vf1, vf2).mean()
def _entropy(self, logpi: torch.Tensor) -> torch.Tensor:
return -(logpi.exp() * logpi).sum(dim=-1).mean()
def _eval(self,
num_episodes: int,
keep_training_loops: bool = False) -> StatsDict:
phase = self._controller.phase()
if keep_training_loops:
self._controller.set_phase(Phase.EVAL | phase)
else:
self._controller.set_phase(Phase.EVAL)
self._controller.reset_phase(Phase.EVAL, limit=num_episodes)
while self._controller.count(Phase.EVAL) < num_episodes:
time.sleep(1)
stats = self._controller.stats(Phase.EVAL)
self._controller.set_phase(phase)
return stats
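# End-to-end sketch (hypothetical wiring; server, loop, and variable setup
# are omitted): on the learner side, a PPOAgent typically alternates training
# with periodic blocking evaluation.
#
#   agent = PPOAgent(model,
#                    replay_buffer=replay_buffer,
#                    controller=controller,
#                    optimizer=torch.optim.Adam(model.parameters(), lr=3e-4))
#   agent.connect()
#   for epoch in range(num_epochs):
#       train_stats = agent.train(steps_per_epoch)
#       eval_stats = agent.eval(num_eval_episodes)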
| 13,953 | 36.210667 | 80 | py |
rlmeta | rlmeta-main/rlmeta/agents/dqn/dqn_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Optional, Tuple
import torch
import torch.nn as nn
from rlmeta.core.model import RemotableModel
from rlmeta.core.types import NestedTensor
class DQNModel(RemotableModel):
@abc.abstractmethod
def forward(self, observation: torch.Tensor, *args,
**kwargs) -> torch.Tensor:
"""
Forward function for DQN model.
Args:
observation: A torch.Tensor for observation.
Returns:
q: The Q(s, a) value for each action in the current state.
"""
@abc.abstractmethod
def q(self, s: torch.Tensor, a: torch.Tensor) -> torch.Tensor:
"""
Q function for DQN model.
Args:
s: A torch.Tensor for observation.
a: A torch.Tensor for action.
Returns:
            q: The Q(s, a) value for the given state-action pair.
"""
@abc.abstractmethod
def act(self, observation: NestedTensor, eps: torch.Tensor, *args,
**kwargs) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Act function will be called remotely by the agent.
This function should upload the input to the device and download the
output to cpu.
Args:
observation: A torch.Tensor for observation.
eps: A torch.Tensor for eps value in epsilon-greedy policy.
Returns:
action: The final action selected by the model.
q: The Q(s, a) value of the current state and action.
            v: The value estimate of the current state, i.e. max_a Q(s, a).
"""
@abc.abstractmethod
def sync_target_net(self) -> None:
"""
"""
def td_error(self, observation: NestedTensor, action: torch.Tensor,
target: torch.Tensor) -> torch.Tensor:
return target - self.q(observation, action)
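# Minimal concrete subclass sketch (hypothetical, for illustration; DQNHead,
# the copy import, and the omitted epsilon-greedy act method are assumptions,
# not part of this file):
#
#   class MyDQNModel(DQNModel):
#       def __init__(self, obs_dim: int, num_actions: int) -> None:
#           super().__init__()
#           self._net = DQNHead(obs_dim, [128], num_actions)
#           self._target = copy.deepcopy(self._net)
#
#       def forward(self, observation):
#           return self._net(observation)
#
#       def q(self, s, a):
#           return self.forward(s).gather(dim=-1, index=a)
#
#       def sync_target_net(self):
#           self._target.load_state_dict(self._net.state_dict())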
| 2,058 | 28 | 76 | py |
rlmeta | rlmeta-main/rlmeta/agents/dqn/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from rlmeta.agents.dqn.apex_dqn_agent import (ApexDQNAgent, ApexDQNAgentFactory,
ConstantEpsFunc, FlexibleEpsFunc)
from rlmeta.agents.dqn.dqn_model import DQNModel
__all__ = [
"ApexDQNAgent",
"ApexDQNAgentFactory",
"ConstantEpsFunc",
"FlexibleEpsFunc",
"DQNModel",
]
| 514 | 29.294118 | 80 | py |
rlmeta | rlmeta-main/rlmeta/agents/dqn/apex_dqn_agent.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Callable, Dict, List, Optional, Sequence, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from rich.console import Console
from rich.progress import track
import rlmeta.utils.data_utils as data_utils
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.agent import Agent, AgentFactory
from rlmeta.core.controller import Controller, ControllerLike, Phase
from rlmeta.core.model import ModelLike
from rlmeta.core.replay_buffer import ReplayBufferLike
from rlmeta.core.rescalers import SignedHyperbolicRescaler
from rlmeta.core.types import Action, TimeStep
from rlmeta.core.types import NestedTensor
from rlmeta.utils.stats_dict import StatsDict
from rlmeta.utils.running_stats import RunningMoments
console = Console()
class ApexDQNAgent(Agent):
def __init__(
self,
model: ModelLike,
eps: float = 0.1,
replay_buffer: Optional[ReplayBufferLike] = None,
controller: Optional[ControllerLike] = None,
optimizer: Optional[torch.optim.Optimizer] = None,
batch_size: int = 512,
max_grad_norm: float = 40.0,
n_step: int = 1,
gamma: float = 0.99,
importance_sampling_exponent: float = 0.4,
max_abs_reward: Optional[int] = None,
rescale_value: bool = False,
value_clipping_eps: Optional[float] = 0.2,
fr_kappa: Optional[float] = 1.0,
target_sync_period: Optional[int] = None,
learning_starts: Optional[int] = None,
model_push_period: int = 10,
additional_models_to_update: Optional[List[ModelLike]] = None
) -> None:
super().__init__()
self._model = model
self._eps = torch.tensor([eps], dtype=torch.float32)
self._replay_buffer = replay_buffer
self._controller = controller
self._optimizer = optimizer
self._batch_size = batch_size
self._max_grad_norm = max_grad_norm
self._n_step = n_step
self._gamma = gamma
self._gamma_pow = tuple(gamma**i for i in range(n_step + 1))
self._importance_sampling_exponent = importance_sampling_exponent
self._max_abs_reward = max_abs_reward
self._value_clipping_eps = value_clipping_eps
self._fr_kappa = fr_kappa
self._rescale_value = rescale_value
self._rescaler = SignedHyperbolicRescaler() if rescale_value else None
self._target_sync_period = target_sync_period
self._learning_starts = learning_starts
self._model_push_period = model_push_period
self._additional_models_to_update = additional_models_to_update
self._step_counter = 0
self._trajectory = []
self._update_priorities_future = None
self._eval_executor = None
def reset(self) -> None:
self._step_counter = 0
def act(self, timestep: TimeStep) -> Action:
obs = timestep.observation
action, q, v = self._model.act(obs, self._eps)
return Action(action, info={"q": q, "v": v})
async def async_act(self, timestep: TimeStep) -> Action:
obs = timestep.observation
action, q, v = await self._model.async_act(obs, self._eps)
return Action(action, info={"q": q, "v": v})
def observe_init(self, timestep: TimeStep) -> None:
if self._replay_buffer is None:
return
obs, _, terminated, truncated, _ = timestep
if terminated or truncated:
self._trajectory.clear()
else:
self._trajectory = [{
"obs": obs,
"terminated": terminated,
"truncated": truncated,
}]
async def async_observe_init(self, timestep: TimeStep) -> None:
self.observe_init(timestep)
def observe(self, action: Action, next_timestep: TimeStep) -> None:
if self._replay_buffer is None:
return
act, info = action
obs, reward, terminated, truncated, _ = next_timestep
cur = self._trajectory[-1]
cur["reward"] = reward
cur["action"] = act
cur["q"] = info["q"]
cur["v"] = info["v"]
self._trajectory.append({
"obs": obs,
"terminated": terminated,
"truncated": truncated,
})
async def async_observe(self, action: Action,
next_timestep: TimeStep) -> None:
self.observe(action, next_timestep)
def update(self) -> None:
if not self._trajectory:
return
last_step = self._trajectory[-1]
done = last_step["terminated"] or last_step["truncated"]
if self._replay_buffer is None or not done:
return
last_step["reward"] = 0.0
last_v = torch.zeros(1)
if last_step["truncated"]:
# TODO: Find a better way to compute last_v.
_, _, last_v = self._model.act(last_step["obs"], self._eps)
last_step["v"] = last_v
replay = self._make_replay()
self._send_replay(replay)
self._trajectory.clear()
async def async_update(self) -> None:
if not self._trajectory:
return
last_step = self._trajectory[-1]
done = last_step["terminated"] or last_step["truncated"]
if self._replay_buffer is None or not done:
return
last_step["reward"] = 0.0
last_v = torch.zeros(1)
if last_step["truncated"]:
# TODO: Find a better way to compute last_v.
_, _, last_v = await self._model.async_act(last_step["obs"],
self._eps)
last_step["v"] = last_v
replay = self._make_replay()
await self._async_send_replay(replay)
self._trajectory.clear()
def connect(self) -> None:
super().connect()
if self._additional_models_to_update is not None:
for m in self._additional_models_to_update:
m.connect()
def train(self,
num_steps: int,
keep_evaluation_loops: bool = False) -> StatsDict:
phase = self._controller.phase()
if keep_evaluation_loops:
self._controller.set_phase(Phase.TRAIN | phase)
else:
self._controller.set_phase(Phase.TRAIN)
self._replay_buffer.warm_up(self._learning_starts)
stats = StatsDict()
console.log(f"Training for num_steps = {num_steps}")
for _ in track(range(num_steps), description="Training..."):
t0 = time.perf_counter()
keys, batch, probabilities = self._replay_buffer.sample(
self._batch_size)
t1 = time.perf_counter()
step_stats = self._train_step(keys, batch, probabilities)
t2 = time.perf_counter()
time_stats = {
"sample_data_time/ms": (t1 - t0) * 1000.0,
"batch_learn_time/ms": (t2 - t1) * 1000.0,
}
stats.extend(step_stats)
stats.extend(time_stats)
self._step_counter += 1
if (self._target_sync_period is not None and
self._step_counter % self._target_sync_period == 0):
self._model.sync_target_net()
if self._additional_models_to_update is not None:
for m in self._additional_models_to_update:
m.sync_target_net()
if self._step_counter % self._model_push_period == 0:
self._model.push()
if self._additional_models_to_update is not None:
for m in self._additional_models_to_update:
m.push()
# Release current model to stable.
self._model.push()
self._model.release()
episode_stats = self._controller.stats(Phase.TRAIN)
stats.update(episode_stats)
self._controller.reset_phase(Phase.TRAIN)
return stats
def eval(self,
num_episodes: Optional[int] = None,
keep_training_loops: bool = False,
non_blocking: bool = False) -> Union[StatsDict, Future]:
if not non_blocking:
return self._eval(num_episodes, keep_training_loops)
if self._eval_executor is None:
self._eval_executor = ThreadPoolExecutor(max_workers=1)
return self._eval_executor.submit(self._eval, num_episodes,
keep_training_loops)
def _make_replay(self) -> List[NestedTensor]:
replay = []
n = len(self._trajectory)
r = torch.zeros(1)
for i in range(n - 2, -1, -1):
k = min(self._n_step, n - 1 - i)
cur = self._trajectory[i]
nxt = self._trajectory[i + k]
obs = cur["obs"]
act = cur["action"]
q = cur["q"]
cur_reward = cur["reward"]
nxt_reward = nxt["reward"]
nxt_v = nxt["v"]
if not isinstance(cur_reward, torch.Tensor):
cur_reward = torch.tensor([cur_reward], dtype=torch.float32)
if not isinstance(nxt_reward, torch.Tensor):
nxt_reward = torch.tensor([nxt_reward], dtype=torch.float32)
if self._max_abs_reward is not None:
cur_reward.clamp_(-self._max_abs_reward, self._max_abs_reward)
nxt_reward.clamp_(-self._max_abs_reward, self._max_abs_reward)
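            # Backward sliding-window n-step return (illustrative note): "r"
            # holds the k-step discounted reward sum starting at step i + 1.
            # Prepending r_i, discounting by gamma, and dropping the reward
            # that falls off the window's far end (gamma^k * r_{i+k}) yields
            # the sum for step i; the last step's reward is zeroed above, so
            # nothing is dropped while the window is still growing.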
gamma = 0.0 if nxt["terminated"] else self._gamma_pow[k]
r = cur_reward + self._gamma * r - gamma * nxt_reward
if self._rescaler is not None:
nxt_v = self._rescaler.recover(nxt_v)
target = r + gamma * nxt_v
if self._rescaler is not None:
target = self._rescaler.rescale(target)
replay.append({"obs": obs, "action": act, "q": q, "target": target})
return replay
def _send_replay(self, replay: List[NestedTensor]) -> None:
batch = data_utils.stack_fields(replay)
priorities = (batch["target"] - batch["q"]).abs_().squeeze_(-1)
self._replay_buffer.extend(batch, priorities, stacked=True)
async def _async_send_replay(self, replay: List[NestedTensor]) -> None:
batch = data_utils.stack_fields(replay)
priorities = (batch["target"] - batch["q"]).abs_().squeeze_(-1)
await self._replay_buffer.async_extend(batch, priorities, stacked=True)
def _train_step(self, keys: torch.Tensor, batch: NestedTensor,
probabilities: torch.Tensor) -> Dict[str, float]:
device = next(self._model.parameters()).device
batch = nested_utils.map_nested(lambda x: x.to(device), batch)
self._optimizer.zero_grad()
obs = batch["obs"]
action = batch["action"]
target = batch["target"]
behavior_q = batch["q"]
probabilities = probabilities.to(dtype=target.dtype, device=device)
weight = probabilities.pow(-self._importance_sampling_exponent)
weight.div_(weight.max())
q = self._model.q(obs, action)
loss = self._loss(target, q, behavior_q, weight)
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(self._model.parameters(),
self._max_grad_norm)
self._optimizer.step()
with torch.no_grad():
td_err = self._model.td_error(obs, action, target)
priorities = td_err.detach().squeeze(-1).abs().cpu()
# Wait for previous update request
if self._update_priorities_future is not None:
self._update_priorities_future.wait()
        # Kick off the priority update asynchronously so the next training
        # step can overlap with it.
self._update_priorities_future = self._replay_buffer.async_update(
keys, priorities)
return {
"target": target.detach().mean().item(),
"td_error": td_err.detach().mean().item(),
"loss": loss.detach().mean().item(),
"grad_norm": grad_norm.detach().mean().item(),
}
def _loss(self, target: torch.Tensor, q: torch.Tensor,
behavior_q: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
if self._value_clipping_eps is None:
loss = F.mse_loss(q, target, reduction="none")
if self._fr_kappa is not None:
# Apply functional regularization.
# https://arxiv.org/abs/2106.02613
loss += (self._fr_kappa *
F.mse_loss(q, behavior_q, reduction="none"))
return (loss.squeeze(-1) * weight).mean()
# Apply approximate trust region value update.
# https://arxiv.org/abs/2209.07550
clipped_q = behavior_q + torch.clamp(
q - behavior_q, -self._value_clipping_eps, self._value_clipping_eps)
err1 = F.mse_loss(q, target, reduction="none")
err2 = F.mse_loss(clipped_q, target, reduction="none")
loss = torch.maximum(err1, err2)
if self._fr_kappa is not None:
# Apply functional regularization.
# https://arxiv.org/abs/2106.02613
loss += (self._fr_kappa *
F.mse_loss(q, behavior_q, reduction="none"))
return (loss.squeeze(-1) * weight).mean()
def _eval(self,
num_episodes: int,
keep_training_loops: bool = False) -> StatsDict:
phase = self._controller.phase()
if keep_training_loops:
self._controller.set_phase(Phase.EVAL | phase)
else:
self._controller.set_phase(Phase.EVAL)
self._controller.reset_phase(Phase.EVAL, limit=num_episodes)
while self._controller.count(Phase.EVAL) < num_episodes:
time.sleep(1)
stats = self._controller.stats(Phase.EVAL)
self._controller.set_phase(phase)
return stats
class ApexDQNAgentFactory(AgentFactory):
def __init__(
self,
model: ModelLike,
eps_func: Callable[[int], float],
replay_buffer: Optional[ReplayBufferLike] = None,
controller: Optional[ControllerLike] = None,
optimizer: Optional[torch.optim.Optimizer] = None,
batch_size: int = 512,
max_grad_norm: float = 40.0,
n_step: int = 1,
gamma: float = 0.99,
importance_sampling_exponent: float = 0.4,
max_abs_reward: Optional[int] = None,
rescale_value: bool = False,
value_clipping_eps: Optional[float] = 0.2,
fr_kappa: Optional[float] = 1.0,
target_sync_period: Optional[int] = None,
learning_starts: Optional[int] = None,
model_push_period: int = 10,
additional_models_to_update: Optional[List[ModelLike]] = None
) -> None:
self._model = model
self._eps_func = eps_func
self._replay_buffer = replay_buffer
self._controller = controller
self._optimizer = optimizer
self._batch_size = batch_size
self._max_grad_norm = max_grad_norm
self._n_step = n_step
self._gamma = gamma
self._importance_sampling_exponent = importance_sampling_exponent
self._max_abs_reward = max_abs_reward
self._rescale_value = rescale_value
self._value_clipping_eps = value_clipping_eps
self._fr_kappa = fr_kappa
self._target_sync_period = target_sync_period
self._learning_starts = learning_starts
self._model_push_period = model_push_period
self._additional_models_to_update = additional_models_to_update
def __call__(self, index: int) -> ApexDQNAgent:
model = self._make_arg(self._model, index)
eps = self._eps_func(index)
replay_buffer = self._make_arg(self._replay_buffer, index)
controller = self._make_arg(self._controller, index)
return ApexDQNAgent(
model,
eps,
replay_buffer,
controller,
self._optimizer,
self._batch_size,
self._max_grad_norm,
self._n_step,
self._gamma,
self._importance_sampling_exponent,
self._max_abs_reward,
self._rescale_value,
self._value_clipping_eps,
self._fr_kappa,
self._target_sync_period,
self._learning_starts,
self._model_push_period,
additional_models_to_update=self._additional_models_to_update)
class ConstantEpsFunc:
def __init__(self, eps: float) -> None:
self._eps = eps
def __call__(self, index: int) -> float:
return self._eps
class FlexibleEpsFunc:
"""
Eps function following https://arxiv.org/abs/1803.00933
"""
def __init__(self, eps: float, num: int, alpha: float = 7.0) -> None:
self._eps = eps
self._num = num
self._alpha = alpha
def __call__(self, index: int) -> float:
if self._num == 1:
return self._eps
return self._eps**(1.0 + self._alpha * (index / (self._num - 1)))
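# Worked example (illustrative): with eps=0.4, num=8, alpha=7.0 the exponent
# grows linearly from 1 to 8 across actors, giving eps(0) = 0.4 and
# eps(7) = 0.4 ** 8 ~= 6.6e-4, the per-actor schedule from the Ape-X paper.
if __name__ == "__main__":
    _eps_fn = FlexibleEpsFunc(0.4, num=8)
    print([round(_eps_fn(i), 6) for i in range(8)])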
| 17,509 | 36.255319 | 80 | py |
rlmeta | rlmeta-main/rlmeta/samplers/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from _rlmeta_extension import Sampler, UniformSampler, PrioritizedSampler
__all__ = [
"Sampler",
"UniformSampler",
"PrioritizedSampler",
]
| 332 | 24.615385 | 73 | py |
rlmeta | rlmeta-main/rlmeta/storage/storage.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Optional, Sequence, Tuple, Union
import numpy as np
from rlmeta.core.types import Tensor, NestedTensor
class Storage(abc.ABC):
def __len__(self) -> int:
return self.size
@abc.abstractmethod
def __getitem__(
self,
key: Union[int,
Tensor]) -> Union[NestedTensor, Sequence[NestedTensor]]:
"""
"""
@property
@abc.abstractmethod
def capacity(self) -> int:
"""
"""
@property
@abc.abstractmethod
def size(self) -> int:
"""
"""
@abc.abstractmethod
def reset(self) -> None:
"""
"""
@abc.abstractmethod
def clear(self) -> None:
"""
"""
@abc.abstractmethod
def append(self, data: NestedTensor) -> Tuple[int, Optional[int]]:
"""
"""
@abc.abstractmethod
def extend(self,
data: Union[NestedTensor, Sequence[NestedTensor]],
stacked: bool = False) -> Tuple[np.ndarray, np.ndarray]:
"""
"""
| 1,262 | 19.704918 | 79 | py |
rlmeta | rlmeta-main/rlmeta/storage/tensor_circular_buffer.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional, Sequence, Tuple, Union
import numpy as np
import _rlmeta_extension
from rlmeta.core.types import NestedTensor, Tensor
from rlmeta.storage import Storage
IndexType = Union[int, Tensor]
KeyType = Union[int, Tensor]
ValueType = NestedTensor
class TensorCircularBuffer(Storage):
def __init__(self, capacity: int) -> None:
self._impl = _rlmeta_extension.TensorCircularBuffer(capacity)
def __getitem__(self, index: IndexType) -> Tuple[KeyType, ValueType]:
return self._impl[index]
@property
def capacity(self) -> int:
return self._impl.capacity
@property
def size(self) -> int:
return self._impl.size
def empty(self) -> bool:
return self._impl.empty()
def reset(self) -> None:
self._impl.reset()
def clear(self) -> None:
self._impl.clear()
def front(self) -> Tuple[KeyType, ValueType]:
return self._impl.front()
def back(self) -> Tuple[KeyType, ValueType]:
return self._impl.back()
def at(self, index: IndexType) -> Tuple[KeyType, ValueType]:
return self._impl.at(index)
def get(self, key: KeyType) -> ValueType:
return self._impl.get(key)
def append(self, data: NestedTensor) -> Tuple[int, Optional[int]]:
return self._impl.append(data)
def extend(self,
data: Union[NestedTensor, Sequence[NestedTensor]],
stacked: bool = False) -> Tuple[np.ndarray, np.ndarray]:
if stacked:
return self._impl.extend_stacked(data)
else:
return self._impl.extend(data)
| 1,809 | 26.014925 | 73 | py |
rlmeta | rlmeta-main/rlmeta/storage/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from rlmeta.storage.storage import Storage
from rlmeta.storage.circular_buffer import CircularBuffer
from rlmeta.storage.tensor_circular_buffer import TensorCircularBuffer
__all__ = [
"Storage",
"CircularBuffer",
"TensorCircularBuffer",
]
| 432 | 27.866667 | 70 | py |
rlmeta | rlmeta-main/rlmeta/storage/circular_buffer.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional, Sequence, Tuple, Union
import numpy as np
import rlmeta.utils.nested_utils as nested_utils
import rlmeta.utils.data_utils as data_utils
import _rlmeta_extension
from rlmeta.core.types import NestedTensor, Tensor
from rlmeta.storage import Storage
IndexType = Union[int, Tensor]
KeyType = Union[int, Tensor]
ValueType = Union[NestedTensor, Sequence[NestedTensor]]
class CircularBuffer(Storage):
def __init__(
self,
capacity: int,
collate_fn: Optional[Callable[[Sequence[NestedTensor]],
NestedTensor]] = None
) -> None:
self._impl = _rlmeta_extension.CircularBuffer(capacity)
self._collate_fn = collate_fn
def __getitem__(self, index: IndexType) -> Tuple[KeyType, ValueType]:
key, val = self._impl[index]
if not isinstance(key, int) and self._collate_fn is not None:
val = nested_utils.collate_nested(self._collate_fn, val)
return key, val
@property
def capacity(self) -> int:
return self._impl.capacity
@property
def size(self) -> int:
return self._impl.size
def empty(self) -> bool:
return self._impl.empty()
def reset(self) -> None:
self._impl.reset()
def clear(self) -> None:
self._impl.clear()
def front(self) -> Tuple[KeyType, ValueType]:
return self._impl.front()
def back(self) -> Tuple[KeyType, ValueType]:
return self._impl.back()
def at(self, index: IndexType) -> Tuple[KeyType, ValueType]:
key, val = self._impl.at(index)
if not isinstance(key, int) and self._collate_fn is not None:
val = nested_utils.collate_nested(self._collate_fn, val)
return key, val
def get(self, key: KeyType) -> ValueType:
val = self._impl.get(key)
if not isinstance(key, int) and self._collate_fn is not None:
val = nested_utils.collate_nested(self._collate_fn, val)
return val
def append(self, data: NestedTensor) -> Tuple[int, Optional[int]]:
return self._impl.append(data)
def extend(self,
data: Union[NestedTensor, Sequence[NestedTensor]],
stacked: bool = False) -> Tuple[np.ndarray, np.ndarray]:
if stacked:
batch_size = nested_utils.first(data).size(0)
data = data_utils.unstack_fields(data, batch_size)
return self._impl.extend(data)
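if __name__ == "__main__":
    # Minimal usage sketch (assumed illustration; requires the compiled
    # _rlmeta_extension module). Keys are assumed to be unique insertion
    # ids; once capacity is reached, append evicts the oldest entry.
    import torch

    buf = CircularBuffer(capacity=4, collate_fn=torch.stack)
    for i in range(6):
        key, evicted = buf.append({"obs": torch.full((2,), float(i))})
        print(key, evicted)  # evicted stays None until the buffer is full
    print(buf.size)  # 4 -- bounded by capacity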
| 2,650 | 30.559524 | 73 | py |
rlmeta | rlmeta-main/rlmeta/utils/asyncio_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
from typing import Awaitable
def handle_task_exception(task: asyncio.Task) -> None:
try:
task.result()
except asyncio.CancelledError:
pass
except Exception as e:
raise e
def create_task(loop: asyncio.BaseEventLoop, coro: Awaitable) -> asyncio.Task:
task = loop.create_task(coro)
task.add_done_callback(handle_task_exception)
return task
| 585 | 23.416667 | 78 | py |
rlmeta | rlmeta-main/rlmeta/utils/loss_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import torch
import torch.nn as nn
_NAME_TO_LOSS = {
"huber": nn.HuberLoss,
"huber_loss": nn.HuberLoss,
"huberloss": nn.HuberLoss,
"l1": nn.L1Loss,
"l1_loss": nn.L1Loss,
"l1loss": nn.L1Loss,
"mse": nn.MSELoss,
"mse_loss": nn.MSELoss,
"mseloss": nn.MSELoss,
"smooth_l1": nn.SmoothL1Loss,
"smooth_l1_loss": nn.SmoothL1Loss,
"smoothl1": nn.SmoothL1Loss,
"smoothl1loss": nn.SmoothL1Loss,
}
def get_loss(name: str, args: Optional[Dict[str, Any]] = None) -> nn.Module:
loss = _NAME_TO_LOSS[name.lower()]
return loss(
reduction="none") if args is None else loss(reduction="none", **args)
| 872 | 26.28125 | 77 | py |
rlmeta | rlmeta-main/rlmeta/utils/optimizer_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Iterable, Dict, Optional, Union
import torch
_NAME_TO_OPTIMIZER = {
"adadelta": torch.optim.Adadelta,
"adagrad": torch.optim.Adagrad,
"adam": torch.optim.Adam,
"adamw": torch.optim.AdamW,
"sparseadam": torch.optim.SparseAdam,
"adamax": torch.optim.Adamax,
"asgd": torch.optim.ASGD,
"lbfgs": torch.optim.LBFGS,
"nadam": torch.optim.NAdam,
"radam": torch.optim.RAdam,
"rmsprop": torch.optim.RMSprop,
"rprop": torch.optim.Rprop,
"sgd": torch.optim.SGD,
}
def make_optimizer(params: Union[Iterable[torch.Tensor],
Dict[str, torch.Tensor]], name: str,
**kwargs) -> torch.optim.Optimizer:
optimizer_cls = _NAME_TO_OPTIMIZER[name.lower()]
return optimizer_cls(params, **kwargs)
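if __name__ == "__main__":
    # Minimal usage sketch (assumed example): the name lookup is
    # case-insensitive and extra keyword arguments are forwarded to the
    # underlying torch optimizer constructor.
    params = [torch.nn.Parameter(torch.zeros(3))]
    opt = make_optimizer(params, "AdamW", lr=1e-3, weight_decay=1e-2)
    print(type(opt).__name__)  # AdamW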
| 990 | 29.96875 | 69 | py |
rlmeta | rlmeta-main/rlmeta/utils/random_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import numpy as np
import torch
def manual_seed(seed: int) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
| 377 | 21.235294 | 65 | py |
rlmeta | rlmeta-main/rlmeta/utils/data_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
from typing import Any, Dict, Sequence, Tuple, Union
import numpy as np
import torch
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.core.types import Tensor, NestedTensor
_NUMPY_DTYPE_TO_TORCH_MAP = {
bool: torch.bool,
np.uint8: torch.uint8,
np.int8: torch.int8,
np.int16: torch.int16,
np.int32: torch.int32,
np.int64: torch.int64,
np.float16: torch.float16,
np.float32: torch.float32,
np.float64: torch.float64,
np.complex64: torch.complex64,
np.complex128: torch.complex128,
}
_TORCH_DTYPE_TO_NUMPY_MAP = {
torch.bool: bool,
torch.uint8: np.uint8,
torch.int8: np.int8,
torch.int16: np.int16,
torch.int32: np.int32,
torch.int64: np.int64,
torch.float16: np.float16,
torch.float32: np.float32,
torch.float64: np.float64,
torch.complex64: np.complex64,
torch.complex128: np.complex128,
}
def numpy_dtype_to_torch(dtype: np.dtype) -> torch.dtype:
return _NUMPY_DTYPE_TO_TORCH_MAP[dtype]
def torch_dtype_to_numpy(dtype: torch.dtype) -> np.dtype:
return _TORCH_DTYPE_TO_NUMPY_MAP[dtype]
def size(data: Tensor) -> Sequence[int]:
if isinstance(data, np.ndarray):
return data.shape
elif isinstance(data, torch.Tensor):
return data.size()
return ()
def to_numpy(data: Tensor) -> np.ndarray:
return data.detach().cpu().numpy() if isinstance(data,
torch.Tensor) else data
def to_torch(data: Tensor) -> torch.Tensor:
if isinstance(data, np.generic):
return torch.tensor(data)
if isinstance(data, np.ndarray):
data = torch.from_numpy(data)
return data
def stack_tensors(input: Sequence[Tensor]) -> Tensor:
size = input[0].size()
# torch.cat is much faster than torch.stack
# https://github.com/pytorch/pytorch/issues/22462
return torch.stack(input) if len(size) == 0 else torch.cat(input).view(
-1, *size)
def cat_fields(input: Sequence[NestedTensor]) -> NestedTensor:
assert len(input) > 0
return nested_utils.collate_nested(lambda x: torch.cat(x), input)
def stack_fields(input: Sequence[NestedTensor]) -> NestedTensor:
assert len(input) > 0
return nested_utils.collate_nested(stack_tensors, input)
def unstack_fields(input: NestedTensor,
batch_size: int) -> Tuple[NestedTensor, ...]:
if batch_size == 1:
return (nested_utils.map_nested(lambda x: x.squeeze(0), input),)
else:
return nested_utils.unbatch_nested(lambda x: torch.unbind(x), input,
batch_size)
def serialize_to_bytes(data: Any) -> bytes:
buffer = io.BytesIO()
torch.save(data, buffer)
return buffer.getvalue()
def parse_from_bytes(bytes: bytes) -> Any:
buffer = io.BytesIO(bytes)
return torch.load(buffer)
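if __name__ == "__main__":
    # Minimal sanity sketch (assumed example): the cat+view fast path in
    # stack_tensors matches torch.stack for equally shaped inputs.
    ts = [torch.randn(3, 4) for _ in range(5)]
    out = stack_tensors(ts)
    assert out.size() == (5, 3, 4)
    assert torch.equal(out, torch.stack(ts))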
| 3,056 | 26.294643 | 76 | py |
rlmeta | rlmeta-main/rlmeta/utils/moolib_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import uuid
def generate_random_name() -> str:
return str(uuid.uuid4())
def expend_name_by_index(name: str, index: int) -> str:
return name + f"-{index}"
| 346 | 22.133333 | 65 | py |
rlmeta | rlmeta-main/rlmeta/utils/hydra_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import hydra
from omegaconf import DictConfig, OmegaConf
def config_to_json(cfg: OmegaConf) -> str:
return json.dumps(OmegaConf.to_container(cfg))
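if __name__ == "__main__":
    # Minimal usage sketch (assumed example).
    cfg = OmegaConf.create({"lr": 1e-3, "optimizer": "adamax"})
    print(config_to_json(cfg))  # {"lr": 0.001, "optimizer": "adamax"}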
| 346 | 23.785714 | 65 | py |
rlmeta | rlmeta-main/rlmeta/utils/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 179 | 35 | 65 | py |
rlmeta | rlmeta-main/rlmeta/utils/nested_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from _rlmeta_extension.nested_utils import *
| 225 | 31.285714 | 65 | py |
rlmeta | rlmeta-main/rlmeta/utils/running_stats.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
class RunningRMS(nn.Module):
def __init__(self,
size: Union[int, Tuple[int]],
dtype: Optional[torch.dtype] = None) -> None:
super().__init__()
self._size = (size,) if isinstance(size, int) else size
self.register_buffer("_count", torch.zeros(1, dtype=torch.int64))
self.register_buffer("_mean_square", torch.zeros(self._size,
dtype=dtype))
def reset(self) -> None:
self._count.zero_()
self._mean_square.zero_()
def count(self) -> torch.Tensor:
return self._count
def mean_square(self) -> torch.Tensor:
return self._mean_square
def rms(self, eps: Optional[float] = None) -> torch.Tensor:
return self._mean_square.sqrt() if eps is None else ((
self._mean_square + eps).sqrt())
def rrms(self, eps: Optional[float] = None) -> torch.Tensor:
return self._mean_square.rsqrt() if eps is None else ((
self._mean_square + eps).rsqrt())
def update(self, x: torch.Tensor) -> None:
size = x.size()
if size == self._size:
self._count += 1
self._mean_square += (x.square() - self._mean_square) / self._count
else:
assert size[1:] == self._size
cnt = size[0]
self._count += cnt
c = 0.0 if self._count == 0 else cnt / self._count
delta = x.square().mean(dim=0) - self._mean_square
self._mean_square += c * delta
class RunningMoments(nn.Module):
def __init__(self,
size: Union[int, Tuple[int]],
dtype: Optional[torch.dtype] = None) -> None:
super().__init__()
self._size = (size,) if isinstance(size, int) else size
self.register_buffer("_m0", torch.zeros(1, dtype=torch.int64))
self.register_buffer("_m1", torch.zeros(self._size, dtype=dtype))
self.register_buffer("_m2", torch.zeros(self._size, dtype=dtype))
def reset(self) -> None:
self._m0.zero_()
self._m1.zero_()
self._m2.zero_()
def count(self) -> torch.Tensor:
return self._m0
def mean(self) -> torch.Tensor:
return self._m1
def var(self, ddof: int = 0) -> torch.Tensor:
return self._m2 / (self._m0 - ddof)
def std(self, ddof: int = 0, eps: Optional[float] = None) -> torch.Tensor:
return self.var(ddof).sqrt() if eps is None else (self.var(ddof) +
eps).sqrt()
def rstd(self, ddof: int = 0, eps: Optional[float] = None) -> torch.Tensor:
return self.var(ddof).rsqrt() if eps is None else (self.var(ddof) +
eps).rsqrt()
def update(self, x: torch.Tensor) -> None:
size = x.size()
if size == self._size:
self._m0 += 1
delta = x - self._m1
self._m1 += delta / self._m0
self._m2 += delta * (x - self._m1)
else:
assert size[1:] == self._size
m0 = size[0]
m2, m1 = torch.var_mean(x, dim=0, unbiased=False)
n = self._m0 + m0
c = 0.0 if n == 0 else m0 / n
delta = m1 - self._m1
self._m1 += c * delta
self._m2 += m0 * m2 + delta.square() * (c * self._m0)
self._m0 = n
class RunningTDError(nn.Module):
"""
Running TD Error estimation introduced by https://arxiv.org/abs/2105.05347
"""
def __init__(self,
size: Union[int, Tuple[int]],
dtype: Optional[torch.dtype] = None) -> None:
super().__init__()
self._running_gamma = RunningMoments(size, dtype)
self._running_r = RunningMoments(size, dtype)
self._running_g = RunningRMS(size, dtype)
def reset(self) -> None:
self._running_gamma.reset()
self._running_r.reset()
self._running_g.reset()
def var(self) -> torch.Tensor:
return (self._running_r.var() +
self._running_gamma.var() * self._running_g.mean_square())
def std(self) -> torch.Tensor:
return self.var().sqrt()
def update(self, gamma: torch.Tensor, r: torch.Tensor,
g: torch.Tensor) -> None:
self._running_gamma.update(gamma)
self._running_r.update(r)
self._running_g.update(g)
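if __name__ == "__main__":
    # Minimal correctness sketch (assumed example): merging batched
    # updates via Chan's parallel algorithm reproduces the one-shot
    # moments of the full sample.
    x = torch.randn(100, 3)
    rm = RunningMoments(3)
    rm.update(x[:60])
    rm.update(x[60:])
    assert torch.allclose(rm.mean(), x.mean(dim=0), atol=1e-5)
    assert torch.allclose(rm.var(ddof=1), x.var(dim=0), atol=1e-5)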
| 4,699 | 33.306569 | 79 | py |
rlmeta | rlmeta-main/rlmeta/utils/remote_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Union
from rlmeta.core.remote import Remotable, Remote
from rlmeta.core.server import Server
def make_remote(target: Remotable,
server: Server,
name: Optional[str] = None,
timeout: float = 60):
return Remote(target, server.name, server.addr, name, timeout)
| 521 | 29.705882 | 66 | py |
rlmeta | rlmeta-main/rlmeta/utils/stats_dict.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import json
import math
from typing import Dict, Optional
from tabulate import tabulate
class StatsItem:
def __init__(self,
key: Optional[str] = None,
val: Optional[float] = None) -> None:
self._key = key
self.reset()
if val is not None:
self.add(val)
@property
def key(self) -> str:
return self._key
def reset(self):
self._m0 = 0
self._m1 = 0.0
self._m2 = 0.0
self._min_val = float("inf")
self._max_val = float("-inf")
def add(self, v: float) -> None:
# Welford algorithm.
self._m0 += 1
delta = v - self._m1
self._m1 += delta / self._m0
self._m2 += delta * (v - self._m1)
self._min_val = min(self._min_val, v)
self._max_val = max(self._max_val, v)
def count(self) -> int:
return self._m0
def mean(self) -> float:
return self._m1
def var(self, ddof: int = 0) -> float:
return self._m2 / (self._m0 - ddof)
def std(self, ddof: int = 0) -> float:
return math.sqrt(self.var(ddof))
def min(self) -> float:
return self._min_val
def max(self) -> float:
return self._max_val
def dict(self) -> Dict[str, float]:
ret = {
"mean": self.mean(),
"std": self.std(),
"min": self.min(),
"max": self.max(),
"count": self.count(),
}
if self.key is not None:
ret["key"] = self.key
return ret
class StatsDict:
def __init__(self) -> None:
self._dict = {}
def __getitem__(self, key: str) -> StatsItem:
return self._dict[key]
def reset(self):
self._dict.clear()
def add(self, k: str, v: float) -> None:
if k in self._dict:
self._dict[k].add(v)
else:
self._dict[k] = StatsItem(k, v)
def extend(self, d: Dict[str, float]) -> None:
for k, v in d.items():
self.add(k, v)
def update(self, stats: StatsDict) -> None:
self._dict.update(stats._dict)
def dict(self) -> Dict[str, float]:
return {k: v.dict() for k, v in self._dict.items()}
def json(self, info: Optional[str] = None, **kwargs) -> str:
data = self.dict()
if info is not None:
data["info"] = info
data.update(kwargs)
return json.dumps(data)
def table(self, info: Optional[str] = None, **kwargs) -> str:
if info is None:
head = ["key", "mean", "std", "min", "max", "count"]
else:
head = ["info", "key", "mean", "std", "min", "max", "count"]
data = []
for k, v in self._dict.items():
if info is None:
row = [k, v.mean(), v.std(), v.min(), v.max(), v.count()]
else:
row = [info, k, v.mean(), v.std(), v.min(), v.max(), v.count()]
data.append(row)
for k, v in kwargs.items():
if info is None:
row = [k, v, 0.0, v, v, 1]
else:
row = [info, k, v, 0.0, v, v, 1]
data.append(row)
return tabulate(data,
head,
numalign="right",
stralign="right",
floatfmt=".8f")
| 3,586 | 24.992754 | 79 | py |
rlmeta | rlmeta-main/rlmeta/data/segment_tree.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Union
import numpy as np
import torch
from _rlmeta_extension import SumSegmentTreeFp32, SumSegmentTreeFp64
from rlmeta.core.types import Tensor
SumSegmentTreeImpl = Union[SumSegmentTreeFp32, SumSegmentTreeFp64]
Index = Union[int, np.ndarray, torch.Tensor]
Value = Union[float, np.ndarray, torch.Tensor]
class SumSegmentTree:
def __init__(self, size: int, dtype: np.dtype = np.float64) -> None:
self._dtype = dtype
if dtype == np.float32:
self._impl = SumSegmentTreeFp32(size)
elif dtype == np.float64:
self._impl = SumSegmentTreeFp64(size)
else:
assert False, "Unsupported data type " + str(dtype)
@property
def dtype(self) -> np.dtype:
return self._dtype
@property
def size(self) -> int:
return self._impl.size
@property
def capacity(self) -> int:
return self._impl.capacity
def __len__(self) -> int:
return len(self._impl)
def __getitem__(self, index: Index) -> Value:
return self._impl[index]
def at(self, index: Index) -> Value:
return self._impl.at(index)
def __setitem__(self, index: Index, value: Value) -> None:
self._impl[index] = value
def update(self,
index: Index,
value: Value,
mask: Optional[Tensor] = None) -> None:
if mask is None:
self._impl.update(index, value)
else:
self._impl.update(index, value, mask)
def query(self, l: Index, r: Index) -> Value:
return self._impl.query(l, r)
def scan_lower_bound(self, value: Value) -> Index:
return self._impl.scan_lower_bound(value)
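if __name__ == "__main__":
    # Minimal usage sketch (assumed illustration; requires the compiled
    # _rlmeta_extension). Prioritized sampling draws u ~ U[0, total) and
    # inverts the prefix sum with scan_lower_bound.
    tree = SumSegmentTree(8, dtype=np.float64)
    tree[np.arange(8)] = np.arange(1.0, 9.0)
    total = tree.query(0, 8)  # 36.0, the sum of all priorities
    u = np.random.uniform(0.0, total)
    index = tree.scan_lower_bound(u)  # index drawn proportionally to priority
    print(total, index)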
| 1,899 | 26.941176 | 72 | py |
rlmeta | rlmeta-main/rlmeta/data/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from rlmeta.data.segment_tree import SumSegmentTree
__all__ = [
"SumSegmentTree",
]
| 269 | 23.545455 | 65 | py |
rlmeta | rlmeta-main/rlmeta/ops/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from _rlmeta_extension.ops import *
| 216 | 30 | 65 | py |
LDEAlgsComparison | LDEAlgsComparison-master/scripts/generate.py | import subprocess
import sys
res = subprocess.Popen("python3 stressTest.py %s" % ' '.join(sys.argv[1:]), shell=True)
if res.wait() != 0:
print("Error") | 155 | 21.285714 | 87 | py |
LDEAlgsComparison | LDEAlgsComparison-master/scripts/stressTest.py | import time
import os
import subprocess
import sys
from random import randint, seed
exes = ["./ldegraphmain", "./slopesV7i"]
def checkSkip(n, m, mxVal):
if len(sys.argv) > 1:
mx1, mx2, n1, m1, n2, m2 = map(int, sys.argv[1:])
if mxVal < mx1 or mxVal > mx2:
return True
if n < n1 or n > n2:
return True
if m < m1 or m > m2:
return True
if mxVal == 1021:
if n == 1 and m > 5: return True
if n == 2 and m > 4: return True
if n > 2: return True
if mxVal == 503:
if n == 1 and m > 7: return True
if n == 2 and m > 5: return True
if n == 3 and m > 3: return True
if n == 4: return True
if mxVal == 107:
if n == 1 and m == 9: return True
if n == 2 and m > 6: return True
if n == 3 and m > 5: return True
if n == 4 and m == 5: return True
return False
def genTest(n, m, mxVal):
return ' '.join(map(str, sorted(randint(1, mxVal) for _ in range(n))[::-1])) + ' 0 ' + ' '.join(map(str, sorted([mxVal] + [randint(1, mxVal) for _ in range(m - 1)]))) + ' 0'
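# Example (assumed illustration): genTest(2, 3, 13) can yield
# "11 4 0 5 9 13 0" -- the n coefficients in descending order, a 0
# separator, then the m coefficients in ascending order with the largest
# forced to equal mxVal, followed by a final 0 terminator.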
nms = [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9)]
nms += [(2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7), (2, 8)]
nms += [(3, 3), (3, 4), (3, 5), (3, 6), (4, 4), (4, 5)]
mxVals = [2, 3, 5, 13, 29, 39, 107, 503, 1021]
seed(216)
header = """
\\begin{figure}
\\begin{center}
\\begin{tabular}{c@{\hspace{0.5cm}}c@{\hspace{0.5cm}}c@{\hspace{0.5cm}}c@{\hspace{0.5cm}}c@{\hspace{0.5cm}}c@{\hspace{0.5cm}}c@{\hspace{0.5cm}}c@{\hspace{0.5cm}}c@{\hspace{0.5cm}}c@{\hspace{0.5cm}}c}
\hline
& A & 2 & 3 & 5 & 13 & 29 & 39 & 107 & 503 & 1021 \\\\
N & M \\\\
\hline\n"""
finNoEps = open("tableNoEps.tex", "w")
finWithEps = open("tableWithEps.tex", "w")
finTimeouts = open("tableTimeouts.tex", "w")
finSumTime = open("tableSumTime.tex", "w")
for fin in [finNoEps, finWithEps, finTimeouts, finSumTime]:
fin.write(header)
totalClasses, totalTime = 0, 0
graphWinNoEps, slopesWinNoEps = 0, 0
graphWinWithEps, slopesWinWithEps = 0, 0
graphTotalTimeouts, slopesTotalTimeouts = 0, 0
graphTotalSumTime, slopesTotalSumTime = 0, 0
for n, m in nms:
if n == m:
for fin in [finNoEps, finWithEps, finTimeouts, finSumTime]:
fin.write("\\hline\n")
for fin in [finNoEps, finWithEps, finTimeouts, finSumTime]:
fin.write("%d & %d" % (n, m))
for mxVal in mxVals:
if checkSkip(n, m, mxVal):
for fin in [finNoEps, finWithEps, finTimeouts, finSumTime]:
fin.write(" & ")
continue
graphNoEps, slopesNoEps = 0, 0
graphWithEps, slopesWithEps = 0, 0
graphTimeouts, slopesTimeouts = 0, 0
graphSumTime, slopesSumTime = 0, 0
for __ in range(10):
test = genTest(n, m, mxVal)
rt, sols = [], []
for exe in exes:
times = []
for __ in range(5):
cmd = "echo %s | %s 1>temp.txt" % (test, exe)
start = time.perf_counter()
                    try:
                        subprocess.check_output(cmd, timeout=600, shell=True)
                    except Exception:
                        open("temp.txt", "w").write("-1")
end = time.perf_counter()
times.append(end - start)
sols.append(int(open('temp.txt', 'r').readline()))
sys.stderr.write("%s %s %d %.4lf\n" % (test, exe, sols[-1], times[-1]))
sys.stderr.flush()
if times[-1] > 15 or sols[-1] == -1:
if 'graph' in exe and sols[-1] == -1:
graphTimeouts += 1
if 'slopes' in exe and sols[-1] == -1:
slopesTimeouts += 1
break
if times[-1] > 15:
rt.append(sum(times) / len(times))
else:
times.sort()
rt.append(sum(times[1:-1]) / 3)
if rt[0] < rt[1]:
graphNoEps += 1
elif rt[1] < rt[0]:
slopesNoEps += 1
if abs(rt[0] - rt[1]) < 1e-2:
graphWithEps += 0.5
slopesWithEps += 0.5
elif rt[0] < rt[1]:
graphWithEps += 1
else:
slopesWithEps += 1
graphSumTime += rt[0]
slopesSumTime += rt[1]
totalClasses += 1
totalTime += graphSumTime + slopesSumTime
if graphNoEps > 7:
graphWinNoEps += 1
elif slopesNoEps > 7:
slopesWinNoEps += 1
if int(graphWithEps) > 7:
graphWinWithEps += 1
elif int(slopesWithEps) > 7:
slopesWinWithEps += 1
graphTotalTimeouts += graphTimeouts
slopesTotalTimeouts += slopesTimeouts
graphTotalSumTime += graphSumTime
slopesTotalSumTime += slopesSumTime
finNoEps.write(" & %d:%d " % (graphNoEps, slopesNoEps))
finWithEps.write(" & %d:%d " % (int(graphWithEps), int(slopesWithEps)))
finTimeouts.write(" & %d:%d " % (graphTimeouts, slopesTimeouts))
finSumTime.write(" & %.1lf:%.1lf " % (graphSumTime, slopesSumTime))
for fin in [finNoEps, finWithEps, finTimeouts, finSumTime]:
fin.write('\\\\\n')
header = """
\hline
\end{tabular}
\caption{\label{%s}%s}
\end{center}
\end{figure}\n"""
finNoEps.write(header % ("fig1", "Comparison of the Graph and Slopes algorithms (wins: Graph %d, Slopes %d, out of %d test classes)." % (graphWinNoEps, slopesWinNoEps, totalClasses)))
finWithEps.write(header % ("fig2", "Comparison of the Graph and Slopes algorithms with an epsilon of 0.01 (wins: Graph %d, Slopes %d, out of %d test classes)." % (graphWinWithEps, slopesWinWithEps, totalClasses)))
finTimeouts.write(header % ("fig3", "Number of timeouts (10 minutes) for the Graph and Slopes algorithms (Graph %d, Slopes %d)." % (graphTotalTimeouts, slopesTotalTimeouts)))
finSumTime.write(header % ("fig4", "Total time spent by each algorithm on each test class (Graph %.2f, Slopes %.2f)." % (graphTotalSumTime, slopesTotalSumTime)))
| 5,592 | 34.624204 | 199 | py |
LDEAlgsComparison | LDEAlgsComparison-master/scripts/doComparison.py | from os import system
import sys
system("gcc -static slopesV7i.c -std=c11 -O3 -o slopesV7i")
system("g++ -static -lm -s -x c++ -std=c++17 -O3 -o ldegraphmain ldegraphmain.cpp ../src/ldegraphalg.cpp ../src/ldealg.cpp")
system("python3 generate.py %s" % ' '.join(sys.argv[1:])) | 276 | 45.166667 | 124 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_ce_lshtc1.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20220510  # master seed used to derive random seeds for the functions below
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Neural neworks baseline on LSHTC1")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
print("num of classes: ", num_classes)
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, num_classes):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, num_classes),
)
def forward(self, x):
return self.model(x)
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=2048, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
epochs = 10
loss_f = torch.nn.CrossEntropyLoss(reduction='sum')
model = Net(num_features, 4096, num_classes).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-2, weight_decay=0e-6)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[5, ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("before training. validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("before training. test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_ce(model, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
prediction_time = time() - prediction_start
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 3,797 | 40.282609 | 170 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_dmoz.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20230508
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Experiments on Dmoz")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, embed_dim):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, embed_dim),
)
def forward(self, x):
x = self.model(x)
row_norms = torch.norm(x, dim=1, p=2).unsqueeze(1)
return x/row_norms
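# Decoding sketch (assumed illustration; the actual logic lives in
# nn_utils.test_le, which is not shown here): with unit-norm network
# outputs, prediction picks the label embedding with the largest inner
# product:
#   scores = model(x) @ label_embed.T   # (batch_size, num_classes)
#   preds = scores.argmax(dim=1)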
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=256, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
def run_exp(embed_dim):
label_embed = (rng.integers(low=0, high=2, size=(num_classes, embed_dim)) * 2 - 1) / math.sqrt(embed_dim)
epsilon = np.max(np.abs(label_embed @ label_embed.T - np.eye(num_classes)))
print("epsilon = ", epsilon)
label_embed = torch.tensor(np.float32(label_embed)).to(device)
epochs = 5
loss_f = torch.nn.MSELoss(reduction='sum')
model = Net(num_features=num_features, hidden_dim=2500, embed_dim=embed_dim).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=0e-5)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[2, 4], gamma=0.1)
epoch_time_hist = []
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
for epoch in range(1, epochs+1):
start = time()
train_le(model, label_embed, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_le(model, label_embed, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
prediction_time = time() - prediction_start
return val_loss_hist, val_acc_hist, test_loss_hist, test_acc_hist, epoch_time_hist, prediction_time
val_loss_hist, val_acc_hist, test_loss_hist, test_acc_hist, epoch_time_hist, prediction_time = run_exp(int(sys.argv[1]))
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 4,139 | 42.125 | 169 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_odp.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20230508
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Experiments on Dmoz")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, embed_dim):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(hidden_dim),
torch.nn.Linear(hidden_dim, 16*hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(16*hidden_dim),
torch.nn.Linear(16*hidden_dim, 8*hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(8*hidden_dim),
torch.nn.Linear(8*hidden_dim, embed_dim),
)
def forward(self, x):
x = self.model(x)
row_norms = torch.norm(x, dim=1, p=2).unsqueeze(1)
return x/row_norms
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=2048, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
def run_exp(embed_dim):
label_embed = (rng.integers(low=0, high=2, size=(num_classes, embed_dim)) * 2 - 1) / math.sqrt(embed_dim)
label_embed = torch.tensor(np.float32(label_embed)).to(device)
epochs = 20
loss_f = torch.nn.MSELoss(reduction='sum')
model = Net(num_features=num_features, hidden_dim=2048, embed_dim=embed_dim).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=0e-5)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, ], gamma=0.1)
epoch_time_hist = []
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
for epoch in range(1, epochs+1):
start = time()
train_le(model, label_embed, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_le(model, label_embed, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
prediction_time = time() - prediction_start
return val_loss_hist, val_acc_hist, test_loss_hist, test_acc_hist, epoch_time_hist, prediction_time
val_loss_hist, val_acc_hist, test_loss_hist, test_acc_hist, epoch_time_hist, prediction_time = run_exp(int(sys.argv[1]))
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 4,345 | 42.029703 | 170 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_lshtc1.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20230508
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Experiments on LSHTC1")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, embed_dim):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, embed_dim),
)
def forward(self, x):
x = self.model(x)
row_norms = torch.norm(x, dim=1, p=2).unsqueeze(1)
return x/row_norms
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=128, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
def run_exp(embed_dim):
label_embed = (rng.integers(low=0, high=2, size=(num_classes, embed_dim)) * 2 - 1) / math.sqrt(embed_dim)
epsilon = np.max(np.abs(label_embed @ label_embed.T - np.eye(num_classes)))
print("epsilon = ", epsilon)
label_embed = torch.tensor(np.float32(label_embed)).to(device)
epochs = 5
loss_f = torch.nn.MSELoss(reduction='sum')
model = Net(num_features=num_features, hidden_dim=4096, embed_dim=embed_dim).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=0e-5)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[2 ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_le(model, label_embed, loss_f, device, val_loader)
print("before training. validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
print("before training. test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_le(model, label_embed, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_le(model, label_embed, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
prediction_time = time() - prediction_start
return val_loss_hist, val_acc_hist, test_loss_hist, test_acc_hist, epoch_time_hist, prediction_time
val_loss_hist, val_acc_hist, test_loss_hist, test_acc_hist, epoch_time_hist, prediction_time = run_exp(int(sys.argv[1]))
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 4,522 | 44.23 | 169 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_sq_dmoz.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20220510  # master seed used to derive random seeds for the functions below
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Neural neworks baseline on Dmoz")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
print("num of classes: ", num_classes)
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, num_classes):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, num_classes),
)
def forward(self, x):
return self.model(x)
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=256, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
epochs = 5
loss_f = SquaredLoss()
model = Net(num_features, 2500, num_classes).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=0e-6)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3, ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("before training. validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("before training. test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_ce(model, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
prediction_time = time() - prediction_start
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 3,764 | 39.923913 | 169 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_ce_dmoz.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20220510  # master seed used to derive random seeds for the functions below
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Neural neworks baseline on Dmoz")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
print("num of classes: ", num_classes)
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, num_classes):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, num_classes),
)
def forward(self, x):
return self.model(x)
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=256, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
epochs = 5
loss_f = torch.nn.CrossEntropyLoss(reduction='sum')
model = Net(num_features, 2500, num_classes).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-2, weight_decay=0e-6)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3, ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("before training. validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("before training. test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_ce(model, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
prediction_time = time() - prediction_start
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 3,793 | 40.23913 | 169 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_sq_odp.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
seed = 20230508
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Experiments on odp")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, embed_dim):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(hidden_dim),
torch.nn.Linear(hidden_dim, 16*hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(16*hidden_dim),
torch.nn.Linear(16*hidden_dim, 4*hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(4*hidden_dim),
torch.nn.Linear(4*hidden_dim, embed_dim),
)
def forward(self, x):
x = self.model(x)
row_norms = torch.norm(x, dim=1, p=2).unsqueeze(1)
return x/row_norms
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=2048, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
epochs = 20
loss_f = SquaredLoss()
model = Net(num_features, 2048, num_classes).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=0e-6)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("before training. validation results. sq_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("before training. test results. sq_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_ce(model, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("validation results. sq_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("test results. sq_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
prediction_time = time() - prediction_start
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 4,026 | 40.091837 | 170 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_sq_lshtc1.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20220510  # master seed used to derive random seeds for the functions below
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Neural neworks baseline on LSHTC1")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
print("num of classes: ", num_classes)
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, num_classes):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, num_classes),
)
def forward(self, x):
x = self.model(x)
row_norms = torch.norm(x, dim=1, p=2).unsqueeze(1)
return x/row_norms
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=128, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
epochs = 5
loss_f = SquaredLoss()
model = Net(num_features, 4096, num_classes).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=0e-6)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3, ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("before training. validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("before training. test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_ce(model, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
prediction_time = time() - prediction_start
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 3,849 | 39.957447 | 169 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_ce_odp.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
seed = 20230508
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Experiments on odp")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, embed_dim):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(hidden_dim),
torch.nn.Linear(hidden_dim, 16*hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(16*hidden_dim),
torch.nn.Linear(16*hidden_dim, 4*hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(4*hidden_dim),
torch.nn.Linear(4*hidden_dim, embed_dim),
)
def forward(self, x):
return self.model(x)
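# Unlike the squared-loss model, this head returns raw logits with no row
# normalization; torch.nn.CrossEntropyLoss applies log-softmax internally.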
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=2048, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
epochs = 20
loss_f = torch.nn.CrossEntropyLoss(reduction='sum')
model = Net(num_features, 2048, num_classes).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-2, weight_decay=0e-6)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, ], gamma=0.1)
epoch_time_hist = [0]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("before training. validation results. ce_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("before training. test results. ce_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_ce(model, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("validation results. ce_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("test results. ce_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
prediction_time = time() - prediction_start
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 3,972 | 40.385417 | 170 | py |
JOLLE | JOLLE-main/label_embedding_python/nn_utils.py | import torch
from sklearn.metrics import pairwise_distances
import numpy as np
class sparse_dataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
self.n_features = x.shape[1]
def __len__(self):
return self.x.shape[0]
def __getitem__(self, i):
return self.x.indices[self.x.indptr[i]:self.x.indptr[i+1]], self.x.data[self.x.indptr[i]:self.x.indptr[i+1]], self.y[i], self.n_features
def sparse_collate_coo(batch):
r = []
c = []
vals = []
y = []
n_features = batch[0][-1]
for i, (indices, data, yi, _) in enumerate(batch):
r.extend([i] * indices.shape[0])
c.extend(indices)
vals.extend(data)
y.append(yi)
return ([r, c], vals, (len(batch), n_features)), y
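# sparse_collate_coo flattens a batch of CSR rows into COO form: the returned
# ([row_idx, col_idx], values, shape) triple is exactly what the train/test
# loops pass to torch.sparse_coo_tensor to rebuild the sparse batch on device.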
class SquaredLoss(torch.nn.Module):
def __init__(self):
super(SquaredLoss, self).__init__()
def forward(self, outputs, targets):
one_hot_approx = torch.zeros_like(outputs)
one_hot_approx.scatter_(1, targets.unsqueeze(1), 1)
return torch.sum((outputs - one_hot_approx) ** 2)
def train_le(model, label_embed, loss_f, device, train_loader, optimizer, epoch, log_interval=50):
model.train()
for idx, ((locs, vals, size), y) in enumerate(train_loader):
x = torch.sparse_coo_tensor(locs, vals, size=size, dtype=torch.float32, device=device)
y_embed = torch.index_select(label_embed, 0, torch.tensor(y, dtype=torch.int32).to(device))
optimizer.zero_grad()
embed_out = model(x)
loss = loss_f(embed_out, y_embed) / len(y)
loss.backward()
optimizer.step()
if (idx + 1) % log_interval == 0:
print("train epoch: {}, batch: {}/{}, loss: {:.6f}".format(epoch, idx+1, len(train_loader), loss.item()))
def find1NN_cuda(out_cuda, label_embed_cuda):
#dist_m = torch.cdist(out_cuda.reshape(1, out_cuda.shape[0], -1), label_embed_cuda.reshape(1, label_embed_cuda.shape[0], -1))
#dist_m = dist_m.reshape(dist_m.shape[1], -1)
#oneNNs = torch.argmin(dist_m, dim=1)
gram_m = torch.matmul(out_cuda, torch.transpose(label_embed_cuda, 0, 1))
return torch.argmax(gram_m, dim=1)
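# Assuming the label embeddings are also unit-norm, maximizing the inner product
# (the Gram matrix above) picks the same index as minimizing Euclidean distance,
# so this replaces the commented-out cdist-based 1-NN search at lower cost.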
def test_le(model, label_embed, loss_f, device, test_loader):
model.eval()
mean_loss = 0.0
correct = 0
total = 0
with torch.no_grad():
for idx, ((locs, vals, size), y) in enumerate(test_loader):
x = torch.sparse_coo_tensor(locs, vals, size=size, dtype=torch.float32, device=device)
y_embed = torch.index_select(label_embed, 0, torch.tensor(y, dtype=torch.int32).to(device))
embed_out = model(x)
mean_loss += loss_f(embed_out, y_embed).item()
embed_out_detached = embed_out.detach()
preds = find1NN_cuda(embed_out_detached, label_embed).cpu().numpy()
correct += np.sum(preds==y)
total += preds.shape[0]
del x, y_embed, embed_out
return mean_loss / len(test_loader.dataset), correct/total
def train_ce(model, loss_f, device, train_loader, optimizer, epoch, log_interval=50):
model.train()
for idx, ((locs, vals, size), y) in enumerate(train_loader):
x = torch.sparse_coo_tensor(locs, vals, size=size, dtype=torch.float32, device=device)
optimizer.zero_grad()
out = model(x)
loss = loss_f(out, torch.tensor(y, dtype=torch.int64).to(device)) / len(y)
loss.backward()
optimizer.step()
        if (idx + 1) % log_interval == 0:
print("train epoch: {}, batch: {}/{}, loss: {:.6f}".format(epoch, idx+1, len(train_loader), loss.item()))
def test_ce(model, loss_f, device, test_loader):
model.eval()
mean_loss = 0.0
correct = 0
total = 0
with torch.no_grad():
for idx, ((locs, vals, size), y) in enumerate(test_loader):
x = torch.sparse_coo_tensor(locs, vals, size=size, dtype=torch.float32, device=device)
out = model(x)
mean_loss += loss_f(out, torch.tensor(y, dtype=torch.int64).to(device)).item()
preds = out.detach().cpu().argmax(dim=1, keepdim=False).numpy()
correct += np.sum(preds==np.array(y))
total += preds.shape[0]
return mean_loss / len(test_loader.dataset), correct/total | 4,296 | 41.127451 | 145 | py |
graphlaxy | graphlaxy-master/GraphlaxyDataGen.py | #!/usr/bin/env python
import argparse
import sys
class Graphlaxy(object):
def __init__(self):
parser = argparse.ArgumentParser(
            description='Tool used to create synthetic graph datasets using \'Nash Bargaining Scheme\' optimization.',
usage='''gdg <command> [<args>]
The available commands are:
optimization Create a baseline dataset and optimize the parameters.
generate Using the fitted parameters generate a synthetic graph dataset.
plots Generate plots showing different characteristics of the baseline, sampled, and final datasets.
statistics Print some basic statistics of target dataset
''')
parser.add_argument('command', help='Subcommand to run')
commands = {
"optimization":self.optimization,
"generate":self.generate,
"plots": self.plot,
"statistics": self.statistics
}
args = parser.parse_args(sys.argv[1:2])
if not args.command in commands:
print('Unrecognized command')
parser.print_help()
exit(1)
commands[args.command]()
def optimization(self):
parser = argparse.ArgumentParser(description = "Steps for the optimization.",
            usage='''gdg optimization <subcommand> [<args>]
The available subcommands are:
baseline Only creates the baseline dataset
metrics Calculate the metrics of a dataset
optimize Use sampling and the Powell method with cooperative bargaining to optimize the input RMat parameters
*************************************
To run the full optimization in steps:
First, create the baseline dataset, then compute the metrics, and finally optimize the parameters.''')
parser.add_argument('subcommand', help='Subcommand to run')
commands = {
"baseline":self.baseline,
"metrics":self.metrics,
"optimize": self.optimize
}
args = parser.parse_args(sys.argv[2:3])
if not args.subcommand in commands:
print('Unrecognized command')
parser.print_help()
exit(1)
commands[args.subcommand]()
def generate(self):
parser = argparse.ArgumentParser(description = "Using the fitted parameters generate a synthetic graph dataset.")
parser.add_argument('-f', "--folder", metavar = "str", type = str,
help = "Folder where to generate the dataset.", default= "../result_dataset")
parser.add_argument('-s', "--dataset-size", metavar = "int", type = int,
help = "The size of the dataset to generate.", default= 5000)
parser.add_argument('-e', "--edges-between", nargs = 2, metavar = "int", type = int,
help = "The min and max vallue the edges argument can take.", default= (100000, 2000000))
parser.add_argument('-m', '--multiprocess', action="store_true", help = "Add to take advantage of multiple cores.")
parser.add_argument('-w', "--custom-weights", nargs = 8, metavar = "float", type = float,
help = "List of waights for the beta distributions.",
default= [1.3500523980958758,0.9756729865636893,1.4562248430720026,0.22767153268062393,1.055699069458428,0.9060404341929743,0.35052426603213255,1.157122011830607])
parser.add_argument('-F', '--from-file', action="store_true",
help = "Use a stored set of waights. Use with --parameters-file and --name parameters to indicate where to get the waights from. By seting this parameters the paramete --custom-weights gets disabled.")
parser.add_argument('-p', "--parameters-file", metavar = "str", type = str,
help = "File where the parameters are", default= "../baseline_dataset/optimized_parameters.csv")
parser.add_argument('-n', "--name", metavar = "str", type = str,
help = "An id for the parameters.", default= "result")
args = parser.parse_args(sys.argv[2:])
from processes.result_dataset import generate_result_dataset
generate_result_dataset(args.from_file, args.custom_weights, args.parameters_file, args.name, args.folder, args.dataset_size, args.edges_between, args.multiprocess)
def statistics(self):
parser = argparse.ArgumentParser(description = "Calculate some statistics over a dataset.")
parser.add_argument('-f', "--folder", metavar = "str", type = str,
help = "Folder where the dataset to analize was generated.", default= "data/validation_dataset")
parser.add_argument('-s', "--sample-size", metavar = "int", type = int,
help = "The size of the sample.", default= 1000)
args = parser.parse_args(sys.argv[2:])
from processes.statistics import statistics
statistics(args.folder, args.sample_size)
def plot(self):
parser = argparse.ArgumentParser(description = "Some plots to analyze the results.")
parser.add_argument('-f', "--folder", metavar = "str", type = str,
help = "Folder where the dataset to analize was generated.", default= "../baseline_dataset")
parser.add_argument('-v', "--validation-metrics", metavar = "str", type = str,
help = "File where the validation metrics are.", default= "data/validation_dataset/dataset_metrics.csv")
parser.add_argument('-F', "--format", metavar = "str", type = str,
help = "Format to save generated images in.", default= "svg")
parser.add_argument('-o', "--output-folder", metavar = "str", type = str,
help = "Folder where to save plots.", default= "../plots")
parser.add_argument('-s', "--sample-size", metavar = "int", type = int,
help = "The size of the sample.", default= 1000)
parser.add_argument('-sh', '--show-plots', action="store_true", help = "Show plots instead of saving them.")
choices = ["fitness_evolution", "clustering_density", "dlog_density", "density_param", "validation", "param_dlog", "param_clustering", "sample_param", "sample_paramdist", "sample_grid"]
default = ["sample_param", "sample_paramdist", "sample_grid"]
parser.add_argument('-p', "--plot-selection", nargs = '+', metavar = "str", type = str,
help = "Selects the plots to make. Posible values: {}".format(choices), default= default,
choices= choices)
parser.add_argument('-w', "--custom-weights", nargs = 8, metavar = "float", type = float,
help = "List of waights for the beta distributions.",
default= ((1,1,1,1,1,1,1,1)))
choices = ["custom", "initial"]
parser.add_argument('-ws', "--weight-source", metavar = "str", type = str,
help = "Where to get the waights used for the plot from. Posible values: {}".format(choices), default= "custom",
choices= choices)
parser.add_argument('-n', "--name", metavar = "str", type = str,
help = "Name of the params to use for the fitness_evolution.", default= "result")
args = parser.parse_args(sys.argv[2:])
from processes.plot import plot
plot(args.folder, args.validation_metrics, args.sample_size, args.show_plots, args.format, args.output_folder, args.plot_selection, args.custom_weights, args.weight_source, args.name)
def baseline(self):
parser = argparse.ArgumentParser(description = "Creates the baseline dataset.")
parser.add_argument('-f', "--folder", metavar = "str", type = str,
help = "Folder where to generate the baseline dataset.", default= "../baseline_dataset")
parser.add_argument('-s', "--dataset-size", metavar = "int", type = int,
help = "The size of the baseline dataset.", default= 10000)
parser.add_argument('-e', "--edges-between", nargs = 2, metavar = "int", type = int,
help = "The min and max vallue the edges argument can take.", default= (100000, 2000000))
parser.add_argument('-m', '--multiprocess', action="store_true", help = "Add to take advantage of multiple cores.")
args = parser.parse_args(sys.argv[3:])
from processes.baseline_dataset import generate_baseline
generate_baseline(args.folder, args.dataset_size, args.edges_between, args.multiprocess)
def metrics(self):
parser = argparse.ArgumentParser(description = "Calculate metrics of each graph in a dataset.")
parser.add_argument('-f', "--folder", metavar = "str", type = str,
help = "Folder where the dataset is.", default= "../baseline_dataset")
parser.add_argument('-t', "--clustering-trials", metavar = "int", type = int,
help = "Number of trials used to approximate the clustering cooeficient.", default=1000)
parser.add_argument('-m', '--multiprocess', action="store_true", help = "Add to take advantage of multiple cores.")
args = parser.parse_args(sys.argv[3:])
from processes.metrics import calculate_metrics
calculate_metrics(args.folder, args.clustering_trials, args.multiprocess)
def optimize(self):
parser = argparse.ArgumentParser(description = "Calculate metrics of each graph in a dataset.")
parser.add_argument('-n', "--name", metavar = "str", type = str,
help = "An id for the result.", default= "result")
parser.add_argument('-f', "--folder", metavar = "str", type = str,
help = "Folder where the dataset is.", default= "../baseline_dataset")
parser.add_argument('-g', "--grid-size", metavar = "int", type = int,
help = "The number of rows and columns the grid has.", default=15)
parser.add_argument('-w', "--custom-weights", nargs = 8, metavar = "float", type = float,
help = "Initial weights for optimization.",
default= [1.3500523980958758,0.9756729865636893,1.4562248430720026,0.22767153268062393,1.055699069458428,0.9060404341929743,0.35052426603213255,1.157122011830607])
args = parser.parse_args(sys.argv[3:])
from processes.optimization import optimize
optimize(args.name, args.folder, args.grid_size, args.custom_weights)
if __name__ == "__main__":
Graphlaxy()
| 10,767 | 55.376963 | 213 | py |
graphlaxy | graphlaxy-master/processes/optimization.py | from pathlib import Path
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from utils.filesystem import add_to_csv
from .bargin import grid_bargin, gen_metric_grid, gen_param_grid
def store_params(dataset_folder, name, params, i = None):
if i is not None:
name = "{}_{}".format(name,i)
print("{}: {}".format(name, params))
alfa_a, beta_a, alfa_b, beta_b, alfa_c, beta_c, alfa_N, beta_N = params
add_to_csv(Path(dataset_folder, "optimized_parameters.csv"),{
'name': name, 'iteration': i,
'alfa_a': alfa_a, 'beta_a': beta_a,
'alfa_b': alfa_b, 'beta_b': beta_b,
'alfa_c': alfa_c, 'beta_c': beta_c,
'alfa_N': alfa_N, 'beta_N': beta_N,
})
def optimize(
name = 'result',
dataset_folder = "../baseline_dataset",
grid_size = 10,
custom_weights = [1] * 8):
df_m = pd.read_csv(Path(dataset_folder, "dataset_metrics.csv"))
df_d = pd.read_csv(Path(dataset_folder, "dataset_description.csv"))
df = pd.merge(df_m, df_d, on="name")
df[df["density_log"] < -1]
m = grid_size
M = m * m
gen_metric_grid(df, ["clustering", "density_log"], m)
gen_param_grid(df)
i = 1
def callback(x):
nonlocal i
store_params(dataset_folder, name, x, i)
i += 1
store_params(dataset_folder, name, custom_weights, 0)
res = minimize(grid_bargin(df, M), custom_weights, bounds=[(1e-32,100)] * 8,
tol = 1e-3, callback = callback)
print(res)
store_params(dataset_folder, name, res["x"]) | 1,536 | 28.557692 | 80 | py |
graphlaxy | graphlaxy-master/processes/baseline_dataset.py | import random
import numpy as np
from pathlib import Path
from utils.rmat import rmat_to_file
def generate_baseline(
dataset_folder = "../baseline_dataset",
dataset_size = 10000,
edges_between = (1000,1000000),
multiprocess = False):
Path(dataset_folder,'graphs').mkdir(parents=True, exist_ok=True)
parameters = []
for i in range(0,dataset_size):
E = random.randint(edges_between[0],edges_between[1])
n_0 = np.floor(np.sqrt(E * 20))
N = int(np.floor(random.uniform(n_0, E)))
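    # Sample RMat probabilities with a as the dominant term: the bounds keep
    # b <= a, c <= a, and force d = 1 - a - b - c into [0, a].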
a = random.uniform(0.25, 1)
b = random.uniform(max(0,1-3*a), min(a, 1-a))
c = random.uniform(max(0,1-2*a-b), min(a, 1-a-b))
d = 1-a-b-c
params = {
"i": i, "N": N, "E": E,
"a": a, "b": b, "c": c, "d": d
}
print("Queue params: ", params)
parameters.append(params)
if multiprocess:
from pebble import ProcessPool
from utils.multiprocess import pebble_timeout_callback
with ProcessPool() as pool:
for param in parameters:
future = pool.schedule(rmat_to_file,
args=(param['N'],param['E'],param['a'],param['b'],param['c'],param['d'],dataset_folder, param['i']),
timeout=600)
future.add_done_callback(pebble_timeout_callback)
else:
for param in parameters:
rmat_to_file(param['N'],param['E'],param['a'],param['b'],param['c'],param['d'],dataset_folder, param['i']) | 1,520 | 31.361702 | 121 | py |
graphlaxy | graphlaxy-master/processes/result_dataset.py | import random
import numpy as np
from pathlib import Path
import pandas as pd
from utils.rmat import rmat_to_file
from utils.probability import beta_rvs_shifted, beta_rvs_discrete_shifted
def generate_result_dataset(
from_file = True,
custom_weights = [1] *8,
param_file = "../baseline_dataset/parameters.csv",
name = "result",
dataset_folder = '../resulting_dataset',
dataset_size = 10000,
edges_between = (1000,1000000),
multiprocess = False):
if from_file:
df = pd.read_csv(param_file)
params = df[df["name"] == name].iloc[-1][[
"alfa_a", "beta_a", "alfa_b", "beta_b", "alfa_c", "beta_c", "alfa_N", "beta_N"
]]
else:
params = custom_weights
print(params)
alfa_a, beta_a, alfa_b, beta_b, alfa_c, beta_c, alfa_N, beta_N = params
Path(dataset_folder,'graphs').mkdir(parents=True, exist_ok=True)
parameters = []
for i in range(0,dataset_size):
E = random.randint(edges_between[0], edges_between[1])
n_0 = np.floor(np.sqrt(E * 20))
N = beta_rvs_discrete_shifted(alfa_N, beta_N, n_0, E)
a = beta_rvs_shifted(alfa_a, beta_a, 1/4, 1)
b = beta_rvs_shifted(alfa_b, beta_b, max(0,1-3*a), min(a, 1-a))
c = beta_rvs_shifted(alfa_c, beta_c, max(0,1-2*a-b), min(a, 1-a-b))
d = 1 - a - b - c
params = {
"i": i, "N": N, "E": E,
"a": a, "b": b, "c": c, "d": d
}
print("Queue params: ", params)
parameters.append(params)
if multiprocess:
from pebble import ProcessPool
from utils.multiprocess import pebble_timeout_callback
with ProcessPool() as pool:
for param in parameters:
future = pool.schedule(rmat_to_file,
args=(param['N'],param['E'],param['a'],param['b'],param['c'],param['d'],dataset_folder, param['i']),
timeout=300)
future.add_done_callback(pebble_timeout_callback)
else:
for param in parameters:
rmat_to_file(param['N'],param['E'],param['a'],param['b'],param['c'],param['d'],dataset_folder, param['i']) | 2,200 | 30.898551 | 121 | py |
graphlaxy | graphlaxy-master/processes/bargin.py | import numpy as np
import pandas as pd
from utils.probability import beta_cdf_interval, beta_cdf_mean, beta_cdf_mean_2d
def get_grid(m=10,
limits = [(0,1),(-6,-1)]):
block0 = np.linspace(limits[0][0], limits[0][1], m + 1)
block1 = np.linspace(limits[1][0], limits[1][1], m + 1)
return [block0, block1]
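# With the defaults, the grid covers the clustering coefficient on [0, 1] and
# log10 density on [-6, -1], split into m x m buckets by gen_metric_grid below.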
def gen_metric_grid(df, metrics, m):
if len(metrics) != 2:
raise NotImplementedError("Only implemented 2D Grid. Please send only one pair of metrics.")
blocks = get_grid(m)
df["metric_bucket_1"] = pd.cut(df[metrics[0]], blocks[0], labels=list(range(m)), include_lowest =True)
df["metric_bucket_2"] = pd.cut(df[metrics[1]], blocks[1], labels=list(range(m)), include_lowest =True)
def interval_b(a):
return (max(0,1-3*a), min(a, 1-a))
def interval_c(a,b):
return (max(0,1-2*a-b), min(a, 1-a-b))
def interval_c_mean(a, b):
  a_mean = (a.right + a.left) / 2
  b_mean = (b.right + b.left) / 2
  return interval_c(a_mean, b_mean)
def interval_c_leftleft(a,b):
return interval_c(a.left, b.left)
def interval_c_leftright(a,b):
return interval_c(a.left, b.right)
def interval_c_rightleft(a,b):
return interval_c(a.right, b.left)
def interval_c_rightright(a,b):
return interval_c(a.right, b.right)
def interval_b_mean(a):
  a_mean = (a.right + a.left) / 2
  return interval_b(a_mean)
def interval_b_left(a):
return interval_b(a.left)
def interval_b_right(a):
return interval_b(a.right)
def gen_param_grid(df):
precision = 0.05
intervals = np.arange(0,1.001,precision)
df["NE"] = (df["N"] - np.floor(np.sqrt(df["E"] * 20))) / df["E"]
df["a_bucket"] = pd.cut(df["a"], intervals, include_lowest =True)
df["b_bucket"] = pd.cut(df["b"], intervals, include_lowest =True)
df["c_bucket"] = pd.cut(df["c"], intervals, include_lowest =True)
df["NE_bucket"] = pd.cut(df["NE"], intervals, include_lowest =True)
df["param_bucket_count"] = df.groupby(['a_bucket', 'b_bucket', 'c_bucket', 'NE_bucket'])[['a_bucket']].transform('count')
def gen_weights(df, res):
alfa_a, beta_a, alfa_b, beta_b,alfa_c, beta_c, alfa_N, beta_N = res
weights = df.apply(lambda row: (beta_cdf_interval(row['a_bucket'],alfa_a, beta_a,(1/4, 1)) *
beta_cdf_mean(row['b_bucket'],alfa_b, beta_b, interval_b_left(row['a_bucket']), interval_b_mean(row['a_bucket']), interval_b_right(row['a_bucket'])) *
beta_cdf_mean_2d(row['c_bucket'],alfa_c, beta_c, interval_c_mean(row['a_bucket'], row['b_bucket']),
interval_c_leftleft(row['a_bucket'], row['b_bucket']), interval_c_leftright(row['a_bucket'], row['b_bucket']),
interval_c_rightleft(row['a_bucket'], row['b_bucket']), interval_c_rightright(row['a_bucket'], row['b_bucket'])) *
beta_cdf_interval(row['NE_bucket'],alfa_N, beta_N, (0, 1))) / row["param_bucket_count"],
axis=1)
weights[weights < 0] = 0
df["weight"] = weights
def grid_bargin(df, M):
def _grid_bargin(params):
if any(x <= 0 for x in params):
return 1
gen_weights(df, params)
total = df["weight"].sum()
print(total)
buckets = df[(df["metric_bucket_1"] != np.NaN) & (df["metric_bucket_2"] != np.NaN)].groupby(["metric_bucket_1", "metric_bucket_2"])
bucket_prob = buckets["weight"].sum() / total
bargin = - sum(np.log2( 1 + (M-1) * bucket_prob)) / M
return bargin
return _grid_bargin
| 3,465 | 32.326923 | 156 | py |
graphlaxy | graphlaxy-master/processes/statistics.py | import pandas as pd
from pathlib import Path
def statistics(
dataset_folder = "../baseline_dataset",
samples = 1000
):
print("Loading Dataset...")
df = pd.read_csv(Path(dataset_folder, "dataset_metrics.csv")).head(samples)
print("correlation: ", df["density_log"].corr(df["clustering"]))
print("covariance: ", df["density_log"].cov(df["clustering"]))
print("density_log min: ", df["density_log"].min())
print("density_log mean: ", df["density_log"].mean())
print("density_log max: ", df["density_log"].max())
print("clustering min: ", df["clustering"].min())
print("clustering mean: ", df["clustering"].mean())
print("clustering max: ", df["clustering"].max()) | 723 | 37.105263 | 79 | py |
graphlaxy | graphlaxy-master/processes/metrics.py | import pandas as pd
import networkx as nx
import numpy as np
from pathlib import Path
from utils.filesystem import read_graph, add_to_csv
import multiprocessing as mp
lock = mp.Lock()
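# The module-level lock is meant to serialize add_to_csv appends to the shared
# metrics file when _metrics runs concurrently in pebble worker processes.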
def _metrics(dataset_folder, row, trials):
a = row['a']
b = row['b']
c = row['c']
d = 1 - a - b - c
G = read_graph(Path(dataset_folder,"graphs", row['name']))
#Gcc = max(nx.connected_components(G), key=len)
#G = G.subgraph(Gcc)
density = nx.density(G)
clustering = nx.algorithms.approximation.clustering_coefficient.average_clustering(G,trials)
max_degree = max([x[1] for x in nx.degree(G)])
with lock:
add_to_csv(Path(dataset_folder, "dataset_metrics.csv"), {
'name': row['name'], 'nodes': G.number_of_nodes(), 'edges': G.number_of_edges(),
'density': density,
"max_degree": max_degree,
'density_log': np.log10(density),
'clustering': clustering
})
return row['name']
def calculate_metrics(
dataset_folder = "../baseline_dataset",
    clustering_trials = 1000,
multiprocess = False):
df = pd.read_csv(Path(dataset_folder, 'dataset_description.csv'))
if multiprocess:
from pebble import ProcessPool
from utils.multiprocess import pebble_timeout_callback
with ProcessPool() as pool:
for _, row in df.iterrows():
future = pool.schedule(_metrics,
          args = (dataset_folder, row, clustering_trials),
timeout = 300)
future.add_done_callback(pebble_timeout_callback)
else:
for _, row in df.iterrows():
      _metrics(dataset_folder, row, clustering_trials)
graphlaxy | graphlaxy-master/processes/plot.py | from argparse import ArgumentError
import random
from statistics import mean
import pandas as pd
import numpy as np
from pathlib import Path
from matplotlib import pyplot as plt
from utils.probability import beta_rvs_shifted
from scipy.stats import beta, uniform
from .bargin import gen_param_grid, gen_weights, gen_metric_grid, grid_bargin, get_grid
def annotate_df(row, ax):
ax.annotate(row["name"], row[["density_log","clustering"]],
xytext=(3, -2),
textcoords='offset points',
size=12,
color='darkslategrey')
def plot_paramdensity(res, s):
alfa_a, beta_a, alfa_b, beta_b, alfa_c, beta_c, alfa_N, beta_N = res
param_list = []
for _ in range(s):
a = beta_rvs_shifted(alfa_a, beta_a, 1/3, 1)
b = beta_rvs_shifted(alfa_b, beta_b, max(0,1-3*a), min(a, 1-a))
c = beta_rvs_shifted(alfa_c, beta_c, max(0,1-2*a-b), min(a, 1-a-b))
d = 1-a-b-c
params = {'a': a, 'b': b, 'c': c, 'd': d}
param_list.append(params)
df = pd.DataFrame(param_list)
plt.figure()
plt.hist(df, bins=20, label=["a","b", "c","d"], stacked=False, density=True)
plt.xlabel("value")
plt.ylabel("density")
plt.legend()
  plt.xlim(0, 1)
plt.ylim(0,20)
def plot_clustering_density(df):
plt.figure()
plt.hist(df["clustering"], bins=20, density=True)
index = (0,1)
plt.plot(index, uniform.pdf(index), label='Uniform')
plt.ylim(0,10)
plt.legend()
plt.xlabel("clustering")
plt.ylabel("denisty")
def plot_dlog_density(df):
plt.figure()
plt.hist(df["density_log"], bins=20, density=True)
index = (-5.5,0)
plt.plot(index, uniform.pdf(index, loc=-5.5, scale =5.5), label='Uniform')
plt.ylim(0,0.5)
plt.legend()
plt.xlabel("Dlog")
plt.ylabel("denisty")
def plot_sample_paramdist(res):
alfa_a, beta_a, alfa_b, beta_b,alfa_c, beta_c, alfa_N, beta_N = res
plt.figure()
index = np.arange(0,1, 0.01)
plt.plot(index, beta.pdf(index,alfa_a, beta_a), label='a')
plt.plot(index, beta.pdf(index,alfa_b, beta_b), label='b')
plt.plot(index, beta.pdf(index,alfa_c, beta_c), label='c')
plt.plot(index, beta.pdf(index,alfa_N, beta_N), label='N')
plt.xlabel("value (before shifting and scaling)")
plt.ylabel("density")
plt.legend()
def plot_sample_grid(df):
ax= df.plot.scatter("density_log","clustering", c="weight_log", colormap='magma')
grid = get_grid()
for c in grid[0]:
ax.plot([-5.5, 0], [c, c], color = 'green', linestyle = '--', linewidth = 0.5)
for d in grid[1]:
ax.plot([d, d], [0, 1], color = 'green', linestyle = '--', linewidth = 0.5)
def plot_sample_params(df):
ax= df.plot.scatter("NE","diff", c="weight_log", colormap='magma') # c="gray")
#sample.plot.scatter("NE","diff", ax = ax)
plt.xlabel("N / E")
plt.ylabel("a - d")
def plot_param_clustering(df):
df.plot.scatter("NE","diff", c="clustering", colormap='magma')
plt.xlabel("N / E")
plt.ylabel("a - d")
def plot_fitness_evolution(df, M, params, name):
param_serie = params[params["name"].str.startswith("{}_".format(name))].copy()
param_serie["iteration"] = param_serie["name"].str.extract("_(\d+)$").astype(int)
param_serie["fitness"] = param_serie[
["alfa_a", "beta_a", "alfa_b", "beta_b", "alfa_c", "beta_c", "alfa_N", "beta_N"]
].apply(lambda row: grid_bargin(df, M)(row), axis=1)
ax = param_serie.plot("iteration", "fitness", marker="o")
param_serie.apply(lambda e:
ax.annotate(
"{:.2f}".format(e["fitness"]),
e[["iteration", "fitness"]],
xytext=(-11,6),
textcoords='offset points',
size=8,
color='darkslategrey'
), axis=1)
def plot_param_dlog(df):
df.plot.scatter("NE","diff", c="density_log", colormap='magma')
plt.xlabel("N / E")
plt.ylabel("a - d")
def plot_validation(df, df_val):
ax = df.plot.scatter("density_log","clustering", c="gray")
df_val.plot.scatter("density_log","clustering", ax = ax)
plt.xlabel("Dlog")
plt.xlim(-6,0.01)
plt.ylim(-0.01,1.01)
df_val.apply(lambda row: annotate_df(row,ax), axis=1)
def figure_print(show, folder, name, format):
if not show:
Path(folder).mkdir(parents=True, exist_ok=True)
plt.savefig(Path(folder, "{}.{}".format(name,format)))
def plot(
dataset_folder = "../baseline_dataset",
validation_metrics = "../validation_dataset/dataset_metrics.csv",
samples = 0,
show = True,
format = 'svg',
output_folder = "../plots/initial",
plot_selection = ["validation"],
custom_weights = [1] * 8,
weight_source = "custom",
name = "r10"
):
plt.rcParams.update({'font.size': 22})
if weight_source == "custom":
weights = custom_weights
elif weight_source == "initial":
weights = [1] * 8
print("Will plot:", plot_selection)
if set(["sample_grid", "sample_param", "validation", "dlog_density", "clustering_density",
"fitness_evolution", "param_clustering", "param_dlog"]) & set(plot_selection):
print("Loading Dataset...")
df_m = pd.read_csv(Path(dataset_folder, "dataset_metrics.csv"))
df_d = pd.read_csv(Path(dataset_folder, "dataset_description.csv"))
df_b = pd.merge(df_m, df_d, on="name")
df_b = df_b.sample(samples) if samples > 0 else df_b
df_b["NE"] = df_b["N"] / df_b["E"]
df_b["diff"] = df_b["a"] - df_b["d"]
if set(["sample_grid", "sample_param", "fitness_evolution"]) & set(plot_selection):
print("Generating weights...")
#sample = gen_sample(df_b, weights, samples)
m = 10
gen_param_grid(df_b)
gen_metric_grid(df_b, ["clustering", "density_log"], m)
gen_weights(df_b, weights)
df_b["weight_log"] = np.log10(df_b["weight"])
if "validation" in plot_selection:
print("Loading Validation dataset...")
df_val = pd.read_csv(validation_metrics)
if "fitness_evolution" in plot_selection:
if name is None:
raise ArgumentError("Name must be supplied to plot fitness evolution")
print("Loading optimized_parameters data...")
params = pd.read_csv(Path(dataset_folder, "optimized_parameters.csv"))
if "param_dlog" in plot_selection:
print("Generating plot: param_dlog...")
plot_param_dlog(df_b)
figure_print(show, output_folder, "param_dlog", format)
if "param_clustering" in plot_selection:
print("Generating plot: param_clustering...")
plot_param_clustering(df_b)
figure_print(show, output_folder, "param_clustering", format)
if "sample_grid" in plot_selection:
print("Generating plot: sample_grid...")
plot_sample_grid(df_b)#, sample)
figure_print(show, output_folder, "grid", format)
if "sample_paramdist" in plot_selection:
print("Generating plot: sample_paramdist...")
plot_sample_paramdist(weights)
figure_print(show, output_folder, "paramdist", format)
if "sample_param" in plot_selection:
print("Generating plot: sample_param...")
plot_sample_params(df_b) #, sample)
figure_print(show, output_folder, "params", format)
if "validation" in plot_selection:
print("Generating plot: validation...")
plot_validation(df_b, df_val)
figure_print(show, output_folder, "validation", format)
if "density_param" in plot_selection:
print("Generating plot: density_param...")
plot_paramdensity(weights, samples)
figure_print(show, output_folder, "density_param", format)
if "clustering_density" in plot_selection:
print("Generating plot: clustering_density...")
plot_clustering_density(df_b)
figure_print(show, output_folder, "clustering_density", format)
if "dlog_density" in plot_selection:
print("Generating plot: dlog_density...")
plot_dlog_density(df_b)
figure_print(show, output_folder, "dlog_density", format)
if "fitness_evolution" in plot_selection:
print("Generating plot: fitness_evolution...")
plot_fitness_evolution(df_b, m*m, params, name)
figure_print(show, output_folder, "fitness_evolution", format)
if show:
print("Showing plots...")
plt.show()
| 8,500 | 34.569038 | 94 | py |
graphlaxy | graphlaxy-master/processes/__init__.py | __all__ = ["baseline_dataset", "metrics", "optimization", "plot", "result_dataset"] | 83 | 83 | 83 | py |
graphlaxy | graphlaxy-master/utils/probability.py | import numpy as np
from scipy.stats import beta
def beta_cdf_interval(interval, a, b, interval_shift):
low = interval_shift[0]
up = interval_shift[1]
if up - low <= 0:
return 0
return beta.cdf(interval.right, a, b, loc = low, scale = up - low) -\
beta.cdf(interval.left, a, b, loc = low, scale = up - low)
def beta_cdf_mean(interval, a, b, interval_left, interval_mean, interval_right):
return (beta_cdf_interval(interval, a, b, interval_left) +\
2 * beta_cdf_interval(interval, a, b, interval_mean) +\
beta_cdf_interval(interval, a, b, interval_right)) / 4
def beta_cdf_mean_2d(interval, a, b, interval_mean, interval1, interval2, interval3, interval4):
return (4*beta_cdf_interval(interval, a, b, interval_mean) +\
beta_cdf_interval(interval, a, b, interval1) +\
beta_cdf_interval(interval, a, b, interval2) +\
beta_cdf_interval(interval, a, b, interval3) +\
beta_cdf_interval(interval, a, b, interval4)) / 8
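# scipy's loc/scale convention rescales the standard Beta on [0, 1] so that the
# samplers below draw X = low + (up - low) * Beta(a, b) on [low, up].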
def beta_rvs_shifted(a, b, low, up):
return beta.rvs(a, b, loc = low, scale = up - low)
def beta_rvs_discrete_shifted(a, b, low, up):
return int(np.floor(beta_rvs_shifted(a,b,low,up)))
### Not used any more
def beta_cdf_diferential(event, a, b, low, up):
epsilon = 1e-8
return beta.cdf(event + epsilon, a, b, loc = low, scale = up - low) -\
beta.cdf(event - epsilon, a, b, loc = low, scale = up - low)
def beta_cdf_discrete(event, a, b, low, up):
return beta.cdf(event + 1, a, b, loc= low , scale= up - low) -\
beta.cdf(event, a, b, loc= low , scale= up - low) | 1,558 | 34.431818 | 96 | py |
graphlaxy | graphlaxy-master/utils/rmat.py | from pathlib import Path
import numpy as np
import multiprocessing as mp
import networkit as nk
from utils.filesystem import add_to_csv
lock = mp.Lock()
def rmat_to_file(N, E, a, b, c, d, dataset_folder, s):
scale = np.ceil(np.log2(N))
factor = E/N
reduce = np.power(2, scale) - N
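  # networkit's RmatGenerator works on 2^scale vertices, so scale is the next
  # power of two above N and reduceNodes trims the surplus back out;
  # factor = E/N targets the requested average number of edges per vertex.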
Graph = nk.generators.RmatGenerator(scale, factor, a, b, c, d, weighted = True, reduceNodes = reduce).generate()
Graph = nk.graph.Graph(Graph, False, False) # To undirected and unweigted
Graph.removeSelfLoops()
Graph = nk.components.ConnectedComponents(Graph).extractLargestConnectedComponent(Graph, compactGraph = True)
if Graph.numberOfEdges() > 100:
name = 'RMAT_{}.txt'.format(s)
out_filename = Path(dataset_folder,'graphs',name)
print("Wrinting to:" + str(out_filename))
nk.writeGraph(Graph, str(out_filename), nk.Format.EdgeListTabOne)
with lock:
add_to_csv(Path(dataset_folder,"dataset_description.csv"), {
'N': N, 'E':E, 'a': a, 'b': b, 'c': c, 'd': d, 'name': name, 'scale': scale, 'factor': factor, 'reduce': reduce
})
return s | 1,080 | 37.607143 | 119 | py |
graphlaxy | graphlaxy-master/utils/filesystem.py | import os
import csv
import networkx as nx
def add_to_csv(path, data):
if os.path.exists(path):
with open(path, 'a', newline='') as f:
w = csv.DictWriter(f, data.keys())
w.writerow(data)
else:
with open(path, 'w', newline='') as f:
w = csv.DictWriter(f, data.keys())
w.writeheader()
w.writerow(data)
def read_graph(path):
with open(path, 'r') as f:
data = nx.readwrite.edgelist.read_edgelist(f)
return data | 483 | 23.2 | 49 | py |
graphlaxy | graphlaxy-master/utils/__init__.py | 0 | 0 | 0 | py |
|
graphlaxy | graphlaxy-master/utils/multiprocess.py | def pebble_timeout_callback(future):
try:
future.result() # blocks until results are ready
except TimeoutError as error:
print("Function took longer than %d seconds" % error.args[1])
except Exception as error:
print("Function raised %s" % error)
if hasattr(error, "traceback"):
print(error.traceback) # traceback of the function | 384 | 41.777778 | 69 | py |
apicarver | apicarver-main/restats/app.py | from pathlib import Path
import sys
import json
import core.pairing as pairing
import core.statistic as stat
import utils.parsers as par
def callOptionMethod(confDict):
modules = confDict['modules']
# Extract data from specification (needed to parse pairs)
specDict = par.extractSpecificationData(conf['specification'])
# Pop the base paths of the API
bases = specDict.pop('bases')
if modules == 'dataCollection':
paths = list(specDict.keys())
pairing.generatePairs(confDict, paths, bases, dbFile=confDict['dbPath'])
elif modules == 'statistics':
stat.generateStats(specDict, dbfile=confDict['dbPath'], dest=confDict['reportsDir'])
elif modules == 'carver':
paths = list(specDict.keys())
pairing.generatePairs(confDict, paths, bases, dbFile=confDict['dbPath'])
stat.generateStats(specDict, dbfile=confDict['dbPath'], dest=confDict['reportsDir'])
elif modules == 'all':
paths = list(specDict.keys())
pairing.generatePairs(confDict, paths, bases, dbFile=confDict['dbPath'])
stat.generateStats(specDict, dbfile=confDict['dbPath'], dest=confDict['reportsDir'])
pairing.generatePairs(confDict, paths, bases, dbFile=confDict['cassetteDbPath'], schemathesis=True)
stat.generateStats(specDict, dbfile=confDict['dbPath'], dest=confDict['cassetteReportsDir'])
elif modules == 'specCompare':
specDict = par.extractSpecificationData(conf['specification'])
# paths = list(specDict.keys())
# pairing.generateSpecPairs(confDict, paths, bases)
# stat.generateStats(specDict, confDict)
# pairing.compareSpecs(specDict, confDict)
pairing.compareSpecsNew(confDict)
elif modules == "schemathesis":
paths = list(specDict.keys())
pairing.generatePairs(confDict, paths, bases, dbFile=confDict['cassetteDbPath'], schemathesis=True)
stat.generateStats(specDict, dbfile=confDict['cassetteDbPath'], dest=confDict['cassetteReports'])
elif modules == "merge":
paths = list(specDict.keys())
pairing.generatePairs(confDict, paths, bases, dbFile=confDict['mergeDb'], schemathesis=True)
pairing.generatePairs(confDict, paths, bases, dbFile=confDict['mergeDb'])
stat.generateStats(specDict, dbfile=confDict['mergeDb'], dest=confDict['mergeReports'])
else:
raise Exception('Wrong module. [pair/statistics/all]')
if __name__ == '__main__':
try:
cFilePath = sys.argv[1]
except:
# cFilePath = "petclinic2/config.json"
cFilePath = "/Users/apicarv/git/TestCarving/testCarver/out/jawa/20220711_025641/oas/20220711_032415/probe_oas_json_conf.json"
# Read configuration file
with open(cFilePath) as j:
conf = json.load(j)
for k in conf:
conf[k] = conf[k][:-1] if conf[k][-1] == '/' else conf[k]
callOptionMethod(conf)
''' verbose = conf['verbose']
# Extract data from specification (needed to parse pairs)
specDict = par.extractSpecificationData(conf['specification'])
# Pop the base paths of the API
bases = specDict.pop('bases')
paths = list(specDict.keys())
if conf['option'] == 'pair':
pairing.generatePairs(conf, paths, bases)
elif conf['option'] == 'statistics':
stat.generateStats(specDict, conf['reportDirPath'], conf['dbFilePath'])
else:
raise Exception('Not implemented. WIP.')'''
| 3,165 | 33.043011 | 127 | py |
apicarver | apicarver-main/restats/__init__.py | 0 | 0 | 0 | py |
|
apicarver | apicarver-main/restats/core/statistic.py | from pathlib import Path
import json
import utils.parsers as parsers
import utils.dbmanager as dbm
# dest = None
jsonTestedKey = 'documentedAndTested'
jsonNotTestedKey = 'documentedAndNotTested'
jsonNotExpectedKey = 'notDocumentedAndTested'
jsonFoundKey = 'totalTested'
jsonTotalKey = 'documented'
def getPathCoverage(paths, dbfile, dest):
dbm.create_connection(dbfile)
count = dbm.getPathCount()
testedPaths = dbm.getPathNames()
untestedPaths = paths.copy()
for p in testedPaths:
untestedPaths.remove(p)
with open(dest + '/path_coverage.json', 'w+') as out:
json.dump({jsonTestedKey : testedPaths, jsonNotTestedKey : untestedPaths}, out, indent='\t')
dbm.close()
return {jsonTotalKey : len(paths), jsonTestedKey : count, jsonFoundKey : count}
# with open(dest + 'path_coverage.json', 'w+') as out:
# json.dump({jsonTestedKey : testedPaths, jsonNotTestedKey : untestedPaths}, out, indent='\t')
#
# dbm.close()
#
# return {jsonTestedKey : count, 'total' : len(paths), jsonNotTestedKey : len(untestedPaths)}
def getOperationCoverage(specDict, dbfile, dest):
dbm.create_connection(dbfile)
# Count the number of methods for each path in the specification
operationsPerPathCount = sum([len(path.keys()) for path in specDict.values()])
operationsTestedCount = operationsFoundCount = dbm.getOperationCount()
# Get every operation name for every path in the specification
operationsPerPath = {}
not_expected = {}
for path, desc in specDict.items():
operationsPerPath[path] = list(desc.keys())
# Get every operation name for every path that has been tested
operationsTested = dbm.getOperationNames()
tested = {}
documentedAndTested = {}
for r in operationsTested:
tested.setdefault(r[1], []).append(r[0])
# Remove tested methods
	# TODO handle methods that are not in the specification
for path in tested:
# Keep operations in the test set that do not appear in spec
not_expected[path] = \
[x for x in tested[path] if x not in operationsPerPath[path]]
# Keep operations in the spec that do not appear in test set
operationsPerPath[path] = \
[x for x in operationsPerPath[path] if x not in tested[path]]
# Remove the count of the unexpected from the found to get the tested
operationsTestedCount = operationsTestedCount - len(not_expected[path])
documentedAndTested[path] = [x for x in tested[path] if x not in not_expected[path]]
with open(dest + '/operation_coverage.json', 'w+') as out:
json.dump({jsonTestedKey : documentedAndTested, jsonNotTestedKey : operationsPerPath, jsonNotExpectedKey : not_expected}, out, indent='\t')
dbm.close()
return {jsonTotalKey : operationsPerPathCount, jsonTestedKey : operationsTestedCount, jsonFoundKey : operationsFoundCount}
def getStatusCoverage(specDict, dbfile, dest):
dbm.create_connection(dbfile)
statusInSpecCount = 0
statusInSpec = {}
# Get every status possible for each path, operation described in the specification
for path, vals in specDict.items():
for method, x in vals.items():
statusInSpecCount = statusInSpecCount + len(x['responses'])
statusInSpec.setdefault(path, {})
statusInSpec[path][method] = x['responses']
# get status tested from the db
statusTested = dbm.getStatusCodes()
not_expected = {}
tested = {}
statusTestedCount = 0
for path, method, status in statusTested:
try:
statusInSpec[path][method].remove(status)
# Moving those lines before the previous makes the unexpected
# appear in the tested set
tested.setdefault(path, {})
tested[path].setdefault(method, []).append(status)
statusTestedCount = statusTestedCount + 1
# There could be more codes than the ones in the specification
except (KeyError, ValueError):
not_expected.setdefault(path, {})
not_expected[path].setdefault(method, []).append(status)
with open(dest + '/status_coverage.json', 'w+') as out:
json.dump({jsonTestedKey : tested, jsonNotTestedKey : statusInSpec, jsonNotExpectedKey : not_expected}, out, indent='\t')
statusFoundCount = dbm.getStatusCount()
dbm.close()
return {jsonTotalKey : statusInSpecCount, jsonTestedKey : statusTestedCount, jsonFoundKey : statusFoundCount}
# TODO
# Update with found. Need to check whether set.add added or not.
def getStatusClassCoverage(specDict, dbfile, dest):
dbm.create_connection(dbfile)
statusInSpecCount = 0
statusInSpec = {}
for path, vals in specDict.items():
for method, x in vals.items():
for code in x['responses']:
statusInSpec.setdefault(path, {})
statusInSpec[path].setdefault(method, set()).add(code[0])
statusInSpecCount = statusInSpecCount + len(statusInSpec[path][method])
not_expected = {}
tested = {}
statusTested = dbm.getStatusCodes()
for path, method, status in statusTested:
try:
statusInSpec[path][method].remove(status[0])
# Moving those lines before the previous makes the unexpected
# appear in the tested set
tested.setdefault(path, {})
tested[path].setdefault(method, set()).add(status[0])
# There could be more codes than the ones in the specification
except (KeyError, ValueError):
not_expected.setdefault(path, {})
not_expected[path].setdefault(method, set()).add(status[0])
statusTestedCount = 0
for path, vals in tested.items():
for method, x in vals.items():
statusTestedCount = statusTestedCount + len(x)
# Cast the sets to dict in order to save it as json
for path, vals in statusInSpec.items():
for method, x in vals.items():
statusInSpec[path][method] = list(x)
for path, vals in tested.items():
for method, x in vals.items():
tested[path][method] = list(x)
for path, vals in not_expected.items():
for method, x in vals.items():
not_expected[path][method] = list(x)
	with open(dest + '/status_class_coverage.json', 'w+') as out:
json.dump({jsonTestedKey : tested, jsonNotTestedKey : statusInSpec, jsonNotExpectedKey : not_expected}, out, indent='\t')
dbm.close()
return {jsonTotalKey : statusInSpecCount, jsonTestedKey : statusTestedCount}
def getResponseContentTypeCoverage(specDict, dbfile, dest):
dbm.create_connection(dbfile)
typesInSpecCount = 0
typesInSpec = {}
for path, vals in specDict.items():
for method, x in vals.items():
typesInSpecCount = typesInSpecCount + len(x['produces'])
typesInSpec.setdefault(path, {})
typesInSpec[path][method] = x['produces'].copy()
statusTested = dbm.getResponseTypes()
not_expected = {}
tested = {}
typesTestedCount = 0
for path, method, t in statusTested:
try:
typesInSpec[path][method].remove(t)
# Moving those lines before the previous makes the unexpected
# appear in the tested set
tested.setdefault(path, {})
tested[path].setdefault(method, []).append(t)
typesTestedCount = typesTestedCount + 1
# There could be more codes than the ones in the specification
except (KeyError, ValueError):
not_expected.setdefault(path, {})
not_expected[path].setdefault(method, []).append(t)
with open(dest + '/response_type_coverage.json', 'w+') as out:
json.dump({jsonTestedKey : tested, jsonNotTestedKey : typesInSpec, jsonNotExpectedKey : not_expected}, out, indent='\t')
typesFoundCount = dbm.getResponseTypesCount()
dbm.close()
return {jsonTotalKey : typesInSpecCount, jsonTestedKey : typesTestedCount, jsonFoundKey : typesFoundCount}
# TODO
# Exact copy of the response version. Collapse everything into a single function?
def getRequestContentTypeCoverage(specDict, dbfile, dest):
dbm.create_connection(dbfile)
typesInSpecCount = 0
typesInSpec = {}
for path, vals in specDict.items():
for method, x in vals.items():
typesInSpecCount = typesInSpecCount + len(x['consumes'])
typesInSpec.setdefault(path, {})
typesInSpec[path][method] = x['consumes'].copy()
statusTested = dbm.getRequestTypes()
not_expected = {}
tested = {}
typesTestedCount = 0
for path, method, t in statusTested:
try:
typesInSpec[path][method].remove(t)
# Moving those lines before the previous makes the unexpected
# appear in the tested set
tested.setdefault(path, {})
tested[path].setdefault(method, []).append(t)
typesTestedCount = typesTestedCount + 1
# There could be more codes than the ones in the specification
except (KeyError, ValueError):
not_expected.setdefault(path, {})
not_expected[path].setdefault(method, []).append(t)
with open(dest + '/request_type_coverage.json', 'w+') as out:
json.dump({jsonTestedKey : tested, jsonNotTestedKey : typesInSpec, jsonNotExpectedKey : not_expected}, out, indent='\t')
typesFoundCount = dbm.getRequestTypesCount()
dbm.close()
return {jsonTotalKey : typesInSpecCount, jsonTestedKey : typesTestedCount, jsonFoundKey : typesFoundCount}
def getParameterCoverage(specDict, dbfile, dest):
dbm.create_connection(dbfile)
parametersInSpecCount = 0
parametersInSpec = {}
for path, vals in specDict.items():
for method, x in vals.items():
parametersInSpecCount = parametersInSpecCount + len(x['parameters'])
parametersInSpec.setdefault(path, {})
parametersInSpec[path][method] = list(x['parameters'].keys())
parametersTested = dbm.getParameters()
not_expected = {}
tested = {}
parametersTestedCount = 0
for path, method, param in parametersTested:
try:
parametersInSpec[path][method].remove(param)
# Moving those lines before the previous makes the unexpected
# appear in the tested set
tested.setdefault(path, {})
tested[path].setdefault(method, []).append(param)
parametersTestedCount = parametersTestedCount + 1
# There could be more codes than the ones in the specification
except (KeyError, ValueError):
not_expected.setdefault(path, {})
not_expected[path].setdefault(method, []).append(param)
with open(dest + '/parameter_coverage.json', 'w+') as out:
json.dump({jsonTestedKey : tested, jsonNotTestedKey : parametersInSpec, jsonNotExpectedKey : not_expected}, out, indent='\t')
parametersFoundCount = dbm.getParametersCount()
dbm.close()
return {jsonTotalKey : parametersInSpecCount, jsonTestedKey : parametersTestedCount, jsonFoundKey : parametersFoundCount}
def getParameterValueCoverage(specDict, dbfile, dest):
dbm.create_connection(dbfile)
# Extract compatible parameters from specification
pvalCompatibleCount = 0
pvalCompatible = {}
pvalTestedCount = 0
tested = {}
not_expected = {}
for path, vals in specDict.items():
path_id = dbm.getPathIDByName(path)
if path_id is None:
continue
for method, x in vals.items():
for p, v in x['parameters'].items():
if len(v) > 0:
v = set(v)
testedValues = dbm.getParameterValues(path_id, method, p)
testedValues = set(testedValues) if testedValues is not None else set()
tested.setdefault(path, {})
tested[path].setdefault(method, {})
tested[path][method][p] = list(v.intersection(testedValues))
pvalTestedCount = pvalTestedCount + len(tested[path][method][p])
pvalCompatibleCount = pvalCompatibleCount + len(v)
not_expected.setdefault(path, {})
not_expected[path].setdefault(method, {})
not_expected[path][method][p] = list(testedValues - v)
pvalCompatible.setdefault(path, {})
pvalCompatible[path].setdefault(method, {})
pvalCompatible[path][method][p] = list(v - testedValues)
	with open(dest + '/parameter_value_coverage.json', 'w+') as out:
		json.dump({jsonTestedKey : tested, jsonNotTestedKey : pvalCompatible, jsonNotExpectedKey : not_expected}, out, indent='\t')
	dbm.close()
	return {jsonTotalKey : pvalCompatibleCount, jsonTestedKey : pvalTestedCount}
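# Test Coverage Levels (TCL) form a ladder: each level is reached only if every
# previous one is fully covered, from paths (TCL 1) and operations (TCL 2) up to
# documented parameter values (TCL 6).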
def computeTCL(coverageDictionary):
# TCL 0
tcl = 0
# TCL 1
if coverageDictionary['pathCoverage']['rate'] >= 1:
tcl += 1
else:
return tcl
# TCL 2
if coverageDictionary['operationCoverage']['rate'] >= 1:
tcl += 1
else:
return tcl
# TCL 3
if coverageDictionary['responseTypeCoverage']['rate'] >= 1 and coverageDictionary['requestTypeCoverage']['rate'] >= 1:
tcl += 1
else:
return tcl
# TCL 4
if coverageDictionary['statusClassCoverage']['rate'] >= 1 and coverageDictionary['parameterCoverage']['rate'] >= 1:
tcl += 1
else:
return tcl
# TCL 5
if coverageDictionary['statusCoverage']['rate'] >= 1:
tcl += 1
else:
return tcl
# TCL 6
	if coverageDictionary['parameterValueCoverage']['rate'] >= 1:
		tcl += 1
	return tcl
def generateStats(specDict, dbfile, dest):
# global dest
calcRate = lambda d : d[jsonTestedKey] / d[jsonTotalKey] if d[jsonTotalKey] != 0 else None
newStatEntry = lambda d : {'raw' : d, 'rate' : calcRate(d)}
# dest = confDict['reportsDir']
#temp fix
if dest[-1] != '/':
dest = dest + '/'
# dbfile = confDict['dbPath']
covDict = {}
paths = list(specDict.keys())
#-----------
pathCovergage = getPathCoverage(paths, dbfile, dest)
covDict['pathCoverage'] = newStatEntry(pathCovergage)
#-----------
operationCoverage = getOperationCoverage(specDict, dbfile, dest)
covDict['operationCoverage'] = newStatEntry(operationCoverage)
#-----------
statusClassCoverage = getStatusClassCoverage(specDict, dbfile, dest)
covDict['statusClassCoverage'] = newStatEntry(statusClassCoverage)
#-----------
statusCoverage = getStatusCoverage(specDict, dbfile, dest)
covDict['statusCoverage'] = newStatEntry(statusCoverage)
#-----------
respTypeCoverage = getResponseContentTypeCoverage(specDict, dbfile, dest)
covDict['responseTypeCoverage'] = newStatEntry(respTypeCoverage)
#-----------
reqTypeCoverage = getRequestContentTypeCoverage(specDict, dbfile, dest)
covDict['requestTypeCoverage'] = newStatEntry(reqTypeCoverage)
#-----------
paramCoverage = getParameterCoverage(specDict, dbfile, dest)
covDict['parameterCoverage'] = newStatEntry(paramCoverage)
#-----------
paramValueCoverage = getParameterValueCoverage(specDict, dbfile, dest)
covDict['parameterValueCoverage'] = newStatEntry(paramValueCoverage)
covDict['TCL'] = computeTCL(covDict)
with open(dest + '/stats_old.json', 'w+') as out:
json.dump(covDict, out, indent='\t')
print('Metrics and statistics computed successfully. Reports are available at', dest)
| 14,042 | 29.728665 | 141 | py |
apicarver | apicarver-main/restats/core/__init__.py | 0 | 0 | 0 | py |
|
apicarver | apicarver-main/restats/core/pairing.py | import os
from pathlib import Path
import re
import json
from ruamel import yaml
import utils
import utils.parsers as parsers
import utils.dbmanager as dbm
import ruamel.yaml
def addSpecToDB(pair):
#####################
#### POPULATE DB ####
pathID = dbm.getPathID(pair['request']['path'])
method = pair['request']['method']
# Add query/form parameters to the db
for p in pair['request']['parameters']:
paramID = dbm.getParameterID(pathID, method, p['name'])
dbm.addParameterValue(paramID, p['value'])
# # Add body parameters to the db
# for p, v in pair['request']['data']['requestBody'].items():
# paramID = dbm.getParameterID(pathID, method, p)
# dbm.addParameterValue(paramID, v)
# Sometimes some responses can be empty. Just avoid to add it to the db
if pair['response'] != {}:
# Add response parameters to the db
isResponseAdded = False
for p in pair['response']['parameters']:
if p['name'] == 'Content-Type':
dbm.addResponse(pathID, method, pair['response']['status'], p['value'])
isResponseAdded = True
break
		# Content type is not mandatory, even though it should be in the message.
		# If it is absent, it is assumed to be 'application/octet-stream'.
if not isResponseAdded:
dbm.addResponse(pathID, method, pair['response']['status'], 'application/octet-stream')
def addPairToDB(pair):
#####################
#### POPULATE DB ####
pathID = dbm.getPathID(pair['request']['path'])
method = pair['request']['method']
# Add query/form parameters to the db
for p in pair['request']['parameters']:
paramID = dbm.getParameterID(pathID, method, p['name'])
dbm.addParameterValue(paramID, p['value'])
# Add body parameters to the db
for p, v in pair['request']['body'].items():
paramID = dbm.getParameterID(pathID, method, p)
dbm.addParameterValue(paramID, v)
# Sometimes some responses can be empty. Just avoid to add it to the db
if pair['response'] != {}:
# Add response parameters to the db
isResponseAdded = False
for p in pair['response']['parameters']:
if p['name'] == 'Content-Type':
dbm.addResponse(pathID, method, pair['response']['status'], p['value'])
isResponseAdded = True
break
		# Content-Type is not mandatory, even though it should be in the message.
		# If it is absent, it is assumed to be 'application/octet-stream'.
if not isResponseAdded:
dbm.addResponse(pathID, method, pair['response']['status'], 'application/octet-stream')
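# Illustrative sketch (not part of the original module): the minimal pair
# shape addPairToDB expects. All values below are made up for documentation;
# real pairs come from the parsers in utils.parsers.
_EXAMPLE_PAIR = {
	'request': {
		'method': 'get',
		'path': '/pets/{id}',  # already rewritten to the spec template
		'parameters': [{'in': 'query', 'name': 'limit', 'value': '5'}],
		'body': {},  # form/JSON body parameters, name -> value
	},
	'response': {
		'status': '200',
		'parameters': [{'in': 'header', 'name': 'Content-Type', 'value': 'application/json'}],
	},
}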
def addSourceEntries(source, paths_re, pathsInSpec):
unmatched = [] # unmatched requests/responses
matched = []
prev_number = '' # previous file number
pair = {} # pair map for easy json writing
# Gets every entry in the directory, and keeps only files
files_in_source = (entry for entry in Path(source).iterdir() if entry.is_file())
files_in_source = sorted(files_in_source)
for file in files_in_source:
# print('pair number: ', prev_number)
if prev_number == '':
prev_number = file.name.split('-')[0]
request = parsers.RawHTTPRequest2Dict(file)
pair['pairNumber'] = prev_number
# To check if a path matches one in the spec
match = False
# print('actual path: ', request['path'])
# replace the path extracted from the request with the specification matching one
for (r, path) in zip(paths_re, pathsInSpec):
# print('re:', r, 'path:', path)
if (r.match(request['path'])):
match = True
request['path'] = path
break
# x = input()
pair['request'] = request
elif prev_number == file.name.split('-')[0]:
response = parsers.RawHTTPResponse2Dict(file)
pair['response'] = response
# parsers.pair2json(pair, prev_number, dest)
			# If there is no match with the API specification paths,
			# the path is ignored and not counted in the statistics.
if not match:
unmatched.append(pair['request']['path'])
prev_number = ''
pair.clear()
continue
else:
matched.append(pair['request']['path'])
addPairToDB(pair)
prev_number = ''
pair.clear()
else:
unmatched.append(file.name)
return {"unmatched": unmatched, "pair": pair}
def addInferredSpecEntries(inferredDict, paths_re, pathsInSpec):
unmatched = [] # unmatched requests/responses
matched = []
pair = {} # pair map for easy json writing
builtPaths = []
buildPairs = []
for inferredPath in inferredDict.keys():
for method in inferredDict[inferredPath].keys():
parameters = inferredDict[inferredPath][method]["parameters"].keys()
paramList = []
buildPath = inferredPath.replace("{", "")
buildPath = buildPath.replace("}", "")
if len(parameters)>0:
buildPath = buildPath[:-1] + '?' if buildPath[-1] == '/' else buildPath + '?'
for parameter in parameters:
paramList.append({"name": parameter, "value": "xyz"})
buildPath += parameter + "=xyz"
builtPaths.append(buildPath)
request = {"method": method, "buildPath": buildPath, "parameters": paramList}
buildPair = {"request": request, "response": {}}
buildPairs.append(buildPair)
print(builtPaths)
for buildPair in buildPairs:
request = buildPair['request']
response = buildPair['response']
buildPath = request['buildPath']
match = False
for (r, path) in zip(paths_re, pathsInSpec):
# print('re:', r, 'path:', path)
if r.match(buildPath):
match = True
request['path'] = path
break
pair['request'] = request
pair['response'] = response
if not match:
unmatched.append(buildPath)
pair.clear()
continue
matched.append(pair['request']['path'])
addSpecToDB(pair)
pair.clear()
return {"unmatched": unmatched, "matched": matched}
def addCassetteEntries(yamlResponses, paths_re, pathsInSpec):
unmatched = [] # unmatched requests/responses
matched = []
pair = {} # pair map for easy json writing
for yamlResponse in yamlResponses:
print(yamlResponse)
requestResponse = parsers.yamlResponse2Dict(yamlResponse)
request = requestResponse['request']
response = requestResponse['response']
match = False
# print('actual path: ', request['path'])
# replace the path extracted from the request with the specification matching one
for (r, path) in zip(paths_re, pathsInSpec):
# print('re:', r, 'path:', path)
if (r.match(request['path'])):
match = True
request['path'] = path
break
# x = input()
pair['request'] = request
pair['response'] = response
if not match:
unmatched.append(pair['request']['path'])
pair.clear()
continue
matched.append(pair['request']['path'])
addPairToDB(pair)
pair.clear()
return {"unmatched": unmatched, "matched": matched}
def addJsonEntries(jsonResponses, paths_re, pathsInSpec):
unmatched = [] # unmatched requests/responses
matched = []
pair = {} # pair map for easy json writing
for jsonResponse in jsonResponses:
requestResponse = parsers.JsonResponse2Dict(jsonResponse)
request = requestResponse["request"]
response = requestResponse["response"]
# To check if a path matches one in the spec
match = False
# print('actual path: ', request['path'])
# replace the path extracted from the request with the specification matching one
for (r, path) in zip(paths_re, pathsInSpec):
# print('re:', r, 'path:', path)
if (r.match(request['path'])):
match = True
request['path'] = path
break
# x = input()
pair['request'] = request
pair['response'] = response
if not match:
unmatched.append(pair['request']['path'])
pair.clear()
continue
matched.append(pair['request']['path'])
addPairToDB(pair)
pair.clear()
return {"unmatched": unmatched, "matched": matched}
def generatePairs(confDict, pathsInSpec, basesInSpec, dbFile, schemathesis=False):
if schemathesis:
yamlResponsesPath = confDict['cassette']
with open(yamlResponsesPath) as yamlFile:
yaml = ruamel.yaml.YAML(typ='safe')
data = yaml.load(yamlFile)
yamlResponses = json.loads(json.dumps(data))
source = None
jsonResponses = None
elif "results" in confDict:
jsonResponsesPath = confDict['results']
with open(jsonResponsesPath) as jsonFile:
jsonResponses = json.load(jsonFile)
source = None
yamlResponses = None
else:
jsonResponses = None
yamlResponses = None
source = confDict['dumpsDir']
# dest = confDict['pairsDir']
# dbFile = confDict['dbPath']
'''
Sorting paths in the specification to try to avoid path collision:
"/user/{id}" and "/user/auth" have a collision because the second
can be matched by the regex of the first. With a sorted list, the order is
	inverted, so the first matching regex should be the right one.
'''
pathsInSpec.sort()
'''
	We have to be sure that every resource from every possible server is taken
	into consideration.
'''
regPaths = []
for i in range(len(pathsInSpec)):
suffix = ''
actualPath = pathsInSpec[i]
actualPath = actualPath.replace('*', '.*')
# print('path:', pathsInSpec[i], 'actualPath:', actualPath)
for b in basesInSpec:
suffix = suffix + '(' + b.replace('/', '/') + ')|'
regPaths.append('(' + suffix[:-1] + ')' + actualPath)
'''
From every path in the specification extract a regular expression
for pattern matching with the actual paths found in the requests.
'''
	paths_re = [re.sub(r'\{[^{}]*\}', '[^/]+', x) for x in regPaths]
paths_re = [x + '?$' if x[-1] == '/' else x + '/?$' for x in paths_re]
paths_re = [re.compile(x) for x in paths_re]
#####################
#### POPULATE DB ####
dbm.create_connection(dbFile)
dbm.createTables()
#### END ####
#####################
if yamlResponses is not None:
print("Adding YAML responses to DB")
addCassetteEntries(yamlResponses['http_interactions'], paths_re, pathsInSpec)
if jsonResponses is not None:
print("Adding json responses to DB")
addJsonEntries(jsonResponses, paths_re, pathsInSpec)
if source is not None:
print("Adding source dump to DB")
addSourceEntries(source, paths_re, pathsInSpec)
#####################
#### POPULATE DB ####
# dbm.getValues()
dbm.closeAndCommit()
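# Illustrative sketch (not part of the original module): how the path
# templates above become regexes, and why the sort matters. The paths and
# base '/api' are made up for documentation.
def _demo_path_regex():
	pathsInSpec = sorted(['/user/{id}', '/user/auth'])  # '/user/auth' sorts first
	regs = [re.compile('(/api)' + re.sub(r'\{[^{}]*\}', '[^/]+', p) + '/?$')
			for p in pathsInSpec]
	for r, template in zip(regs, pathsInSpec):
		if r.match('/api/user/auth'):
			return template  # '/user/auth', not the more general '/user/{id}'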
def compareParams(matchDict, fpDict, pathsInSpec, basesInSpec, inferredBases):
if basesInSpec is not None and len(basesInSpec)>0:
base = basesInSpec[0]
else:
base = ""
if inferredBases is not None and len(inferredBases)>0:
infBase = inferredBases[0]
else:
infBase = ""
allVars = []
for path in pathsInSpec:
path = base+path
pathSplit = path.split("/")
for i in range(len(pathSplit)):
splitElem = pathSplit[i]
if splitElem.startswith("{"):
allVars.append(path+":"+splitElem)
matchedVars = []
fpVars = []
for matchedPath in matchDict:
print(matchedPath)
specPath = base + matchDict[matchedPath]
infPath = infBase + matchedPath
if matchedPath in fpDict:
infPath = fpDict[matchedPath]
specPathSplit = specPath.split("/")
infPathSplit = infPath.split("/")
		if len(specPathSplit) != len(infPathSplit):
			print("Path segment counts differ for", specPath, "- skipping variable comparison")
continue
for pathIndex in range(len(specPathSplit)):
if specPathSplit[pathIndex].startswith("{"):
if infPathSplit[pathIndex].startswith("{"):
matchedVar = specPath + ":" + specPathSplit[pathIndex]
if matchedVar not in matchedVars:
matchedVars.append(matchedVar)
else:
if infPathSplit[pathIndex].startswith("{"):
fpVars.append(infPath + ":" + infPathSplit[pathIndex])
print(matchedVars)
print(fpVars)
print(allVars)
return matchedVars, fpVars, allVars
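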
def compareSpecsNew(confDict):
original = confDict['specification']
if "inferred" in confDict:
inferredSpec = confDict['inferred']
# inferredDict = parsers.extractSpecificationData(inferredSpec)
else:
print("Add the key 'inferred' in config file for the spec to be compared")
return
originalSpec = json.load(open(original))
if 'openapi' in originalSpec.keys():
parsedOriginal = parsers.parseOpenAPI3(originalSpec)
else:
parsedOriginal = parsers.parseSwagger2(originalSpec)
paths = parsedOriginal
inferredSpec = json.load(open(inferredSpec))
if 'openapi' in inferredSpec.keys():
parsedInf = parsers.parseOpenAPI3(inferredSpec)
else:
parsedInf = parsers.parseSwagger2(inferredSpec)
paths_inf = parsedInf
compareSpecs_base_new(oldSpec=parsedOriginal, inferredDict=parsedInf, confDict=confDict, inferredSpec=inferredSpec)
def compareSpecs(oldSpec, confDict):
if "inferred" in confDict:
inferredSpec = confDict['inferred']
inferredDict = parsers.extractSpecificationData(inferredSpec)
		compareSpecs_base(oldSpec, inferredDict, confDict)
else:
print("Add the key 'inferred' in config file for the spec to be compared")
return
def compareSpecs_base_new(oldSpec, inferredDict, confDict, inferredSpec):
pathVarFP = 0
path_TP = 0
path_TN = 0
path_FP = []
path_FN = 0
op_TP = 0
op_TN = 0
op_FP = []
op_FN = 0
inferredBases = inferredDict.pop('bases')
inferredPaths = list(inferredDict.keys())
dbFile = confDict['specDbPath']
'''
Sorting paths in the specification to try to avoid path collision:
"/user/{id}" and "/user/auth" have a collision because the second
can be matched by the regex of the first. With a sorted list, the order is
	inverted, so the first matching regex should be the right one.
'''
basesInSpec = oldSpec.pop('bases')
pathsInSpec = list(oldSpec.keys())
pathsInSpec.sort()
'''
	We have to be sure that every resource from every possible server is taken
	into consideration.
'''
regPaths = []
concPaths = []
for i in range(len(pathsInSpec)):
suffix = ''
actualPath = pathsInSpec[i]
actualPath = actualPath.replace('*', '.*')
# print('path:', pathsInSpec[i], 'actualPath:', actualPath)
for b in basesInSpec:
suffix = suffix + '(' + b.replace('/', '/') + ')|'
regPaths.append('(' + suffix[:-1] + ')' + actualPath)
'''
From every path in the specification extract a regular expression
for pattern matching with the actual paths found in the requests.
'''
	paths_re = [re.sub(r'\{[^{}]*\}', '[^/]+', x) for x in regPaths]
paths_re = [x + '?$' if x[-1] == '/' else x + '/?$' for x in paths_re]
paths_re = [re.compile(x) for x in paths_re]
print(paths_re)
opDict = {}
pairDict = {}
fpDict = {}
print("inferred Paths: {}".format(inferredPaths))
for inferredPath in inferredPaths:
inferredPathWhole = inferredBases[0] + inferredPath
buildPath = inferredPathWhole.replace("{", "")
buildPath = buildPath.replace("}", "")
print(buildPath)
match = False
for (r, path) in zip(paths_re, pathsInSpec):
print('re:', r, 'path:', path)
if r.match(buildPath):
match = True
inferredOperations = inferredDict[inferredPath].keys()
originalOperations = oldSpec[path].keys()
print(inferredOperations)
print(originalOperations)
for opKey in inferredOperations:
if opKey in originalOperations:
opDict[inferredPath+"-"+opKey] = path+"-"+opKey
else:
op_FP.append(inferredPath+"-"+opKey)
pairDict[inferredPath] = path
print(pairDict)
break
if not match:
# build concrete paths from examples and match
concMatch = False
try:
concMatches = []
specElem = inferredSpec['paths'][inferredPath]['parameters']
buildPaths = []
if specElem is not None and len(specElem) >0:
for exampleIndex in range(len(specElem[0]['examples'].values())):
inferredPathWhole = inferredBases[0] + inferredPath
buildPath = inferredPathWhole
for parameter in specElem:
print(parameter['examples'])
print(parameter['examples'].values())
buildPath = buildPath.replace('{'+parameter['name'] +'}', list(parameter['examples'].values())[exampleIndex]['value'])
if buildPath in buildPaths:
continue
else:
buildPaths.append(buildPath)
for (r, path) in zip(paths_re, pathsInSpec):
print('re:', r, 'path:', path)
if r.match(buildPath):
if path in concMatches:
break
concMatch = True
concMatches.append(path)
fpDict[buildPath] = inferredPath
inferredOperations = inferredDict[inferredPath].keys()
originalOperations = oldSpec[path].keys()
print(inferredOperations)
print(originalOperations)
for opKey in inferredOperations:
if opKey in originalOperations:
opDict[buildPath+"-"+opKey] = path+"-"+opKey
else:
op_FP.append(buildPath+"-"+opKey)
pairDict[buildPath] = path
print(pairDict)
break
except Exception as ex:
print(ex)
if not concMatch:
path_FP.append(inferredPath + "-" + str(inferredDict[inferredPath].keys()))
inferredOperations = inferredDict[inferredPath].keys()
for opKey in inferredOperations:
op_FP.append(inferredPath+"-"+opKey)
print(pairDict)
print(fpDict)
numVars = 0
matchedVars, fpVars, allVars = compareParams(pairDict, fpDict, pathsInSpec, basesInSpec, inferredBases)
covered = set(pairDict.values())
coverage = len(covered)/len(pathsInSpec)
precision = len(covered)/len(pairDict.keys())
print(covered)
print(pathsInSpec)
print("coverage : {}, precision : {}".format(coverage, precision))
opCovered = set(opDict.values())
totalOperations = 0
for path in oldSpec.keys():
totalOperations+=len(oldSpec[path].keys())
operationPr= len(opCovered)/len(opDict.keys())
operationRe= len(opCovered)/totalOperations
# covDict = {"pathPr": precision, "pathRe": coverage, "operationPr": operationPr, "operationRe": operationRe}
covDict = {
"path":{
"matched": len(pairDict.keys()),
"unmatched": len(path_FP),
"matched_unique": len(covered),
"gt": len(pathsInSpec),
"fp": path_FP
},
"op":{
"matched": len(opDict.keys()),
"unmatched": len(op_FP),
"matched_unique": len(opCovered),
"gt": totalOperations,
"fp": op_FP
},
"var":{
"matched": len(matchedVars),
"fp": len(fpVars),
"gt": len(allVars)
}
}
print(covDict)
with open(confDict["specReports"] + '/stats.json', 'w+') as out:
json.dump(covDict, out, indent='\t')
print('Metrics and statistics computed successfully. Reports are available at', confDict["specReports"])
return covDict
def compareSpecs_base(oldSpec, inferredDict, confDict):
pathVarFP = 0
path_TP = 0
path_TN = 0
path_FP = []
path_FN = 0
op_TP = 0
op_TN = 0
op_FP = []
op_FN = 0
inferredBases = inferredDict.pop('bases')
inferredPaths = list(inferredDict.keys())
dbFile = confDict['specDbPath']
'''
Sorting paths in the specification to try to avoid path collision:
"/user/{id}" and "/user/auth" have a collision because the second
can be matched by the regex of the first. With a sorted list, the order is
	inverted, so the first matching regex should be the right one.
'''
basesInSpec = oldSpec.pop('bases')
pathsInSpec = list(oldSpec.keys())
pathsInSpec.sort()
'''
	We have to be sure that every resource from every possible server is taken
	into consideration.
'''
regPaths = []
concPaths = []
for i in range(len(pathsInSpec)):
suffix = ''
actualPath = pathsInSpec[i]
actualPath = actualPath.replace('*', '.*')
# print('path:', pathsInSpec[i], 'actualPath:', actualPath)
for b in basesInSpec:
suffix = suffix + '(' + b.replace('/', '/') + ')|'
regPaths.append('(' + suffix[:-1] + ')' + actualPath)
'''
From every path in the specification extract a regular expression
for pattern matching with the actual paths found in the requests.
'''
	paths_re = [re.sub(r'\{[^{}]*\}', '[^/]+', x) for x in regPaths]
paths_re = [x + '?$' if x[-1] == '/' else x + '/?$' for x in paths_re]
paths_re = [re.compile(x) for x in paths_re]
print(paths_re)
opDict = {}
pairDict = {}
print("inferred Paths: {}".format(inferredPaths))
for inferredPath in inferredPaths:
buildPath = inferredPath.replace("{", "")
buildPath = buildPath.replace("}", "")
print(buildPath)
match = False
for (r, path) in zip(paths_re, pathsInSpec):
print('re:', r, 'path:', path)
if r.match(buildPath):
match = True
inferredOperations = inferredDict[inferredPath].keys()
originalOperations = oldSpec[path].keys()
print(inferredOperations)
print(originalOperations)
for opKey in inferredOperations:
if opKey in originalOperations:
opDict[inferredPath+"-"+opKey] = path+"-"+opKey
else:
op_FP.append(inferredPath+"-"+opKey)
pairDict[inferredPath] = path
print(pairDict)
break
if not match:
path_FP.append(inferredPath + "-" + str(inferredDict[inferredPath].keys()))
print(pairDict)
numVars = 0
covered = set(pairDict.values())
coverage = len(covered)/len(pathsInSpec)
precision = len(covered)/len(pairDict.keys())
print(covered)
print(pathsInSpec)
print("coverage : {}, precision : {}".format(coverage, precision))
opCovered = set(opDict.values())
totalOperations = 0
for path in oldSpec.keys():
totalOperations+=len(oldSpec[path].keys())
operationPr= len(opCovered)/len(opDict.keys())
operationRe= len(opCovered)/totalOperations
# covDict = {"pathPr": precision, "pathRe": coverage, "operationPr": operationPr, "operationRe": operationRe}
covDict = {
"path":{
"matched": len(pairDict.keys()),
"unmatched": len(path_FP),
"matched_unique": len(covered),
"gt": len(pathsInSpec),
"fp": path_FP
},
"op":{
"matched": len(opDict.keys()),
"unmatched": len(op_FP),
"matched_unique": len(opCovered),
"gt": totalOperations,
"fp": op_FP
}
}
print(covDict)
with open(confDict["specReports"] + '/stats.json', 'w+') as out:
json.dump(covDict, out, indent='\t')
print('Metrics and statistics computed successfully. Reports are available at', confDict["specReports"])
return covDict
def generateSpecPairs(confDict, pathsInSpec, basesInSpec):
if "inferred" in confDict:
inferredSpec = confDict['inferred']
inferredDict = parsers.extractSpecificationData(inferredSpec)
else:
print("Add the key 'inferred' in config file for the spec to be compare")
return
inferredBases = inferredDict.pop('bases')
inferredPaths = inferredDict.keys()
dbFile = confDict['specDbPath']
'''
Sorting paths in the specification to try to avoid path collision:
"/user/{id}" and "/user/auth" have a collision because the second
can be matched by the regex of the first. With a sorted list, the order is
	inverted, so the first matching regex should be the right one.
'''
pathsInSpec.sort()
'''
	We have to be sure that every resource from every possible server is taken
	into consideration.
'''
regPaths = []
for i in range(len(pathsInSpec)):
suffix = ''
actualPath = pathsInSpec[i]
actualPath = actualPath.replace('*', '.*')
# print('path:', pathsInSpec[i], 'actualPath:', actualPath)
for b in basesInSpec:
suffix = suffix + '(' + b.replace('/', '/') + ')|'
regPaths.append('(' + suffix[:-1] + ')' + actualPath)
'''
From every path in the specification extract a regular expression
for pattern matching with the actual paths found in the requests.
'''
	paths_re = [re.sub(r'\{[^{}]*\}', '[^/]+', x) for x in regPaths]
paths_re = [x + '?$' if x[-1] == '/' else x + '/?$' for x in paths_re]
paths_re = [re.compile(x) for x in paths_re]
print(paths_re)
#####################
#### POPULATE DB ####
dbm.create_connection(dbFile)
dbm.createTables()
#### END ####
#####################
if inferredPaths is not None:
print("Adding json responses to DB")
addInferredSpecEntries(inferredDict, paths_re, pathsInSpec)
#####################
#### POPULATE DB ####
# dbm.getValues()
dbm.closeAndCommit()
#### END ####
#####################
if __name__=="__main__":
with open("/Users/apicarv/git/TestCarving/testCarver/out/parabank/20220710_035630/oas/20220710_043329/oas_conf.json") as j:
# with open("/Users/apicarv/git/TestCarving/testCarver/out/booker/20220711_144103/oas/20220711_151840/oas_conf.json") as j:
conf = json.load(j)
for k in conf:
conf[k] = conf[k][:-1] if conf[k][-1] == '/' else conf[k]
specDict = utils.parsers.extractSpecificationData(conf['specification'])
compareSpecs(specDict, conf)
| 23,948 | 27.612903 | 125 | py |
apicarver | apicarver-main/restats/utils/dbmanager.py | import sqlite3
from sqlite3 import Error
conn = None
def create_connection(dbfile):
""" create a database connection to the SQLite database
		specified by dbfile
	:param dbfile: database file
:return: Connection object or None
"""
global conn
try:
conn = sqlite3.connect(dbfile)
conn.execute('PRAGMA synchronous = OFF')
except Error as e:
print(e)
quit()
def closeAndCommit():
global conn
conn.commit()
conn.close()
def close():
global conn
conn.close()
def createTables():
with open('./utils/create_tables.sql', 'r') as sqlfile:
sql = sqlfile.read()
try:
conn.executescript(sql)
except Error as e:
pass
def getPathID(path):
c = conn.cursor()
sql = 'SELECT id FROM paths WHERE path = ?'
c.execute(sql, (path,))
row = c.fetchone()
	if row is None:
sql = 'INSERT INTO paths VALUES (NULL, ?)'
c.execute(sql, (path,))
conn.commit()
return getPathID(path)
return row[0]
def getParameterID(pathID, method, parameter):
method = method.lower()
c = conn.cursor()
sql = 'SELECT id FROM parameters WHERE name = ? AND method = ? AND path_id = ?'
c.execute(sql, (parameter, method, pathID))
row = c.fetchone()
	if row is None:
sql = 'INSERT INTO parameters VALUES (NULL, ?, ?, ?)'
c.execute(sql, (parameter, method, pathID))
conn.commit()
return getParameterID(pathID, method, parameter)
return row[0]
def addParameterValue(parameterID, value):
sql = 'INSERT OR IGNORE INTO pvalues VALUES (?, ?)'
conn.execute(sql, (parameterID, str(value)))
def addResponse(pathID, method, status, cType):
method = method.lower()
sql = 'INSERT OR IGNORE INTO responses VALUES (?, ?, ?, ?)'
conn.execute(sql, (pathID, method, status, cType))
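# Illustrative sketch (not part of the original module): the typical
# population flow. getPathID/getParameterID follow a get-or-create pattern
# (a miss triggers an INSERT and a recursive re-query). The path and values
# are made up; createTables assumes ./utils/create_tables.sql is reachable.
def _demo_populate(dbfile=':memory:'):
	create_connection(dbfile)
	createTables()
	pathID = getPathID('/pets/{id}')
	paramID = getParameterID(pathID, 'get', 'limit')
	addParameterValue(paramID, '5')
	addResponse(pathID, 'get', '200', 'application/json')
	closeAndCommit()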
def getValues():
sql = 'SELECT * FROM paths'
c = conn.cursor()
c.execute(sql)
for l in c.fetchall():
print(l)
sql = 'SELECT * FROM parameters'
c = conn.cursor()
c.execute(sql)
for l in c.fetchall():
print(l)
sql = 'SELECT * FROM pvalues'
c = conn.cursor()
c.execute(sql)
for l in c.fetchall():
print(l)
sql = 'SELECT * FROM responses'
c = conn.cursor()
c.execute(sql)
for l in c.fetchall():
print(l)
def getPathNames():
sql = 'SELECT path FROM paths'
paths = [row[0] for row in conn.execute(sql)]
return paths
def getPathCount():
sql = 'SELECT COUNT(*) FROM paths'
return conn.execute(sql).fetchone()[0]
def getOperationNames():
sql = '''
SELECT parameters.method, paths.path
FROM parameters JOIN paths ON parameters.path_id = paths.id
GROUP BY parameters.method, paths.path
'''
return conn.execute(sql).fetchall()
def getOperationCount():
sql = '''
SELECT COUNT(*)
FROM
(SELECT 1 FROM parameters GROUP BY method, path_id)
'''
return conn.execute(sql).fetchone()[0]
def getStatusCodes():
sql = '''
SELECT p.path, r.method, r.status
FROM responses AS r JOIN paths AS p ON r.path_id = p.id
GROUP BY p.path, r.method, r.status
'''
return conn.execute(sql).fetchall()
# NOT WORKING AS EXPECTED:
# it also counts response statuses that are not expected in the specification,
# i.e. the status codes actually produced by the API during testing.
def getStatusCount():
sql = '''
SELECT COUNT(*)
FROM
(SELECT 1 FROM responses GROUP BY path_id, method, status)
'''
return conn.execute(sql).fetchone()[0]
def getResponseTypes():
sql = '''
SELECT p.path, r.method, r.content_type
FROM responses AS r JOIN paths AS p ON r.path_id = p.id
GROUP BY p.path, r.method, r.content_type
'''
return conn.execute(sql).fetchall()
# NOT WORKING:
# it also counts response types that are not expected in the specification,
# i.e. the content types actually produced by the API during testing.
def getResponseTypesCount():
sql = '''
SELECT COUNT(*)
FROM
(SELECT DISTINCT path_id, method, content_type FROM responses)
'''
return conn.execute(sql).fetchone()[0]
def getRequestTypes():
sql = '''
SELECT p.path, v.method, v.value
FROM paths AS p JOIN
(SELECT p.path_id AS path_id, p.method AS method, v.value AS value
FROM pvalues AS v JOIN
(SELECT *
FROM parameters
WHERE name = "Content-Type" AND method IN ("post", "put", "patch", "delete"))
AS p ON v.param_id = p.id)
AS v ON p.id = v.path_id
'''
return conn.execute(sql).fetchall()
def getRequestTypesCount():
sql = '''
SELECT COUNT(*)
FROM pvalues AS v JOIN
(SELECT *
FROM parameters
WHERE name = "Content-Type" AND method IN ("post", "put", "patch", "delete"))
AS p ON v.param_id = p.id
'''
return conn.execute(sql).fetchone()[0]
def getParameters():
sql = '''
SELECT pt.path, pm.method, pm.name
FROM parameters AS pm JOIN paths AS pt ON pm.path_id = pt.id
'''
return conn.execute(sql).fetchall()
# Headers are counted as params. The following count is wrong because it
# includes unexpected headers that are not considered in the specification.
def getParametersCount():
sql = 'SELECT COUNT(*) FROM parameters'
return conn.execute(sql).fetchone()[0]
# Used to retrieve path id for parameter value coverage
def getPathIDByName(path):
sql = 'SELECT id FROM paths WHERE path = ?'
try:
res = conn.execute(sql, (path,)).fetchone()[0]
return res
except TypeError:
return None
def getParameterValues(path_id, method, paramName):
sql = '''
SELECT v.value
FROM pvalues AS v JOIN
(SELECT id FROM parameters WHERE path_id = ? AND method = ? AND name = ?)
AS p ON p.id = v.param_id
'''
res = conn.execute(sql, (path_id, method, paramName)).fetchall()
res = [x[0] for x in res]
return res | 6,221 | 22.044444 | 93 | py |
apicarver | apicarver-main/restats/utils/parsers.py | import traceback
from urllib.parse import parse_qs, urlsplit
import json
import ruamel.yaml
methodsWithRequestBody = {'post', 'put', 'patch'}
def parsePostData(postData):
params = {}
if postData is None:
print("Cannot parse None")
return None
	if isinstance(postData, dict) and "string" in postData:
postData = postData["string"]
try:
split = postData.split(sep="&")
for splitItem in split:
if splitItem is None or len(splitItem.strip()) == 0:
continue
paramItemSplit = splitItem.split(sep="=")
if len(paramItemSplit) >= 2:
name = paramItemSplit[0]
value = "".join(paramItemSplit[1:])
elif len(paramItemSplit) == 1:
name = paramItemSplit[0]
value = ''
else:
continue
params[name] = value
except:
print("cannot parse {}".format(postData))
return params
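# Illustrative sketch (not part of the original module): parsePostData on a
# made-up urlencoded body.
# parsePostData("name=Tom&tag=") -> {'name': 'Tom', 'tag': ''}
def _demo_parse_post_data():
	return parsePostData("name=Tom&tag=")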
def parseJsonResult(jsonFile):
results = []
try:
with open(jsonFile) as jsonF:
results = json.load(jsonF)
except:
print("Cannot get results from " + str(jsonFile))
return
if len(results) <= 0:
print("Could not find any results in : " + str(jsonFile))
return []
returnDict = []
for result in results:
returnDictElem = JsonResponse2Dict(result)
print(returnDictElem)
returnDict.append(returnDictElem)
# print(results)
return returnDict
def yamlResponse2Dict(yamlResult):
request = yamlResult["request"]
response = yamlResult["response"]
if 'body' in request:
postData = parsePostData(request['body'])
else:
postData = {}
parameters = []
path = urlsplit(request['uri'])[2]
queryParam = parse_qs(urlsplit(request["uri"])[3])
# Add query parameters in the parameters dictionary
for k, v in queryParam.items():
parameters.append({'in': 'query', 'name': k, 'value': v[0]})
for header in request["headers"]:
parameters.append({'in': 'header', 'name': header, 'value': request["headers"][header]})
requestDict = {
'method': request["method"].lower(),
'url': request["uri"],
'version': "HTTP 1.0",
'path': path,
'parameters': parameters,
'body': postData
}
if response is not None:
status = response["status"]["code"]
message = response["status"]["message"]
responseParams = []
for header in response["headers"]:
			responseParams.append({'in': 'header', 'name': header, 'value': response["headers"][header]})
# body = response["body"]
body = ''
responseDict = {
'status': status,
'message': message,
'parameters': responseParams,
'body': body
}
else:
responseDict = {}
return {"request": requestDict, "response": responseDict}
def JsonResponse2Dict(jsonResult):
request = jsonResult["request"]
if "response" in jsonResult:
response = jsonResult["response"]
else:
response = None
if "postData" in request:
postData = parsePostData(request["postData"])
else:
postData = {}
parameters = []
path = urlsplit(request["requestUrl"])[2]
# Parse query parameters
# It is a dictionary like
# {param_name : [param_value]}
queryParam = parse_qs(urlsplit(request["requestUrl"])[3])
# Add query parameters in the parameters dictionary
for k, v in queryParam.items():
parameters.append({'in': 'query', 'name': k, 'value': v[0]})
for header in request["headers"]:
parameters.append({'in': 'header', 'name': header["name"], 'value': header["value"]})
requestDict = {
'method': request["method"].lower(),
'url': request["requestUrl"],
'version': "HTTP 1.0",
'path': path,
'parameters': parameters,
'body': postData
}
if response is not None:
status = response["status"]
message = response["message"]
responseParams = []
for header in response["headers"]:
parameters.append({'in': 'header', 'name': header["name"], 'value': header["value"]})
# body = response["body"]
body = ''
responseDict = {
'status': status,
'message': message,
'parameters': responseParams,
'body': body
}
else:
responseDict = {}
return {"request": requestDict, "response": responseDict}
def RawHTTPRequest2Dict(requestFile):
"""
Parses a raw HTTP Request from a file and casts it to a dictionary.
"""
method = ''
url = ''
endpoint = ''
parameters = []
body = {}
hasFormParams = hasJSONbody = False
# Have to open the file as binary because of the payload
with requestFile.open('rb') as f:
line = str(f.readline(), 'UTF-8')
line = line.split()
method = line[0]
url = line[1]
version = line[2]
path = urlsplit(url)[2]
# Parse query parameters
# It is a dictionary like
# {param_name : [param_value]}
queryParam = parse_qs(urlsplit(url)[3])
# Add query parameters in the parameters dictionary
for k, v in queryParam.items():
parameters.append({'in': 'query', 'name': k, 'value': v[0]})
# The first line has already been read. The next lines contain the headers
lines = f.readlines()
for i in range(len(lines)):
line = str(lines[i], 'UTF-8')
# If an empty line is found, then there are no other headers.
# There could only be some POST parameters left to parse.
if line == '\r\n':
stop = i
break
line = line.split(':')
name = line[0]
value = ':'.join([x.strip(' \r\n') for x in line[1:]])
# Remove burp plugin injected header
if name == 'X-Burp-Comment': continue
# Add every header as parameter
parameters.append({'in': 'header', 'name': name, 'value': value})
# Check if POST (form) parameters are present
if name == 'Content-Type' and value == 'application/x-www-form-urlencoded':
hasFormParams = True
# Check if there is a json body
elif name == 'Content-Type' and value in ('application/vnd.api+json', 'application/json'):
hasJSONbody = True
if hasFormParams:
line = str(f.readline(), 'UTF-8')
formParams = parse_qs(line)
for k, v in formParams.items():
parameters.append({'in': 'query', 'name': k, 'value': v[0]})
elif hasJSONbody:
lines = [str(x, 'UTF-8') for x in lines[stop + 1:]]
lines = " ".join(lines)
try:
body = dict(json.loads(lines))
except:
body = {}
return {
'method': method.lower(),
'url': url,
'version': version,
'path': path,
'parameters': parameters,
'body': body
}
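# Illustrative sketch (not part of the original module): parse a made-up raw
# GET request dumped to a temporary file.
def _demo_raw_request():
	from pathlib import Path
	import tempfile
	raw = b"GET /api/pets?limit=5 HTTP/1.1\r\nHost: localhost\r\n\r\n"
	p = Path(tempfile.mkdtemp()) / '0-request'
	p.write_bytes(raw)
	parsed = RawHTTPRequest2Dict(p)
	# parsed['method'] == 'get'; parsed['path'] == '/api/pets';
	# parameters include {'in': 'query', 'name': 'limit', 'value': '5'}
	return parsed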
def RawHTTPResponse2Dict(responseFile):
"""
Parses a raw HTTP Response from a file and casts it to a dictionary.
"""
status = ''
message = ''
parameters = []
body = ''
# Have to open the file as binary because of the payload
with responseFile.open('rb') as f:
line = str(f.readline(), 'UTF-8')
# Check whether the file is empty or not
if line == '': return {}
line = line.split()
status = line[1]
message = ' '.join(line[2:]) # Joins with a whitespace all the words from the message
# The first line has already been read. The next lines contain the headers
for line in f.readlines():
line = str(line, 'UTF-8')
# If an empty line is found, then there are no other headers.
if line == '\r\n': break
line = line.split(':')
# Remove burp plugin injected header
if line[0] == 'X-Burp-Comment': continue
# If the header is the content type, cut out the charset part (if any)
if line[0] == 'Content-Type':
line[1] = line[1].split(';')[0]
parameters.append(
{'in': 'header', 'name': line[0], 'value': ':'.join([x.strip(' \r\n') for x in line[1:]])})
return {
'status': status,
'message': message,
'parameters': parameters,
'body': body
}
def pair2json(pairDict, number, dirPath):
"""
Takes a dictionary with a pair request/response and saves it as a JSON file.
The suffix of the file name depends on the current pair number retrieved from
the log files.
"""
	filename = dirPath + '/' + number + '-pair.json'
with open(filename, 'w+') as out:
json.dump(pairDict, out, indent='\t')
def json2pair(jsonFile):
with open(jsonFile) as jf:
data = json.load(jf)
return data
def extractSpecificationData(specFile):
'''
	In practice this parsing only removes some fields from the specification
	and makes it easier to use as a Python dict rather than as raw JSON
	(which is awkward for computing most of the results).
{
'path1' :
{
'GET' :
{
'parameters' :
{
'param1' : {enum1, enum2},
'param2' : {true, false},
'param3' : {}
},
'responses' : {status1, status2, status3, ...},
'produces' : {type1, type2, type3, ...},
'consumes' : {type1, type2, type3, ...}
},
'POST':
{
'parameters' :
{
'param1' : {enum1, enum2},
'param2' : {true, false},
'param3' : {}
},
'responses' : {status1, status2, status3, ...},
'produces' : {type1, type2, type3, ...},
'consumes' : {type1, type2, type3, ...}
}
}
	For parameters the location must also be stored, to avoid clashes between
	parameters with the same name in different places. If a parameter is an
	enum or a bool, its possible values are stored as well.
	Everything goes into a dictionary of dictionaries of sets (more convenient
	than a list). Statuses and content types can go into a plain set.
'''
try:
if ('.yml' in specFile) or ('.yaml' in specFile):
with open(specFile) as spec:
yaml = ruamel.yaml.YAML(typ='safe')
data = yaml.load(spec)
data = json.loads(json.dumps(data))
else:
with open(specFile) as spec:
data = json.load(spec)
# Check the specification version
if 'swagger' in data.keys():
extractedData = parseSwagger2(data)
elif 'openapi' in data.keys():
extractedData = parseOpenAPI3(data)
else:
raise Exception('Version not parsable')
return extractedData
except Exception as ex:
print(ex)
		traceback.print_exc()
print('Could not open specification file.')
quit()
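# Illustrative sketch (not part of the original module): a minimal, made-up
# Swagger 2 document and the shape parseSwagger2 extracts from it.
def _demo_parse_swagger2():
	spec = {
		'swagger': '2.0',
		'basePath': '/api',
		'paths': {
			'/pets': {
				'get': {
					'parameters': [{'name': 'limit', 'in': 'query', 'type': 'integer'}],
					'responses': {'200': {}},
					'produces': ['application/json'],
				}
			}
		},
	}
	parsed = parseSwagger2(spec)
	# parsed['bases'] == ['/api']
	# parsed['/pets']['get']['parameters'] == {'limit': []}
	# parsed['/pets']['get']['responses'] == ['200']
	return parsed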
def parseOpenAPI3(data):
newSpec = dict()
# Add the base path of every resource served by the API
if 'servers' in data.keys():
newSpec['bases'] = [urlsplit(s['url'])[2] for s in data['servers']]
# Since the standard defines that every path MUST begin with a '/', it is
# not needed in the base path
newSpec['bases'] = [p[0:-1] if len(p) > 0 and p[-1] == '/' else p for p in newSpec['bases']]
newSpec['bases'] = list(set(newSpec['bases']))
else:
newSpec['bases'] = ['/']
# Iterate through all the paths in the specification
for path in data['paths']:
newSpec[path] = {}
# Get every parameter description for the method
# Before check it there are parameters
pathParameters = []
parameters = {}
if 'parameters' in data['paths'][path].keys():
for parameter in data['paths'][path]['parameters']:
if parameter['in'] == 'header':
continue
# If parameter in path treat it differently (for parameter coverage)
if parameter['in'] == 'path':
pathParameters.append(parameter['name'])
# In OpenAPI 3 there could be schema xor content
elif 'schema' in parameter.keys():
if 'enum' in parameter['schema'].keys():
parameters[parameter['name']] = parameter['schema']['enum']
elif parameter['schema']['type'] == 'boolean':
# Use true, false instead of False, True because of python json serialization
parameters[parameter['name']] = ['true', 'false']
else:
parameters[parameter['name']] = []
else:
parameters[parameter['name']] = []
# Iterate through all the methods of a path
for method in data['paths'][path]:
method = method.lower()
if method == 'parameters':
#Ignore common parameters. It is not a method
continue
newSpec[path][method] = \
{'parameters': parameters, 'pathParameters': pathParameters, 'responses': [], 'produces': [], 'consumes': []}
# Get every parameter description for the method
# Before check it there are parameters
if 'parameters' in data['paths'][path][method].keys():
for parameter in data['paths'][path][method]['parameters']:
if parameter['in'] == 'header':
continue
# If parameter in path treat it differently (for parameter coverage)
if parameter['in'] == 'path':
newSpec[path][method]['pathParameters'].append(parameter['name'])
# In OpenAPI 3 there could be schema xor content
elif 'schema' in parameter.keys():
if 'enum' in parameter['schema'].keys():
newSpec[path][method]['parameters'][parameter['name']] = parameter['schema']['enum']
elif parameter['schema']['type'] == 'boolean':
# Use true, false instead of False, True because of python json serialization
newSpec[path][method]['parameters'][parameter['name']] = ['true', 'false']
else:
newSpec[path][method]['parameters'][parameter['name']] = []
else:
newSpec[path][method]['parameters'][parameter['name']] = []
# Extract status codes
for status, val in data['paths'][path][method]['responses'].items():
newSpec[path][method]['responses'].append(status)
# Extract output content-types
if 'content' in val.keys():
newSpec[path][method]['produces'] = newSpec[path][method]['produces'] + list(val['content'].keys())
# Extract input content-types
if method in methodsWithRequestBody:
# Check the content-type header parameter
# It overwrites the consumes: this header is always present in an HTTP request.
if 'Content-Type' in newSpec[path][method]['parameters'].keys():
newSpec[path][method]['consumes'] = newSpec[path][method]['parameters']['Content-Type']
elif 'requestBody' in data['paths'][path][method].keys():
newSpec[path][method]['consumes'] = list(
data['paths'][path][method]['requestBody']['content'].keys())
# Remove duplicates in produces and consumes
newSpec[path][method]['produces'] = list(set(newSpec[path][method]['produces']))
# newSpec[path][method]['consumes'] = list(set(newSpec[path][method]['consumes']))
return newSpec
def parseSwagger2(data):
newSpec = dict()
defaultConsumes = [] # only affects operations with a request body
defaultProduces = []
# Set default consumes & produces
if 'consumes' in data.keys():
defaultConsumes = data['consumes']
if 'produces' in data.keys():
defaultProduces = data['produces']
# Add the base path of every resource served by the API
if 'basePath' in data.keys():
p = data['basePath']
# Since the standard defines that every path MUST begin with a '/', it is
# not needed in the base path
newSpec['bases'] = [p[0:-1] if len(p) > 0 and p[-1] == '/' else p]
else:
newSpec['bases'] = ['']
# Iterate through all the paths in the specification
for path in data['paths']:
newSpec[path] = {}
# Iterate through all the methods of a path
for method in data['paths'][path]:
method = method.lower()
newSpec[path][method] = \
{'parameters': {}, 'pathParameters': [], 'responses': [], 'produces': [], 'consumes': []}
# Get every parameter description for the method
# Before check if there are parameters
if 'parameters' in data['paths'][path][method].keys():
for parameter in data['paths'][path][method]['parameters']:
# If parameter in path treat it differently (for parameter coverage)
if parameter['in'] == 'path':
newSpec[path][method]['pathParameters'].append(parameter['name'])
# If parameter in body it is not counted in the parameter coverage, so it is not added
elif parameter['in'] == 'body':
continue
# If the parameter has the 'enum' field, save the possible values
elif 'enum' in parameter.keys():
newSpec[path][method]['parameters'][parameter['name']] = parameter['enum']
				# schema is only used with in: body parameters; any other parameter expects a primitive type.
				# There could be a schema instead of a type.
elif 'type' in parameter.keys() and parameter['type'] == 'boolean':
# Use true, false instead of False, True because of python json serialization
newSpec[path][method]['parameters'][parameter['name']] = ['true', 'false']
else:
newSpec[path][method]['parameters'][parameter['name']] = []
# Extract status codes
newSpec[path][method]['responses'] = list(data['paths'][path][method]['responses'].keys())
# Extract input content-types
if method in methodsWithRequestBody:
# Check the content-type header parameter
# It overwrites the consumes: this header is always present in an HTTP request.
if 'Content-Type' in newSpec[path][method]['parameters'].keys():
newSpec[path][method]['consumes'] = newSpec[path][method]['parameters']['Content-Type']
elif 'consumes' in data['paths'][path][method].keys():
newSpec[path][method]['consumes'] = data['paths'][path][method]['consumes']
else:
newSpec[path][method]['consumes'] = defaultConsumes
# Check also the content-type header parameter
# Extract output content-types
if 'produces' in data['paths'][path][method].keys():
newSpec[path][method]['produces'] = data['paths'][path][method]['produces']
else:
newSpec[path][method]['produces'] = defaultProduces
return newSpec
if __name__ == '__main__':
# d = extractSpecificationData('../specifications/slim.json')
spec = extractSpecificationData("../petclinic/spec.json")
resultDict = parseJsonResult(
"../petclinic/resultResponses.json")
print()
| 17,544 | 26.982456 | 113 | py |
apicarver | apicarver-main/restats/utils/__init__.py | 0 | 0 | 0 | py |
|
apicarver | apicarver-main/testCarver/pythonCode/runEvoMaster.py | import glob
import os
from datetime import datetime
import constants
from constants import APPS, STATUS_SUCCESSFUL, STATUS_SKIPPED, STATUS_ERRORED, CASETTE_YAML
from utilsRun import monitorProcess, cleanup, startProcess, restartDocker, MODE
def runAllApps(RUNTIME=30):
succesful = []
unsuccesful = []
skipped = []
for app in APPS:
if app in excludeApps:
continue
results = runAlgo(app, RUNTIME=RUNTIME)
for result in results:
status = result["status"]
command = result["command"]
if status == STATUS_SUCCESSFUL:
succesful.append(command)
elif status == STATUS_SKIPPED:
skipped.append(command)
elif status == STATUS_ERRORED:
unsuccesful.append(command)
print("succesful : {0}".format(str(len(succesful))))
print(succesful)
print("skipped : {0}".format(str(len(skipped))))
print(skipped)
print("unsuccesful : {0}".format(str(len(unsuccesful))))
print(unsuccesful)
if DRY_RUN:
print("Predicted run time : " + str(RUNTIME * len(succesful)))
def getExistingRuns(appName, ALL_CRAWLS=os.path.join(os.path.abspath(".."), "out")):
gtYaml = []
crawljaxOutputPath = os.path.abspath(os.path.join(ALL_CRAWLS, appName))
if os.path.exists(crawljaxOutputPath):
gtYaml = glob.glob(crawljaxOutputPath + "/" + constants.EVOMASTER_OUTPUT + "/" + CASETTE_YAML)
return {"path": crawljaxOutputPath, "existingValidCrawls": gtYaml}
return {"path": None, "gtYaml": gtYaml}
def getSwaggerUrl(appName):
if appName == "petclinic":
return "http://localhost:9966/petclinic/v3/api-docs"
elif appName == "parabank":
return "http://localhost:8080/parabank-3.0.0-SNAPSHOT/services/bank/swagger.yaml"
elif appName == "realworld":
return "http://localhost:3000/api"
elif appName == "booker":
return {"booking": "http://localhost:3000/booking/v3/api-docs/booking-api",
"branding" : "http://localhost:3002/branding/v3/api-docs/branding-api",
"message": "http://localhost:3006/message/v3/api-docs/message-api",
"report": "http://localhost:3005/report/v3/api-docs/report-api",
"room": "http://localhost:3001/room/v3/api-docs/room-api",
"auth": "http://localhost:3004/auth/v3/api-docs/auth-api"
}
elif appName == "jawa":
return "http://localhost:8080/v2/api-docs"
elif appName == "ecomm":
return "http://localhost:8080/api/v2/api-docs"
elif appName == "medical":
return "http://localhost:8080/v2/api-docs"
elif appName == "shopizer":
return "http://localhost:8080/v2/api-docs"
RUN_EVOMASTER_COMMAND = ['java', '-jar', './libs/evomaster.jar', '--blackBox', 'true']
def buildEvoMasterCommand(outputDir, baseURL, maxTime, targetURL=None):
command = RUN_EVOMASTER_COMMAND.copy()
command.append("--bbSwaggerUrl")
command.append(baseURL)
command.append('--outputFormat')
command.append('JAVA_JUNIT_4')
if not os.path.exists(outputDir):
os.makedirs(outputDir)
command.append('--outputFolder')
command.append(outputDir)
command.append('--maxTime')
command.append(maxTime)
if targetURL is not None:
command.append('--bbTargetUrl')
command.append(targetURL)
return command
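# Illustrative sketch (not part of the original module): assemble, without
# running, a 10-minute black-box command for a made-up swagger URL. Note the
# output folder is created as a side effect of the call.
def _demo_build_command(tmp_out='/tmp/evomaster_demo'):
	cmd = buildEvoMasterCommand(tmp_out, 'http://localhost:9966/petclinic/v3/api-docs', '10m')
	# cmd == RUN_EVOMASTER_COMMAND + ['--bbSwaggerUrl', <url>, '--outputFormat',
	#        'JAVA_JUNIT_4', '--outputFolder', tmp_out, '--maxTime', '10m']
	return cmd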
def runAlgo(appName, RUNTIME=60,
logFile=os.path.join("../logs", "evomaster_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"),
rerun=False, EVOMASTER_OUTPUT="evomaster"):
	maxTime = str(RUNTIME) + "m"
results = []
commands = []
# For GroundTruth OpenAPI
srcPath = os.path.join("..", "src", "main", "resources", "webapps", appName)
openApiPath = os.path.join(srcPath, "openapi.yml")
for runIndex in range(1):
curr_commands = []
outputDir = os.path.join("..", "out", appName, EVOMASTER_OUTPUT, str(runIndex))
if appName == "parabank":
# No online swagger available
command = buildEvoMasterCommand(outputDir=outputDir, baseURL=getSwaggerUrl(appName), maxTime=maxTime, targetURL=constants.getHostURL(appName))
curr_commands.append(command)
elif appName == "booker":
baseURLs=getSwaggerUrl(appName)
for key in baseURLs.keys():
curr_commands.append(buildEvoMasterCommand(outputDir=os.path.join(outputDir, key), baseURL=baseURLs[key], maxTime=str(round(RUNTIME/6)+1)+'m'))
else:
command = buildEvoMasterCommand(outputDir=outputDir, baseURL=getSwaggerUrl(appName), maxTime=maxTime)
curr_commands.append(command)
if (not rerun) and os.path.exists(os.path.join(outputDir, constants.CASETTE_YAML)):
# There is a previous execution and rerun is disabled
results.append({"command": curr_commands, "status": STATUS_SKIPPED, "message": "previous execution data exists"})
else:
commands.append({"command":curr_commands, "outputDir": outputDir})
if not DRY_RUN:
SLEEPTIME = 30
if appName=="shopizer":
SLEEPTIME= 120
restartDocker(appName, SLEEPTIME)
for command in curr_commands:
if DRY_RUN:
results.append({"command": command, "status": STATUS_SUCCESSFUL, "message": "DRYRUN"})
continue
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=None)
			if proc is None:
print("Ignoring error command.")
results.append({"command": command, "status": STATUS_ERRORED, "message": "Command could not be executed"})
continue
status = monitorProcess(proc, timeStep=30)
print("Done : {0}".format(command))
results.append({"command": command, "status": STATUS_SUCCESSFUL, "message": "Succesful"})
if not DRY_RUN:
cleanup(MODE.ST, appName, os.path.join(outputDir, "cov"))
# if DRY_RUN:
# status = STATUS_SUCCESSFUL
# return results
#
# if isDockerized(appName):
# restartDocker(getDockerName(appName))
return results
def getExistingTest():
for app in APPS:
print(getExistingRuns(app))
DRY_RUN = False
excludeApps = ['tmf', 'mdh']
if __name__ == "__main__":
print("hello")
# getExistingTest()
runAllApps(RUNTIME=2)
| 6,598 | 35.865922 | 160 | py |
apicarver | apicarver-main/testCarver/pythonCode/constants.py | import os.path
# APPS = ['medical']
APPS = ['petclinic', 'parabank', 'realworld', 'booker', 'jawa', 'medical', 'ecomm']
DOCKER_LOCATION = os.path.abspath('../src/main/resources/webapps')
RUN_CARVER_COMMAND = ['java', '-Xmx8G', '-Xss1G', '-cp', 'target/testCarver-0.0.1-SNAPSHOT-jar-with-dependencies.jar',
'com.apicarv.testCarver.Main']
RUN_SCHEMATHESIS_COMMAND = [os.path.abspath('venv/bin/st'), 'run']
RESTATS_PATH = os.path.abspath('../../restats')
RUN_RESTATS_COMMAND = [os.path.abspath('venv/bin/python'),
os.path.join(RESTATS_PATH, 'app.py')]
JACOCO_MERGE_COMMAND = ['java', '-jar', os.path.abspath('libs/org.jacoco.cli-0.8.8-nodeps.jar'), 'merge']
JACOCO_REPORT_COMMAND = ['java', '-jar', os.path.abspath('libs/org.jacoco.cli-0.8.8-nodeps.jar'), 'report']
CASETTE_YAML = "cassette.yaml"
RESULT_RESPONSES_JSON = "resultResponses.json"
PROBER_RESPONSES_JSON = "allPrresultResponses.json"
INFERRED_YAML = "oas.yaml"
PROBER_YAML = "probe_oas.yaml"
INFERRED_JSON = "oas.json"
PROBER_JSON = "probe_oas.json"
ENHANCED_YAML = "openAPI_enhanced.yaml"
SCHEMATHESIS_OUTPUT = "schemathesis"
SCHEMATHESIS_CARVER = SCHEMATHESIS_OUTPUT + "_carver"
SCHEMATHESIS_PROBER = SCHEMATHESIS_OUTPUT + "_prober"
EVOMASTER_OUTPUT = "evomaster"
RESTATS_OUT_DIR = "reports"
COV_XML = "cov.xml"
COV_CARVER_XML = "covcarver.xml"
COV_PROBER_XML = "covprober.xml"
COV_JAWA_XML = "app.xml"
COV_JAWA_CARVER_XML = "appcarver.xml"
COV_JAWA_PROBER_XML = "appprober.xml"
COV_PARABANK_CARVER_XML = "jacococarver.xml"
COV_PARABANK_PROBER_XML = "jacocoprober.xml"
STATUS_SUCCESSFUL = "successful"
STATUS_NO_OUTPUT = "noOutput"
STATUS_STRAY_TERMINATED = "strayProcessTerminated_OutputObtained"
STATUS_SKIPPED = "skipped"
STATUS_ERRORED = "errored"
def getHostURL(appName):
if appName == "petclinic":
return "http://localhost:9966/petclinic/api"
if appName == "parabank":
return "http://localhost:8080/parabank-3.0.0-SNAPSHOT/services/"
if appName == "realworld":
return "http://localhost:3000/api"
if appName == "booker":
return "http://localhost:8080"
if appName == "jawa":
return "http://localhost:8080"
if appName == "shopizer":
return "http://localhost:8080/api"
if appName == "medical":
return "http://localhost:8080"
if appName == "ecomm":
return "http://localhost:8080/api"
return None
NYC_REPORT = "index.html"
NYC_CARVER_REPORT = "indexcarver.html"
NYC_PROBER_REPORT = "indexprober.html"
COV_EXEC = "*.exec"
BOOKER_MODULES = ["assets", "auth", "booking", "branding", "message", "report", "room"]
| 2,633 | 29.627907 | 118 | py |
apicarver | apicarver-main/testCarver/pythonCode/runGeneratedTests.py | import glob
import os
from datetime import datetime, timedelta
import constants
from constants import APPS, STATUS_SUCCESSFUL, STATUS_ERRORED
from utilsRun import restartDocker, startProcess, monitorProcess, getDockerName, cleanup, MODE, exportJson
# BASE_COMMAND_HYBRID = ['sh', 'runTests.sh']
BASE_COMMAND = ['sh', 'runTests.sh']
# BASE_COMMAND=['java', '-jar', '/art-fork_icseBranch/crawljax/examples/target/crawljax-examples-3.7-SNAPSHOT-jar-with-dependencies.jar']
def executeTestsDummy(appName, algo, crawl, url=None,
logFile=os.path.join("logs", "testRunLog_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"),
testResultsFolder=None):
try:
status = saveTestRunInfo(crawl=crawl, url=url,
dockerName=getDockerName(appName),
testResultsFolder=testResultsFolder,
version=APP_VERSION)
except Exception as ex:
print(ex)
print("Exception saving test run info")
status = False
def executeTests(appName, algo, crawl, url=None,
logFile=os.path.join("logs", "testRunLog_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"),
testResultsFolder=None):
command = BASE_COMMAND.copy()
command.append(crawl)
# if url is not None:
# command.append(url)
if appName in ["petclinic", "booker", "medical", "ecomm"]:
command.append(appName)
if DRY_RUN:
status = STATUS_SUCCESSFUL
return status, command
restartDocker(appName)
startTime = datetime.now()
proc = startProcess(command, logFile, changeDir=None, DEBUG=False)
	if proc is None:
print("Ignoring error command.")
status = STATUS_ERRORED
return status, command
	status = monitorProcess(proc, 6 * 60, timeStep=5)
print("Done : {0}".format(command))
endTime = datetime.now()
testDuration = (endTime - startTime)/timedelta(milliseconds=1)
try:
status = saveTestRunInfo(crawl=crawl, url=url,
dockerName=getDockerName(appName),
testResultsFolder=testResultsFolder,
version=APP_VERSION, duration = testDuration)
except Exception as ex:
print(ex)
print("Exception saving test run info")
status = False
cleanup(MODE.CARVER, appName=appName, outputDir=testResultsFolder)
return status, command
def saveTestRunInfo(crawl,url, dockerName=None, testResultsFolder=None, version=None, duration = None):
if version is None:
version=APP_VERSION
	testRunInfo = {'version': version, 'url': url, 'docker': dockerName, 'duration': duration}
	if testResultsFolder is None:
		testResultsFolder = os.path.join(crawl, 'test-results', '0')
		print("Assuming test results folder {0}".format(testResultsFolder))
	testRunInfoFile = os.path.join(testResultsFolder, 'testRunInfo.json')
if not os.path.exists(testResultsFolder):
print("Test results folder not found {0}".format(testResultsFolder))
print("Error: Test Run not successful!!")
return False
if os.path.exists(testRunInfoFile):
print("Error: Test run file already exists at {0}".format(testRunInfo))
return False
else:
print(testRunInfo)
if not DRY_RUN:
exportJson(testRunInfoFile, testRunInfo)
return True
def getTestRun(crawl):
returnList = []
testResultsFolder = os.path.join(crawl, "test-results")
if os.path.exists(testResultsFolder):
testRunList = os.listdir(testResultsFolder)
print("Found test runs {0}".format(testRunList))
for testRun in testRunList:
if testRun == '.DS_Store':
continue
returnList.append(os.path.join(testResultsFolder, testRun))
return returnList
return []
def runTests(crawl, rerun=False):
split = os.path.split(os.path.split(os.path.split(crawl)[0])[0])
appName = os.path.split(split[0])[1]
runInfo = split[1]
print(appName)
print(runInfo)
testRuns = getTestRun(crawl)
if len(testRuns) > 0:
if not rerun:
return False
else:
status, command = executeTests(
appName, "HYBRID", crawl,
url=None,
logFile=os.path.join(crawl, "testRun_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"),
testResultsFolder=os.path.join(crawl,'test-results', str(len(testRuns))))
print(command)
print(status)
return True
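# Illustrative sketch (not part of the original module): how runTests derives
# the app name and run-info folder from a crawl path. The path is made up.
def _demo_crawl_path_split():
	crawl = '/crawlOut/petclinic/petclinic_HYBRID_-1.0_30mins/localhost/crawl0'
	split = os.path.split(os.path.split(os.path.split(crawl)[0])[0])
	appName = os.path.split(split[0])[1]  # 'petclinic'
	runInfo = split[1]  # 'petclinic_HYBRID_-1.0_30mins'
	return appName, runInfo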
def runAllTests(crawls, rerun=False):
success = []
skipped = []
for crawl in crawls:
status = runTests(crawl, rerun)
if status:
success.append(crawl)
else:
skipped.append(crawl)
print("succeeded {0}: {1}".format(len(success), success))
print("skipped {0}: {1}".format(len(skipped), skipped))
return success, skipped
def getHostNames():
return ["localhost"]
def getExistingCrawl(appName, algo, threshold, runtime, ALL_CRAWLS = os.path.join(os.path.abspath(".."), "out")):
existingValidCrawls = []
hostNames = getHostNames()
for host in hostNames:
crawlFolderName = appName + "_" + algo + "_" + str(float(threshold))+ "_" + str(runtime) + "mins"
crawljaxOutputPath = os.path.abspath(os.path.join(ALL_CRAWLS, appName, crawlFolderName, host))
if os.path.exists(crawljaxOutputPath):
existingValidCrawls = glob.glob(crawljaxOutputPath + "/crawl*/result.json")
return {"path": crawljaxOutputPath, "existingValidCrawls": existingValidCrawls}
return {"path": None, "existingValidCrawls": existingValidCrawls}
def getCrawlsToAnalyze(crawlPath=None,app=None, host=None, runtime = 5, bestCrawls = False):
	if crawlPath is None:
crawlPath = os.path.join(".","out")
crawlMap = {}
returnCrawls = []
missingCrawls = []
for appName in APPS:
		if app is not None and app != appName:
continue
algoStr = "HYBRID"
threshold = "-1.0"
existingCrawlData = getExistingCrawl(appName, algoStr, threshold, runtime, ALL_CRAWLS = crawlPath)
existingValidCrawls = existingCrawlData['existingValidCrawls']
crawljaxOutputPath = existingCrawlData['path']
print(existingCrawlData)
if crawljaxOutputPath is None or len(existingValidCrawls) == 0:
crawlFolderName = appName + "_" + algoStr + "_" + str(float(threshold))+ "_" + str(runtime) + "mins"
crawljaxOutputPath = os.path.abspath(os.path.join(crawlPath, appName, crawlFolderName))
missingCrawls.append(crawljaxOutputPath)
for validCrawl in existingValidCrawls:
if validCrawl not in returnCrawls:
path,file = os.path.split(validCrawl)
returnCrawls.append(path)
crawlMap[path] = appName
print(len(returnCrawls))
return returnCrawls, crawlMap, missingCrawls
# APPS=["medical"]
DRY_RUN = False
APP_VERSION = -1
if __name__ == "__main__":
# testCleanup()
# testGetThresholds()
# testRestartDocker()
# testChangeDir()
# testGetBestThresholds()
returnCrawls, crawlMap, missingCrawls = getCrawlsToAnalyze(crawlPath="../crawlOut", app=None, host="localhost",
runtime=30, bestCrawls=True)
print(returnCrawls)
print(crawlMap)
print("Missing")
print(missingCrawls)
# executeTestsDummy("petclinic", "HYBRID", "/TestCarving/crawlOut/petclinic/petclinic_HYBRID_-1.0_30mins/localhost/crawl0",
# None)
runAllTests(returnCrawls, rerun=False)
# addTestRunInfos(returnCrawls, app_version=APP_VERSION)
| 6,899 | 29 | 137 | py |
apicarver | apicarver-main/testCarver/pythonCode/utilsRun.py | import csv
import json
import os
import subprocess
from datetime import datetime
from enum import Enum
from subprocess import check_call, CalledProcessError, Popen
from time import sleep
import psutil
from constants import DOCKER_LOCATION, STATUS_SUCCESSFUL
def getDockerName(appName):
return appName
def restartDockerVersion(appName):
# if version == None:
# restartDocker(getDockerName(appName))
# return
# dockerList = getDockerList(version)
# print(dockerList[appName])
restartDocker(getDockerName(appName=appName))
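# restartDocker: stops and restarts the app's container via its
# stop-docker.sh / run-docker.sh scripts, then sleeps to let the app boot.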
def restartDocker(dockerName, SLEEPTIME=30):
	stopDocker = [os.path.join(DOCKER_LOCATION, dockerName, 'stop-docker.sh')]
	try:
		check_call(stopDocker)
	except CalledProcessError as ex:
		print("Could not stop docker?")
		print(ex)
	startDocker = [os.path.join(DOCKER_LOCATION, dockerName, 'run-docker.sh')]
	try:
		check_call(startDocker)
		sleep(SLEEPTIME)
	except CalledProcessError as ex:
		print("Could not start docker?")
		print(ex)
class MODE(Enum):
CARVER = "carver"
ST = "schemathesis"
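# cleanup: kills leftover chromedriver/chrome processes (carver mode only)
# and, when an appName is given, stops the app's Docker container,
# optionally passing an output directory to the stop script.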
def cleanup(mode, appName=None, outputDir = None):
if mode is MODE.CARVER:
killChromeDriverCommand = ['killall', 'chromedriver']
try:
check_call(killChromeDriverCommand)
except CalledProcessError as ex:
print("No matching processes Found for chromedriver? ")
print(ex)
killGoogleChromeCommand = ['killall', 'chrome']
try:
check_call(killGoogleChromeCommand)
except CalledProcessError as ex:
print("No matching processes Found for Google Chrome? ")
print(ex)
if appName is None:
print("No appName provided. Not resetting Docker")
return
dockerName = getDockerName(appName)
	if dockerName is not None:
stopDocker = [os.path.join(DOCKER_LOCATION, dockerName, 'stop-docker.sh')]
if outputDir is not None:
stopDocker.append(outputDir)
try:
check_call(stopDocker)
except CalledProcessError as ex:
print("Could not stop docker docker? ")
print(ex)
def kill_process(pid):
try:
proc = psutil.Process(pid)
print("Killing", proc.name())
proc.kill()
except psutil.NoSuchProcess as ex:
print("No Such Process : {0}".format(pid))
def monitorProcess(proc, runtime=30, timeStep=30, timeout=200, crawljaxOutputPath=None, existing=-1):
	done = False
	timeDone = 0
	status = None
	while not done:
		poll = proc.poll()
		if poll is None:
			print("process still running {0}/{1}".format(str(timeDone), str(runtime * 60)))
			sleep(timeStep)
			timeDone += timeStep
		else:
			done = True
			status = STATUS_SUCCESSFUL
	return status
def changeDirectory(path):
	try:
		os.chdir(path)
		return True
	except OSError as ex:
		print("Could not change directory")
		print(ex)
		return False
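# startProcess: launches a command via Popen, optionally from a different
# working directory, redirecting stdout/stderr to a log file unless DEBUG.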
def startProcess(command, outputPath="output_crawljax_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log",
changeDir=None,
DEBUG=False):
changed = False
current = os.getcwd()
try:
if changeDir is not None:
changed = changeDirectory(changeDir)
if DEBUG:
process = Popen(command)
return process
else:
print("outputtting log to {0}".format(outputPath))
with open(outputPath, 'w') as outputFile:
proc = Popen(command, stderr=subprocess.STDOUT, stdout=outputFile)
print("Started {0} with PID {1}".format(command, proc.pid))
return proc
except Exception as ex:
print(ex)
print("Exception try to run {0} : ".format(command))
finally:
if changed:
changeDirectory(current)
def exportJson(file, jsonData):
with open(file, "w") as write_file:
json.dump(jsonData, write_file)
def writeCSV_Dict(csvFields, csvRows, dst):
# print(csvRows)
with open(dst, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csvFields)
writer.writeheader()
for row in csvRows:
writer.writerow(row)
def writeCSV(rows, dest):
	with open(dest, 'w') as csvFile:
		writer = csv.writer(csvFile)
		for row in rows:
			writer.writerow(row)
def importJson(jsonFile):
try:
with open(jsonFile, encoding='utf-8') as data_file:
data = json.loads(data_file.read())
return data
except Exception as ex:
print("Exception occured while importing json from : " + jsonFile)
print(ex)
return None
if __name__=="__main__":
cleanup(MODE.ST, appName="realworld", outputDir="../out/testProbe/cov") | 4,316 | 22.983333 | 113 | py |
apicarver | apicarver-main/testCarver/pythonCode/runSchemathesis.py | import glob
import os
import shutil
from datetime import datetime
import constants
from constants import RUN_SCHEMATHESIS_COMMAND, APPS, STATUS_SUCCESSFUL, STATUS_SKIPPED, STATUS_ERRORED, CASETTE_YAML, \
SCHEMATHESIS_OUTPUT
from utilsRun import monitorProcess, cleanup, startProcess, restartDocker, MODE
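# runAllApps: runs schemathesis for every app in APPS (minus excludeApps)
# and tallies successful, skipped, and errored commands.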
def runAllApps(RUNTIME=30):
succesful = []
unsuccesful = []
skipped = []
for app in APPS:
if app in excludeApps:
continue
baseURL = constants.getHostURL(app)
if baseURL is None:
skipped.append(app)
continue
results = runAlgo(app, baseURL)
for result in results:
status = result["status"]
command = result["command"]
if status == STATUS_SUCCESSFUL:
succesful.append(command)
elif status == STATUS_SKIPPED:
skipped.append(command)
elif status == STATUS_ERRORED:
unsuccesful.append(command)
print("succesful : {0}".format(str(len(succesful))))
print(succesful)
print("skipped : {0}".format(str(len(skipped))))
print(skipped)
print("unsuccesful : {0}".format(str(len(unsuccesful))))
print(unsuccesful)
if DRY_RUN:
print("Predicted run time : " + str(RUNTIME * len(succesful)))
def getExistingRuns(appName, ALL_CRAWLS=os.path.join(os.path.abspath(".."), "out")):
	gtYaml = []
	carverYaml = []
	proberYaml = []
	crawljaxOutputPath = os.path.abspath(os.path.join(ALL_CRAWLS, appName))
	if os.path.exists(crawljaxOutputPath):
		gtYaml = glob.glob(crawljaxOutputPath + "/" + SCHEMATHESIS_OUTPUT + "/" + CASETTE_YAML)
		carverYaml = glob.glob(crawljaxOutputPath + "/" + constants.SCHEMATHESIS_CARVER + "/" + CASETTE_YAML)
		proberYaml = glob.glob(crawljaxOutputPath + "/" + constants.SCHEMATHESIS_PROBER + "/" + CASETTE_YAML)
		return {"path": crawljaxOutputPath, "gtYaml": gtYaml, "carverYaml": carverYaml, "proberYaml": proberYaml}
	return {"path": None, "gtYaml": gtYaml, "carverYaml": carverYaml, "proberYaml": proberYaml}
def buildSchemathesisCommand(outputDir, openApiPath, baseURL):
command = RUN_SCHEMATHESIS_COMMAND.copy()
command.append("--cassette-path")
if not os.path.exists(outputDir):
os.makedirs(outputDir)
command.append(os.path.join(outputDir, CASETTE_YAML))
command.append(openApiPath)
command.append("--base-url")
command.append(baseURL)
command.append('--hypothesis-max-examples')
command.append('1000')
return command
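# getEnhancedYaml: globs the carver-enhanced (run/) and prober-enhanced
# (oas/) OpenAPI specs produced for an app under ../out.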
def getEnhancedYaml(appName):
appOutput = os.path.abspath(os.path.join("../out", appName))
if not os.path.exists(appOutput):
print("no output folder for {}".format(appName))
return None
carverYaml = glob.glob(appOutput + "/*/run/*/" + constants.ENHANCED_YAML)
proberYaml = glob.glob(appOutput + "/*/oas/*/" + constants.ENHANCED_YAML)
return {"carverYaml": carverYaml, "proberYaml": proberYaml}
def runAlgo(appName, baseURL,
logFile=os.path.join("../logs", "schemaThesis_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"),
rerun=False):
results = []
commands = []
# For GroundTruth OpenAPI
srcPath = os.path.join("..", "src", "main", "resources", "webapps", appName)
openApiPath = os.path.join(srcPath, "openapi.yml")
enhancedYaml = getEnhancedYaml(appName)
for runIndex in range(1):
outputDir = os.path.join("..", "out", appName, SCHEMATHESIS_OUTPUT, str(runIndex))
command_gtYaml = buildSchemathesisCommand(outputDir=outputDir, openApiPath=openApiPath, baseURL=baseURL)
if (not rerun) and os.path.exists(os.path.join(outputDir, constants.CASETTE_YAML)):
# There is a previous execution and rerun is disabled
results.append({"command": command_gtYaml, "status": STATUS_SKIPPED, "message": "previous execution data exists"})
else:
commands.append({"command":command_gtYaml, "outputDir":outputDir})
if (enhancedYaml is not None) and len(enhancedYaml['carverYaml']) > 0:
# For Carver Enhanced OpenAPI
outputDir = os.path.join("..", "out", appName, constants.SCHEMATHESIS_CARVER, str(runIndex))
openApiPath = enhancedYaml['carverYaml'][0]
command_carverYaml = buildSchemathesisCommand(outputDir=outputDir, openApiPath=openApiPath, baseURL=baseURL)
if (not rerun) and os.path.exists(os.path.join(outputDir, constants.CASETTE_YAML)):
# There is a previous execution and rerun is disabled
results.append({"command": command_carverYaml, "status": STATUS_SKIPPED, "message": "previous execution data exists"})
else:
commands.append({"command":command_carverYaml, "outputDir":outputDir})
if (enhancedYaml is not None) and len(enhancedYaml['proberYaml']) > 0:
			# For Prober Enhanced OpenAPI
outputDir = os.path.join("..", "out", appName, constants.SCHEMATHESIS_PROBER, str(runIndex))
openApiPath = enhancedYaml['proberYaml'][0]
command_proberYaml = buildSchemathesisCommand(outputDir=outputDir, openApiPath=openApiPath, baseURL=baseURL)
if (not rerun) and os.path.exists(os.path.join(outputDir, constants.CASETTE_YAML)):
# There is a previous execution and rerun is disabled
results.append({"command": command_proberYaml, "status": STATUS_SKIPPED, "message": "previous execution data exists"})
else:
commands.append({"command":command_proberYaml, "outputDir":outputDir})
for command in commands:
if DRY_RUN:
results.append({"command": command["command"], "status": STATUS_SUCCESSFUL, "message": "DRYRUN"})
continue
SLEEPTIME = 30
if appName == "shopizer":
SLEEPTIME = 120
restartDocker(appName, SLEEPTIME)
print("sending command {0}".format(command["command"]))
proc = startProcess(command["command"], logFile, changeDir=None)
		if proc is None:
print("Ignoring error command.")
results.append({"command": command["command"], "status": STATUS_ERRORED, "message": "Command could not be executed"})
continue
status = monitorProcess(proc, timeStep=30)
print("Done : {0}".format(command["command"]))
cleanup(MODE.ST, appName, os.path.join(command["outputDir"], "cov"))
results.append({"command": command["command"], "status": STATUS_SUCCESSFUL, "message": "Succesful"})
# if DRY_RUN:
# status = STATUS_SUCCESSFUL
# return results
#
# if isDockerized(appName):
# restartDocker(getDockerName(appName))
return results
def getExistingTest():
for app in APPS:
print(getExistingRuns(app))
DRY_RUN = False
excludeApps = ['tmf', 'mdh', 'shopizer']
if __name__ == "__main__":
print("hello")
# getExistingTest()
runAllApps()
| 6,970 | 39.063218 | 134 | py |
apicarver | apicarver-main/testCarver/pythonCode/rq1_executionTime.py | import glob
import os.path
from datetime import datetime
import utilsRun
from constants import APPS
from coverageStats import getCovFiles
from runCarver import getExistingCarverRun
from runGeneratedTests import getCrawlsToAnalyze, getExistingCrawl
from utilsRun import importJson
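# findAllOutputs: pairs each app with its first valid crawler output and
# carver output, flagging apps whose outputs cannot be found.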
def findAllOutputs(ALL_CRAWLS="../crawlOut"):
allOutputs = {}
for appName in APPS:
try:
# print(appName)
algoStr = "HYBRID"
threshold = "-1.0"
existingCrawlData = getExistingCrawl(appName, algoStr, threshold, 30, ALL_CRAWLS = ALL_CRAWLS)
existingValidCrawls = existingCrawlData['existingValidCrawls']
crawljaxOutputPath = existingCrawlData['path']
# print(existingValidCrawls[0])
existingCarverData = getExistingCarverRun(appName)
existingValidCarverOutputs = existingCarverData['existingValidCrawls']
carverOutputPath = existingCarverData['path']
# print(existingValidCarverOutputs[0])
outputs = {"carver": existingValidCarverOutputs[0], "crawler": existingValidCrawls[0], "success": True}
except Exception as ex:
outputs = {"success": False, "message": "error finding outputs"}
print(ex)
allOutputs[appName] = outputs
return allOutputs
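# getExecutionTime: reads the UI suite duration from testRunInfo.json and
# sums the per-request durations recorded in the carver's
# resultResponses.json.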
def getExecutionTime(outputs):
duration = {}
validCrawl = outputs['crawler']
validCarve = outputs['carver']
try:
crawlPath, file = os.path.split(validCrawl)
testExecutionResultFile = glob.glob(crawlPath + "/test-results/0/testRunInfo.json")[0]
executionData = importJson(jsonFile=testExecutionResultFile)
executionTime = executionData['duration']
# print("Crawler time {}".format(executionTime))
# print("Crawler time {}".format(int(executionTime)))
duration['crawler'] = int(executionTime)
except Exception as ex:
print(ex)
print("Exception getting UI test execution data")
duration['crawler'] = None
try:
carvePath, file = os.path.split(validCarve)
carveResultFile = glob.glob(carvePath + "/run/*/resultResponses.json")[0]
carverResults = importJson(carveResultFile)
executionTime = 0
for apiResult in carverResults:
executionTime += apiResult['duration']
# print("Carver time {}".format(executionTime))
duration['carver'] = executionTime
except Exception as ex:
print(ex)
print("Unable to find carver execution time")
duration['carver'] = None
return duration
def getCoverageData(app):
print(getCovFiles(app))
if __name__ == "__main__":
allOutputs = findAllOutputs()
print(allOutputs)
durations = []
coverages = []
for app in APPS:
if allOutputs[app]['success']:
duration = getExecutionTime(allOutputs[app])
duration['app'] = app
durations.append(duration)
else:
print("Cannot get results for {}".format(app))
duration = {'app': app, 'result' : "error"}
print(durations)
utilsRun.writeCSV_Dict(durations[0].keys(), csvRows=durations,dst="../results/durations_"+datetime.now().strftime("%Y%m%d-%H%M%S")+".csv")
# getCoverageData("petclinic") | 3,289 | 37.255814 | 142 | py |
apicarver | apicarver-main/testCarver/pythonCode/parseRestatsOutput.py | import os
from datetime import datetime
import constants
import runRestats
import utilsRun
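# fetchRestatsOutputDir: maps each restats input kind (carver, prober,
# merges, schemathesis cassettes) to the *_reports directories that
# restats generated for it, mirroring the naming used in runRestats.py.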
def fetchRestatsOutputDir(appName):
returnDict = {}
toolOutputs = runRestats.getExistingOutput(appName)
if toolOutputs is None:
print("No restats results found {}".format(appName))
return None
for toolOutput in toolOutputs.keys():
if len(toolOutputs[toolOutput]) == 0:
print(toolOutput + " : No output" )
continue
returnDict[toolOutput] = []
if len(toolOutputs[toolOutput]) == 1:
outputDir = toolOutputs[toolOutput][0]
print(toolOutput + " : " + outputDir + " : " + str(os.path.exists(outputDir)))
if toolOutput == "carverMerge" :
reportsDir = os.path.splitext(outputDir)[0] + "carver" + "_reports"
elif toolOutput == "proberMerge":
reportsDir = os.path.splitext(outputDir)[0] + "prober" + "_reports"
elif toolOutput in ["inferredJson", "proberJson"]:
reportsDir = os.path.splitext(outputDir)[0] + "_json_reports"
else:
reportsDir = os.path.splitext(outputDir)[0] + "_reports"
print(reportsDir + " " + str(os.path.exists(reportsDir)))
if os.path.exists(reportsDir):
returnDict[toolOutput].append(reportsDir)
continue
for runIndex in range(len(toolOutputs[toolOutput])):
outputDir = toolOutputs[toolOutput][runIndex]
print(toolOutput + str(runIndex) + " : " + outputDir + " : " + str(os.path.exists(outputDir)))
if toolOutput == "carverMerge" :
reportsDir = os.path.splitext(outputDir)[0] + "carver" + "_reports"
elif toolOutput == "proberMerge":
reportsDir = os.path.splitext(outputDir)[0] + "prober" + "_reports"
else:
reportsDir = os.path.splitext(outputDir)[0] + "_reports"
print(reportsDir + " " + str(os.path.exists(reportsDir)))
if os.path.exists(reportsDir):
returnDict[toolOutput].append(reportsDir)
return returnDict
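# getOASCoverageStats: flattens restats' stats.json coverage report into a
# single row of documented/tested counts and rates per coverage dimension.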
def getOASCoverageStats(toolKey, reportsDir):
statsFile = os.path.join(reportsDir, "stats.json")
if not os.path.exists(statsFile):
print("Stats not available at {}", statsFile)
return None
statsJson = utilsRun.importJson(jsonFile=statsFile)
return {"tool": toolKey,
"pathTotal": statsJson["pathCoverage"]["raw"]["documented"],
"pathTested": statsJson["pathCoverage"]["raw"]["documentedAndTested"],
"pathRate": statsJson["pathCoverage"]["rate"],
"operationTotal": statsJson["operationCoverage"]["raw"]["documented"],
"operationTested": statsJson["operationCoverage"]["raw"]["documentedAndTested"],
"operationRate": statsJson["operationCoverage"]["rate"],
"statusClassTotal": statsJson["statusClassCoverage"]["raw"]["documented"],
"statusClassTested": statsJson["statusClassCoverage"]["raw"]["documentedAndTested"],
"statusClassRate": statsJson["statusClassCoverage"]["rate"],
"statusTotal": statsJson["statusCoverage"]["raw"]["documented"],
"statusTested": statsJson["statusCoverage"]["raw"]["documentedAndTested"],
"statusRate": statsJson["statusCoverage"]["rate"],
"responseTypeTotal": statsJson["responseTypeCoverage"]["raw"]["documented"],
"responseTypeTested": statsJson["responseTypeCoverage"]["raw"]["documentedAndTested"],
"responseTypeRate": statsJson["responseTypeCoverage"]["rate"],
"requestTypeTotal": statsJson["requestTypeCoverage"]["raw"]["documented"],
"requestTypeTested": statsJson["requestTypeCoverage"]["raw"]["documentedAndTested"],
"requestTypeRate": statsJson["requestTypeCoverage"]["rate"],
"parameterTotal": statsJson["parameterCoverage"]["raw"]["documented"],
"parameterTested": statsJson["parameterCoverage"]["raw"]["documentedAndTested"],
"parameterRate": statsJson["parameterCoverage"]["rate"]
}
def getOASCompStats(toolKey, reportsDir):
statsFile = os.path.join(reportsDir, "stats.json")
if not os.path.exists(statsFile):
print("Stats not available at {}", statsFile)
return None
statsJson = utilsRun.importJson(jsonFile=statsFile)
# return {"tool": toolKey,
# "pathPr": statsJson["pathPr"],
# "pathRe": statsJson["pathRe"],
# "operationPr": statsJson["operationPr"],
# "operationRe": statsJson["operationRe"]
# }
if toolKey in ["inferredYaml", "proberYaml"]:
return {"tool": toolKey,
"paths_matched": statsJson["path"]["matched"],
"paths_unmatched": statsJson["path"]["unmatched"],
"paths_matched_unique": statsJson["path"]["matched_unique"],
"paths_original": statsJson["path"]["gt"],
"op_matched": statsJson["op"]["matched"],
"op_unmatched": statsJson["op"]["unmatched"],
"op_matched_unique": statsJson["op"]["matched_unique"],
"op_original": statsJson["op"]["gt"],
"params_matched": -1,
"params_fp": -1,
"params_original": -1
}
else:
return {"tool": toolKey,
"paths_matched": statsJson["path"]["matched"],
"paths_unmatched": statsJson["path"]["unmatched"],
"paths_matched_unique": statsJson["path"]["matched_unique"],
"paths_original": statsJson["path"]["gt"],
"op_matched": statsJson["op"]["matched"],
"op_unmatched": statsJson["op"]["unmatched"],
"op_matched_unique": statsJson["op"]["matched_unique"],
"op_original": statsJson["op"]["gt"],
"params_matched": statsJson["var"]["matched"],
"params_fp": statsJson["var"]["fp"],
"params_original": statsJson["var"]["gt"]
}
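# getRestatsResults: builds one coverage row per run of each tool output,
# plus an averaged row per tool; spec-comparison outputs are collected
# separately in oasResults.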
def getRestatsResults(appName):
	results = []
	oasResults = []
	outputs = fetchRestatsOutputDir(appName)
	if outputs is None:
		print("No outputs found for {}".format(appName))
		return None, None
	metrics = ("path", "operation", "statusClass", "status", "responseType", "requestType", "parameter")
	compOutputs = ("inferredYaml", "proberYaml", "inferredJson", "proberJson")
	recordOutputs = ("carverRecords", "uiRecords", "proberRecords")
	for output in outputs.keys():
		isRecordOutput = output in recordOutputs
		if not isRecordOutput:
			# Accumulator for the averaged row over all runs of this tool output.
			avgResult = {"tool": output, "app": appName}
			for metric in metrics:
				avgResult[metric + "Total"] = 0
				avgResult[metric + "Tested"] = 0
				avgResult[metric + "Rate"] = 0
		for runIndex in range(len(outputs[output])):
			if output in compOutputs:
				result = getOASCompStats(output, outputs[output][runIndex])
				if result is not None:
					result['app'] = appName
					result['file'] = outputs[output][runIndex]
					oasResults.append(result)
			elif isRecordOutput:
				result = getOASCoverageStats(output, outputs[output][runIndex])
				if result is not None:
					result['app'] = appName
					result['file'] = outputs[output][runIndex]
					results.append(result)
			else:
				result = getOASCoverageStats(output + str(runIndex), outputs[output][runIndex])
				if result is not None:
					result['app'] = appName
					result['file'] = outputs[output][runIndex]
					results.append(result)
					for metric in metrics:
						avgResult[metric + "Total"] += result[metric + "Total"]
						avgResult[metric + "Tested"] += result[metric + "Tested"]
		if not isRecordOutput:
			for metric in metrics:
				try:
					avgResult[metric + "Rate"] = avgResult[metric + "Tested"] / avgResult[metric + "Total"]
				except ZeroDivisionError:
					print("err")
			results.append(avgResult)
	return results, oasResults
def getAllRestatsResults():
totalResults = []
totalOASResults = []
for app in constants.APPS:
results, oasResults = getRestatsResults(app)
if results is not None:
totalResults.extend(results)
if oasResults is not None:
totalOASResults.extend(oasResults)
print(totalResults)
print(totalOASResults)
return totalResults, totalOASResults
if __name__ == "__main__":
# returnDict = fetchRestatsOutputDir("petclinic")
# print(returnDict)
# results = getRestatsResults("petclinic")
# print(results)
totalResults, totalOASResults = getAllRestatsResults()
utilsRun.writeCSV_Dict(totalResults[0].keys(), csvRows=totalResults,dst="../results/apiTestCov_"+datetime.now().strftime("%Y%m%d-%H%M%S")+".csv")
utilsRun.writeCSV_Dict(totalOASResults[0].keys(), csvRows=totalOASResults,dst="../results/specCompare_"+datetime.now().strftime("%Y%m%d-%H%M%S")+".csv")
| 12,307 | 45.621212 | 156 | py |
apicarver | apicarver-main/testCarver/pythonCode/runRestats.py | import glob
import os.path
from datetime import datetime
from enum import Enum
import constants
import utilsRun
from constants import APPS, STATUS_ERRORED, STATUS_SUCCESSFUL, STATUS_SKIPPED, RUN_RESTATS_COMMAND, \
RESULT_RESPONSES_JSON, SCHEMATHESIS_OUTPUT, CASETTE_YAML, PROBER_RESPONSES_JSON, INFERRED_YAML, PROBER_YAML, \
RESTATS_PATH, INFERRED_JSON, PROBER_JSON
from utilsRun import monitorProcess, startProcess, restartDocker
class RUN_MODE(Enum):
merge = "merge"
carver = "carver"
schemathesis = "schemathesis"
specCompare = "specCompare"
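# buildRestatsConfig: builds the restats configuration dict for one run
# mode. For illustration, a carver-mode config (hypothetical paths) looks
# roughly like:
#   {"specification": ".../openapi.json", "modules": "carver",
#    "results": ".../resultResponses.json",
#    "reportsDir": ".../resultResponses_reports",
#    "dbPath": ".../resultResponses.sqlite"}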
def buildRestatsConfig(run_mode, groundTruthYaml, inferredYaml=None, inferredJson=None, carverRecords=None, stRecords=None, mergeName=None):
if (groundTruthYaml is None) or (not os.path.exists(groundTruthYaml)):
print("Error. Provide a valid groundtruth openapi yaml")
return None
confDict = {}
confDict["specification"] = os.path.abspath(groundTruthYaml)
if run_mode == RUN_MODE.carver:
if (carverRecords is None) or (not os.path.exists(carverRecords)):
print("Error. Provide a valid resultResponses for carver")
return None
confDict["modules"] = "carver"
confDict["results"] = os.path.abspath(carverRecords)
# carverDir = os.path.pathsep.join(os.path.split(carverRecords)[0:len(os.path.split(carverRecords)) - 1])
reportsDir = os.path.splitext(carverRecords)[0] + "_reports"
confDict["reportsDir"] = reportsDir
if not os.path.exists(reportsDir):
os.makedirs(reportsDir)
confDict["dbPath"] = os.path.splitext(carverRecords)[0] + ".sqlite"
elif run_mode == RUN_MODE.specCompare:
# if (inferredYaml is None) or (not os.path.exists(inferredYaml)):
# print("Error. Provide a valid yaml for spec comparison")
# return None
if inferredYaml is not None:
confDict["modules"] = "specCompare"
confDict["inferred"] = os.path.abspath(inferredYaml)
# inferredDir = os.path.pathsep.join(os.path.split(inferredYaml)[0:len(os.path.split(inferredYaml)) - 1])
reportsDir = os.path.splitext(inferredYaml)[0] + "_reports"
confDict["specReports"] = reportsDir
if not os.path.exists(reportsDir):
os.makedirs(reportsDir)
confDict["specDbPath"] = os.path.splitext(inferredYaml)[0] + ".sqlite"
if inferredJson is not None:
confDict["modules"] = "specCompare"
confDict["inferred"] = os.path.abspath(inferredJson)
# inferredDir = os.path.pathsep.join(os.path.split(inferredYaml)[0:len(os.path.split(inferredYaml)) - 1])
reportsDir = os.path.splitext(inferredJson)[0] + "_json_reports"
confDict["specReports"] = reportsDir
if not os.path.exists(reportsDir):
os.makedirs(reportsDir)
confDict["specDbPath"] = os.path.splitext(inferredJson)[0] + "_json.sqlite"
elif run_mode == RUN_MODE.schemathesis:
if (stRecords is None) or (not os.path.exists(stRecords)):
print("Error. Provide a valid Cassette for schemathesis")
return None
confDict["modules"] = "schemathesis"
confDict["cassette"] = stRecords
confDict["cassetteReports"] = os.path.abspath(stRecords)
# cassetteDir = os.path.pathsep.join(os.path.split(stRecords)[0:len(os.path.split(stRecords)) - 1])
reportsDir = os.path.splitext(stRecords)[0] + "_reports"
confDict["cassetteReports"] = reportsDir
if not os.path.exists(reportsDir):
os.makedirs(reportsDir)
confDict["cassetteDbPath"] = os.path.splitext(stRecords)[0] + ".sqlite"
elif run_mode == RUN_MODE.merge:
if (stRecords is None) or (not os.path.exists(stRecords)):
print("Error. Provide a valid Cassette for schemathesis")
return None
if (carverRecords is None) or (not os.path.exists(carverRecords)):
print("Error. Provide a valid resultResponses for carver")
return None
if mergeName is None:
print("Error. Provide a valid merge name")
return None
confDict["modules"] = "merge"
confDict["cassette"] = stRecords
confDict["results"] = carverRecords
reportsDir = os.path.splitext(stRecords)[0] + mergeName + "_reports"
confDict["mergeReports"] = reportsDir
if not os.path.exists(reportsDir):
os.makedirs(reportsDir)
confDict["mergeDb"] = os.path.splitext(stRecords)[0] + mergeName + ".sqlite"
# confDict = {
# "modules": "specCompare",
# "specification": "petclinic2/openapi.json",
# "reportsDir": "petclinic2/reports",
# "cassetteReports": "petclinic2/reports_cassette",
# "dbPath": "petclinic2/database.sqlite",
# "cassetteDbPath": "petclinic2/database_cassette.sqlite",
# "results": "petclinic2/probe_resultResponses.json",
# "inferred": "petclinic2/probe_oas.yaml",
# "specDbPath": "petclinic2/database_spec.sqlite",
# "specReports": "petclinic2/reports_spec",
# "cassette": "petclinic2/casette.yaml"
# }
return confDict
def runAllApps(TOOLS=[], RUNTIME=30):
if len(TOOLS) == 0:
TOOLS = ALL_TOOLS
succesful = []
unsuccesful = []
skipped = []
for app in APPS:
if app in excludeApps:
continue
baseURL = constants.getHostURL(app)
if baseURL is None:
skipped.append(app)
continue
results = runAlgo(app, TOOLS)
for result in results:
if result["status"] == STATUS_SUCCESSFUL:
succesful.append(result["command"])
elif result["status"] == STATUS_SKIPPED:
skipped.append(result["command"])
elif result["status"] == STATUS_ERRORED:
unsuccesful.append(result["command"])
print("succesful : {0}".format(str(len(succesful))))
print(succesful)
print("skipped : {0}".format(str(len(skipped))))
print(skipped)
print("unsuccesful : {0}".format(str(len(unsuccesful))))
print(unsuccesful)
if DRY_RUN:
print("Predicted run time : " + str(RUNTIME * len(succesful)))
def getExistingOutput(appName, ALL_CRAWLS=os.path.join(os.path.abspath(".."), "out")):
appOutput = os.path.abspath(os.path.join(ALL_CRAWLS, appName))
if not os.path.exists(appOutput):
print("no output folder for {}".format(appName))
return None
uiRecords = glob.glob(appOutput + "/*/" + RESULT_RESPONSES_JSON)
carverRecords = glob.glob(appOutput + "/*/run/*/" + RESULT_RESPONSES_JSON)
proberRecords = glob.glob(appOutput + "/*/oas/*/" + PROBER_RESPONSES_JSON)
inferredYaml = glob.glob(appOutput + "/*/oas/*/" + INFERRED_YAML)
proberYaml = glob.glob(appOutput + "/*/oas/*/" + PROBER_YAML)
inferredJson = glob.glob(appOutput + "/*/oas/*/" + INFERRED_JSON)
proberJson = glob.glob(appOutput + "/*/oas/*/" + PROBER_JSON)
stOutput = glob.glob(appOutput + "/" + SCHEMATHESIS_OUTPUT + "/*/" + CASETTE_YAML)
carverMerge = glob.glob(appOutput + "/" + SCHEMATHESIS_OUTPUT + "/*/" + CASETTE_YAML)
proberMerge = glob.glob(appOutput + "/" + SCHEMATHESIS_OUTPUT + "/*/" + CASETTE_YAML)
stCarver = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_CARVER + "/*/" + CASETTE_YAML)
stProber = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_PROBER + "/*/" + CASETTE_YAML)
return {
"uiRecords": uiRecords,
"carverRecords": carverRecords,
"proberRecords": proberRecords,
"inferredYaml": inferredYaml,
"proberYaml": proberYaml,
"inferredJson": inferredJson,
"proberJson": proberJson,
"stOutput": stOutput,
"carverMerge": carverMerge,
"proberMerge": proberMerge,
"stCarver": stCarver,
"stProber": stProber
}
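# runAlgo: for each requested tool output, writes a restats *_conf.json
# next to the artifact, launches restats over it, and collects the
# per-command statuses.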
def runAlgo(appName, tools=[]):
if len(tools) == 0:
tools = ALL_TOOLS
results = []
existingOutput = getExistingOutput(appName)
print(existingOutput)
	if existingOutput is None:
		print("Ignoring run because no output exists.")
		return [{"name": appName, "status": STATUS_SKIPPED, "command": None}]
srcPath = os.path.join("..", "src", "main", "resources", "webapps", appName)
# openApiPath = os.path.join(srcPath, "openapi.yml")
openApiPath = os.path.join(srcPath, "openapi.json")
# Carver Results
try:
carverRecords = existingOutput['carverRecords'][0]
except Exception as ex:
carverRecords = None
print(ex)
if carverRecords is not None and ("carverRecords" in tools):
config = buildRestatsConfig(run_mode=RUN_MODE.carver, groundTruthYaml=openApiPath,
carverRecords=carverRecords)
confPath = os.path.splitext(carverRecords)[0] + "_conf.json"
utilsRun.exportJson(confPath, config)
command = RUN_RESTATS_COMMAND.copy()
command.append(confPath)
logFile = os.path.abspath(
os.path.join("../logs", "carverResults_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=RESTATS_PATH)
		if proc is None:
			print("Ignoring error command.")
			results.append({"name": "carverResults", "status": STATUS_ERRORED, "command": command})
		else:
			status = monitorProcess(proc, runtime=30, timeStep=10)
			print("Done : {0}".format(command))
			results.append({"name": "carverResults", "status": status, "command": command})
# Prober Carver Results
try:
proberRecords = existingOutput['proberRecords'][0]
except Exception as ex:
proberRecords = None
print(ex)
if proberRecords is not None and ("proberRecords" in tools):
config = buildRestatsConfig(run_mode=RUN_MODE.carver, groundTruthYaml=openApiPath,
carverRecords=proberRecords)
confPath = os.path.splitext(proberRecords)[0] + "_conf.json"
utilsRun.exportJson(confPath, config)
command = RUN_RESTATS_COMMAND.copy()
command.append(confPath)
logFile = os.path.abspath(
os.path.join("../logs", "proberResults_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=RESTATS_PATH)
		if proc is None:
			print("Ignoring error command.")
			results.append({"name": "proberResults", "status": STATUS_ERRORED, "command": command})
		else:
			status = monitorProcess(proc, runtime=30, timeStep=10)
			print("Done : {0}".format(command))
			results.append({"name": "proberResults", "status": status, "command": command})
# InferredYaml Results
try:
inferredYaml = existingOutput['inferredYaml'][0]
except Exception as ex:
inferredYaml = None
print(ex)
if inferredYaml is not None and ("inferredYaml" in tools):
config = buildRestatsConfig(run_mode=RUN_MODE.specCompare, groundTruthYaml=openApiPath,
inferredYaml=inferredYaml)
confPath = os.path.splitext(inferredYaml)[0] + "_conf.json"
utilsRun.exportJson(confPath, config)
command = RUN_RESTATS_COMMAND.copy()
command.append(confPath)
logFile = os.path.abspath(
os.path.join("../logs", "inferredYaml_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=RESTATS_PATH)
		if proc is None:
			print("Ignoring error command.")
			results.append({"name": "inferredYaml", "status": STATUS_ERRORED, "command": command})
		else:
			status = monitorProcess(proc, runtime=30, timeStep=1)
			print("Done : {0}".format(command))
			results.append({"name": "inferredYaml", "status": status, "command": command})
# Inferred Json Results
try:
inferredJson = existingOutput['inferredJson'][0]
except Exception as ex:
inferredJson = None
print(ex)
if inferredJson is not None and ("inferredJson" in tools):
config = buildRestatsConfig(run_mode=RUN_MODE.specCompare, groundTruthYaml=openApiPath,
inferredJson=inferredJson)
confPath = os.path.splitext(inferredJson)[0] + "_json_conf.json"
utilsRun.exportJson(confPath, config)
command = RUN_RESTATS_COMMAND.copy()
command.append(confPath)
logFile = os.path.abspath(
os.path.join("../logs", "inferredJson_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=RESTATS_PATH)
		if proc is None:
			print("Ignoring error command.")
			results.append({"name": "inferredJson", "status": STATUS_ERRORED, "command": command})
		else:
			status = monitorProcess(proc, runtime=30, timeStep=1)
			print("Done : {0}".format(command))
			results.append({"name": "inferredJson", "status": status, "command": command})
# Prober Json Results
try:
proberJson = existingOutput['proberJson'][0]
except Exception as ex:
proberJson = None
print(ex)
if proberJson is not None and ("proberJson" in tools):
config = buildRestatsConfig(run_mode=RUN_MODE.specCompare, groundTruthYaml=openApiPath,
inferredJson=proberJson)
confPath = os.path.splitext(proberJson)[0] + "_json_conf.json"
utilsRun.exportJson(confPath, config)
command = RUN_RESTATS_COMMAND.copy()
command.append(confPath)
logFile = os.path.abspath(
os.path.join("../logs", "proberJson_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=RESTATS_PATH)
		if proc is None:
			print("Ignoring error command.")
			results.append({"name": "proberJson", "status": STATUS_ERRORED, "command": command})
		else:
			status = monitorProcess(proc, runtime=30, timeStep=1)
			print("Done : {0}".format(command))
			results.append({"name": "proberJson", "status": status, "command": command})
# ProberYaml Results
try:
proberYaml = existingOutput['proberYaml'][0]
except Exception as ex:
proberYaml = None
print(ex)
if proberYaml is not None and ("proberYaml" in tools):
config = buildRestatsConfig(run_mode=RUN_MODE.specCompare, groundTruthYaml=openApiPath,
inferredYaml=proberYaml)
confPath = os.path.splitext(proberYaml)[0] + "_conf.json"
utilsRun.exportJson(confPath, config)
command = RUN_RESTATS_COMMAND.copy()
command.append(confPath)
logFile = os.path.abspath(
os.path.join("../logs", "proberYaml_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=RESTATS_PATH)
		if proc is None:
			print("Ignoring error command.")
			results.append({"name": "proberYaml", "status": STATUS_ERRORED, "command": command})
		else:
			status = monitorProcess(proc, runtime=30, timeStep=1)
			print("Done : {0}".format(command))
			results.append({"name": "proberYaml", "status": status, "command": command})
# schemathesis Results
try:
stOutputEntries = existingOutput['stOutput']
except Exception as ex:
stOutputEntries = None
print(ex)
if stOutputEntries is not None and ("stOutput" in tools):
for runIndex in range(len(stOutputEntries)):
stOutput = stOutputEntries[runIndex]
config = buildRestatsConfig(run_mode=RUN_MODE.schemathesis, groundTruthYaml=openApiPath,
stRecords=stOutput)
confPath = os.path.splitext(stOutput)[0] + "_conf.json"
utilsRun.exportJson(confPath, config)
command = RUN_RESTATS_COMMAND.copy()
command.append(confPath)
logFile = os.path.abspath(
os.path.join("../logs", "stOutput_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=RESTATS_PATH)
			if proc is None:
				print("Ignoring error command.")
				results.append({"name": "stOutput"+str(runIndex), "status": STATUS_ERRORED, "command": command})
			else:
				status = monitorProcess(proc, runtime=30, timeStep=10)
				print("Done : {0}".format(command))
				results.append({"name": "stOutput"+str(runIndex), "status": status, "command": command})
try:
carverMergeEntries = existingOutput['carverMerge']
carverRecords=existingOutput['carverRecords'][0]
except Exception as ex:
carverMergeEntries = None
print(ex)
if carverMergeEntries is not None and ("carverMerge" in tools) and (carverRecords is not None):
for runIndex in range(len(carverMergeEntries)):
carverMerge = carverMergeEntries[runIndex]
mergeName = "carver"
config = buildRestatsConfig(run_mode=RUN_MODE.merge, groundTruthYaml=openApiPath,
stRecords=carverMerge, carverRecords=carverRecords, mergeName=mergeName)
confPath = os.path.splitext(carverMerge)[0] + mergeName + "_conf.json"
utilsRun.exportJson(confPath, config)
command = RUN_RESTATS_COMMAND.copy()
command.append(confPath)
logFile = os.path.abspath(
os.path.join("../logs", "carverMerge_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=RESTATS_PATH)
			if proc is None:
				print("Ignoring error command.")
				results.append({"name": "carverMerge"+str(runIndex), "status": STATUS_ERRORED, "command": command})
			else:
				status = monitorProcess(proc, runtime=30, timeStep=10)
				print("Done : {0}".format(command))
				results.append({"name": "carverMerge"+str(runIndex), "status": status, "command": command})
try:
proberMergeEntries = existingOutput['proberMerge']
proberRecords=existingOutput['proberRecords'][0]
except Exception as ex:
proberMergeEntries = None
print(ex)
if proberMergeEntries is not None and ("proberMerge" in tools) and (proberRecords is not None):
for runIndex in range(len(proberMergeEntries)):
proberMerge = proberMergeEntries[runIndex]
mergeName = "prober"
config = buildRestatsConfig(run_mode=RUN_MODE.merge, groundTruthYaml=openApiPath,
stRecords=proberMerge, carverRecords=proberRecords, mergeName=mergeName)
confPath = os.path.splitext(proberMerge)[0] + mergeName + "_conf.json"
utilsRun.exportJson(confPath, config)
command = RUN_RESTATS_COMMAND.copy()
command.append(confPath)
logFile = os.path.abspath(
os.path.join("../logs", "proberMerge_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=RESTATS_PATH)
			if proc is None:
				print("Ignoring error command.")
				results.append({"name": "proberMerge"+str(runIndex), "status": STATUS_ERRORED, "command": command})
			else:
				status = monitorProcess(proc, runtime=30, timeStep=10)
				print("Done : {0}".format(command))
				results.append({"name": "proberMerge"+str(runIndex), "status": status, "command": command})
try:
stCarverEntries = existingOutput['stCarver']
except Exception as ex:
stCarverEntries = None
print(ex)
if stCarverEntries is not None and ("stCarver" in tools):
for runIndex in range(len(stCarverEntries)):
stCarver = stCarverEntries[runIndex]
config = buildRestatsConfig(run_mode=RUN_MODE.schemathesis, groundTruthYaml=openApiPath,
stRecords=stCarver)
confPath = os.path.splitext(stCarver)[0] + "_conf.json"
utilsRun.exportJson(confPath, config)
command = RUN_RESTATS_COMMAND.copy()
command.append(confPath)
logFile = os.path.abspath(
os.path.join("../logs", "stCarver_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=RESTATS_PATH)
			if proc is None:
				print("Ignoring error command.")
				results.append({"name": "stCarver"+str(runIndex), "status": STATUS_ERRORED, "command": command})
			else:
				status = monitorProcess(proc, runtime=30, timeStep=10)
				print("Done : {0}".format(command))
				results.append({"name": "stCarver"+str(runIndex), "status": status, "command": command})
try:
stProberEntries = existingOutput['stProber']
except Exception as ex:
stProberEntries = None
print(ex)
if stProberEntries is not None and ("stProber" in tools):
for runIndex in range(len(stProberEntries)):
stProber = stProberEntries[runIndex]
config = buildRestatsConfig(run_mode=RUN_MODE.schemathesis, groundTruthYaml=openApiPath,
stRecords=stProber)
confPath = os.path.splitext(stProber)[0] + "_conf.json"
utilsRun.exportJson(confPath, config)
command = RUN_RESTATS_COMMAND.copy()
command.append(confPath)
logFile = os.path.abspath(
os.path.join("../logs", "stProber_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=RESTATS_PATH)
			if proc is None:
				print("Ignoring error command.")
				results.append({"name": "stProber"+str(runIndex), "status": STATUS_ERRORED, "command": command})
			else:
				status = monitorProcess(proc, runtime=30, timeStep=10)
				print("Done : {0}".format(command))
				results.append({"name": "stProber"+str(runIndex), "status": status, "command": command})
return results
def getExistingTest():
for app in APPS:
print(getExistingOutput(app))
# APPS = ["ecomm"]
DRY_RUN = True
excludeApps = ['tmf', 'mdh']
ALL_TOOLS = ["uiRecords", "carverRecords", "proberRecords", "inferredYaml", "proberYaml","inferredJson", "proberJson", "stOutput", "carverMerge", "proberMerge", "stCarver", "stProber"]
if __name__ == "__main__":
print("hello")
# getExistingTest()
runAllApps(['inferredJson', 'proberJson'])
	# Can specify particular app and tool
# ["uiRecords", "carverMerge", "proberMerge", "stOutput", "stCarver", "stProber"])
# runAlgo("petclinic", ["inferredJson", "proberJson"])
# runAlgo("parabank")
# runAlgo("jawa", ["proberJson"])
| 24,223 | 42.963702 | 185 | py |
apicarver | apicarver-main/testCarver/pythonCode/scratch_1.py | import glob
import os
# print(os.path.splitext("../a/b/c.json")[0])
# carverRecords = "../a/c.json"
#
# dir = os.path.pathsep.join(os.path.split(carverRecords)[0:len(os.path.split(carverRecords))-1])
# print(dir)
import constants
import coverageStats
from constants import RESULT_RESPONSES_JSON
# print(glob.glob( "/TestCarving/testCarver/out/petclinic/schemathesis/*/cov/" + constants.COV_XML))
print((coverageStats.getRawCovFiles("realworld")))
| 450 | 27.1875 | 100 | py |