def postprocess(self, x: torch.Tensor) -> np.ndarray:
    r"""
    Converts a normalized tensor of an image to a denormalized numpy array.
    Output: np.uint8, shape: [..., h, w, c], range: [0, 255]
    Args:
        x (torch.Tensor): Input tensor of shape [..., c, h, w] and (approx.) range [min_val, max_val].
    Returns: A post-processed (quantized) sequence array ready for display.
    """
    # assuming shape = [..., c, h, w] -> [..., h, w, c]
    if x.ndim < 3:
        raise ValueError("expected at least three dimensions for input image")
    permutation = list(range(x.ndim - 3)) + [-2, -1, -3]
    x = x.permute(permutation)
    x -= self.value_range_min                         # ~[0, max_val - min_val]
    x /= self.value_range_max - self.value_range_min  # ~[0, 1]
    x *= 255.                                         # ~[0, 255]
    x = torch.clamp(x, 0., 255.)
    x = x.cpu().numpy().astype('uint8')
    return x
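A minimal standalone sketch of the same denormalization on a dummy tensor, assuming a normalization range of [-1, 1] (the `value_range_min`/`value_range_max` attributes are stand-ins here):

import numpy as np
import torch

value_range_min, value_range_max = -1.0, 1.0   # assumed normalization range
x = torch.rand(5, 3, 64, 64) * 2 - 1           # dummy sequence: [t, c, h, w], ~[-1, 1]
x = x.permute(0, 2, 3, 1)                      # -> [t, h, w, c]
x = (x - value_range_min) / (value_range_max - value_range_min) * 255.
frames = torch.clamp(x, 0., 255.).cpu().numpy().astype('uint8')
print(frames.shape, frames.dtype)              # (5, 64, 64, 3) uint8
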
def default_available(self, split: str, **dataset_kwargs):
    r"""
    Tries to load a dataset and a datapoint using the default :attr:`self.data_dir` value.
    If this succeeds, then we can safely use the default data dir,
    otherwise a new dataset has to be downloaded and prepared.
    Args:
        split (str): The dataset's split identifier (i.e. whether it's a training/validation/test dataset).
        **dataset_kwargs (Any): Optional dataset arguments for image transformation, value_range, splitting etc.
    Returns: True if we could load the dataset using default values, False otherwise.
    """
    try:
        kwargs_ = deepcopy(dataset_kwargs)
        kwargs_.update({"data_dir": self.DEFAULT_DATA_DIR})
        default_ = self.__class__(split, **kwargs_)
        default_.set_seq_len(1, 1, 1)
        _ = default_[0]
    except (FileNotFoundError, ValueError, IndexError):  # TODO other exceptions?
        return False
    return True
def download_and_prepare_dataset(cls):
    r"""
    Downloads the specific dataset, prepares it for the video prediction task (if needed)
    and stores it in a default location in the 'data/' folder.
    Implemented by the derived dataset classes.
    """
    raise NotImplementedError
def _random_split(dataset: VPDataset, lengths: Sequence[int], random_seed: int) -> List[VPSubset]:
    r"""
    Custom implementation of torch.utils.data.random_split that returns SubsetWrappers.
    Randomly splits a dataset into non-overlapping new datasets of given lengths,
    using a fixed RNG seed for reproducible results.
    Args:
        dataset (Dataset): Dataset to be split.
        lengths (sequence): Lengths of splits to be produced.
        random_seed (int): RNG seed used for the random permutation.
    Returns:
        A list of VPSubsets containing the randomly split datasets.
    """
    # Cannot verify that dataset is Sized
    if sum(lengths) != len(dataset):
        raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
    indices = list(range(sum(lengths)))
    random.Random(random_seed).shuffle(indices)
    return [VPSubset(dataset, indices[offset - length: offset])
            for offset, length in zip(_accumulate(lengths), lengths)]
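The seeded-shuffle-then-slice idea can be checked in isolation. A sketch using a plain list as a stand-in for the dataset and `itertools.accumulate` in place of torch's private `_accumulate`:

from itertools import accumulate
import random

data = list(range(10))            # stand-in dataset
lengths = [6, 2, 2]
indices = list(range(sum(lengths)))
random.Random(42).shuffle(indices)
splits = [[data[i] for i in indices[offset - length: offset]]
          for offset, length in zip(accumulate(lengths), lengths)]
print([len(s) for s in splits])   # [6, 2, 2]; same seed -> same partition every run
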
def calculate_n_chunks(self, num_frames):
    r"""
    If the given input length is too large, this function returns the number of chunks.
    Each chunk is then used for a separate FVD calculation, and their results are combined afterwards.
    Args:
        num_frames (int): The number of context frames (aka the input length).
    Returns:
        The number of chunks the input sequence needs to be split into,
        as well as a boolean value indicating whether the last chunk has to be neglected.
    """
    n_chunks, drop_last_chunk = 1, False
    if num_frames < self._MIN_T:
        print(f"The I3D Module used for FVD needs at least"
              f" {self._MIN_T} input frames (given: {num_frames}) -> returning None as loss value!")
        n_chunks = -1
    elif num_frames > self._MAX_T:
        possible_chunk_l = range(self._MAX_T, self._MIN_T - 1, -1)
        n_chunks = None
        for chunk_l in possible_chunk_l:
            if num_frames % chunk_l >= self._MIN_T:
                n_chunks = num_frames // chunk_l + 1
                break  # take the largest chunk length whose remainder still forms a valid chunk
        # loss-less chunking not possible -> take the chunk length that misses the fewest frames
        # and drop the last (incomplete) chunk
        if n_chunks is None:
            missed_frames = [num_frames % chunk_l for chunk_l in possible_chunk_l]
            best_chunk_l = min(zip(possible_chunk_l, missed_frames), key=lambda x: x[1])[0]
            n_chunks = num_frames // best_chunk_l + 1
            drop_last_chunk = True
        print(f"The I3D Module used for FVD handles at most {self._MAX_T} input frames (given: {num_frames})"
              f" -> input video will be consumed in {n_chunks} chunks!")
    return n_chunks, drop_last_chunk
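The chunking arithmetic can be traced standalone. A sketch assuming the I3D limits `_MIN_T = 9` and `_MAX_T = 16` (16 matches the print above; the minimum of 9 is an assumption here):

MIN_T, MAX_T = 9, 16   # assumed I3D frame limits
num_frames = 30
for chunk_l in range(MAX_T, MIN_T - 1, -1):
    if num_frames % chunk_l >= MIN_T:
        # chunk_l=16 -> 30 % 16 = 14 >= 9: one full chunk of 16 plus a remainder chunk of 14
        print(chunk_l, num_frames // chunk_l + 1)  # 16 2
        break
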
def _sample_digit(self):
    """
    Samples digit, initial position and speed.
    """
    digit_id = self.get_digit_id()
    cur_digit = np.array(self.data[digit_id][0]) / 255  # dataset entries are (image, label) pairs -> take the image
    digit_size = cur_digit.shape[-1]
    cur_digit = cur_digit[..., np.newaxis]
    if self.num_channels == 3:
        cur_digit = np.repeat(cur_digit, 3, axis=-1)
    # obtaining position in original frame
    x_coord, y_coord = self.get_init_pos(digit_size)
    cur_pos = np.array([y_coord, x_coord])
    # re-sampling speed and acceleration until they exceed the configured minima
    speed_x, speed_y, acc = None, None, None
    while speed_x is None or np.abs(speed_x) < self.min_speed:
        speed_x = self.get_speed()
    while speed_y is None or np.abs(speed_y) < self.min_speed:
        speed_y = self.get_speed()
    while acc is None or np.abs(acc) < self.min_acc:
        acc = self.get_acc()
    speed = np.array([speed_y, speed_x])
    return cur_digit, cur_pos, speed, digit_size
def _move_digit(self, speed, cur_pos, img_size, digit_size):
    """
    Performs digit movement. Also produces bounces and makes appropriate changes.
    """
    next_pos = cur_pos + speed
    for i, p in enumerate(next_pos):
        if p + digit_size > img_size:  # bounce off the far (bottom/right) border
            offset = p + digit_size - img_size
            next_pos[i] = p - offset
            speed[i] = -1 * speed[i]
        elif p < 0:  # bounce off the near (top/left) border
            next_pos[i] = -1 * p
            speed[i] = -1 * speed[i]
    return speed, next_pos
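The reflection logic in isolation, with numpy stand-ins for the position and velocity vectors:

import numpy as np

speed, cur_pos = np.array([5., -3.]), np.array([60., 1.])
img_size, digit_size = 64, 8
next_pos = cur_pos + speed
for i, p in enumerate(next_pos):
    if p + digit_size > img_size:   # bounce off the far border
        next_pos[i] = p - (p + digit_size - img_size)
        speed[i] = -speed[i]
    elif p < 0:                     # bounce off the near border
        next_pos[i] = -p
        speed[i] = -speed[i]
print(next_pos, speed)  # [56. 2.] [-5. 3.]: both components reflected
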
def config(self):
    r"""
    Returns: A dictionary containing the complete model configuration, including common attributes
    as well as model-specific attributes.
    """
    attr_dict = get_public_attrs(self, "config", non_config_vars=self.NON_CONFIG_VARS, model_mode=True)
    img_c, img_h, img_w = self.img_shape
    extra_config = {
        "img_h": img_h,
        "img_w": img_w,
        "img_c": img_c,
        "NAME": self.NAME
    }
    return {**attr_dict, **extra_config}
def pred_1(self, x: torch.Tensor, **kwargs):
    r"""
    Given an input sequence of t frames, predicts one single frame into the future.
    Args:
        x (torch.Tensor): A batch of `b` sequences of `t` input frames as a tensor of shape [b, t, c, h, w].
        **kwargs (Any): Optional input parameters such as actions.
    Returns: A single frame as a tensor of shape [b, c, h, w].
    """
    raise NotImplementedError
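`pred_1` is the single-step primitive; longer rollouts typically feed each prediction back into the input. A sketch using a trivial copy-last-frame predictor (mirroring the CopyLastFrame baseline mentioned later) in place of a trained model:

import torch

def pred_1(x):                            # stand-in for model.pred_1: copy the last input frame
    return x[:, -1]

x = torch.rand(2, 4, 3, 32, 32)           # [b, t, c, h, w]
preds = []
for _ in range(3):                        # predict 3 frames autoregressively
    next_frame = pred_1(x)                # [b, c, h, w]
    preds.append(next_frame)
    x = torch.cat([x, next_frame.unsqueeze(1)], dim=1)
print(torch.stack(preds, dim=1).shape)    # torch.Size([2, 3, 3, 32, 32])
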
def train_iter(self, config: dict, loader: DataLoader, optimizer: Optimizer,
               loss_provider: PredictionLossProvider, epoch: int):
    r"""
    Default training iteration: Loops through the whole data loader once and, for every batch, executes
    forward pass, loss calculation and backward pass/optimization step.
    Args:
        config (dict): The configuration dict of the current training run (combines model, dataset and run config).
        loader (DataLoader): Training data is sampled from this loader.
        optimizer (Optimizer): The optimizer to use for weight update calculations.
        loss_provider (PredictionLossProvider): An instance of the :class:`LossProvider` class for flexible loss calculation.
        epoch (int): The current epoch.
    """
    loop = tqdm(loader)
    for batch_idx, data in enumerate(loop):
        # fwd
        input, targets, actions = self.unpack_data(data, config)
        predictions, model_losses = self(input, pred_frames=config["pred_frames"], actions=actions)
        # loss
        _, total_loss = loss_provider.get_losses(predictions, targets)
        if model_losses is not None:
            for value in model_losses.values():
                total_loss += value
        # bwd
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        # bookkeeping
        loop.set_postfix(loss=total_loss.item())
def eval_iter(self, config: dict, loader: DataLoader, loss_provider: PredictionLossProvider):
    r"""
    Default evaluation iteration: Loops through the whole data loader once and, for every batch, executes
    a forward pass and loss calculation. Then, aggregates all loss values to assess the prediction quality.
    Args:
        config (dict): The configuration dict of the current validation run (combines model, dataset and run config).
        loader (DataLoader): Validation data is sampled from this loader.
        loss_provider (PredictionLossProvider): An instance of the :class:`LossProvider` class for flexible loss calculation.
    Returns: A dictionary containing the average value for each loss type specified for usage,
    as well as the value for the 'indicator' loss (the loss used for determining overall model improvement).
    """
    self.eval()
    loop = tqdm(loader)
    all_losses = []
    indicator_losses = []
    with torch.no_grad():
        for batch_idx, data in enumerate(loop):
            # fwd
            input, targets, actions = self.unpack_data(data, config)
            predictions, model_losses = self(input, pred_frames=config["pred_frames"], actions=actions)
            # metrics
            loss_values, _ = loss_provider.get_losses(predictions, targets)
            all_losses.append(loss_values)
            indicator_losses.append(loss_values[config["val_rec_criterion"]])
    indicator_loss = torch.stack(indicator_losses).mean()
    all_losses = {
        k: torch.stack([loss_values[k] for loss_values in all_losses]).mean().item() for k in all_losses[0].keys()
    }
    self.train()
    return all_losses, indicator_loss
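The dict-of-losses aggregation at the end can be exercised on its own; a sketch with two fake per-batch loss dicts:

import torch

all_losses = [{"mse": torch.tensor(0.4), "lpips": torch.tensor(0.2)},
              {"mse": torch.tensor(0.6), "lpips": torch.tensor(0.4)}]
avg = {k: torch.stack([lv[k] for lv in all_losses]).mean().item()
       for k in all_losses[0].keys()}
print(avg)  # {'mse': 0.5, 'lpips': ~0.3}
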
def save_vid_vis(out_fp: str, context_frames: int, mode: str = "gif", **trajs):
    r"""
    Assembles a video file that displays all given visualizations side-by-side, with a border around each
    visualization that denotes whether an input or a predicted frame is displayed. Depending on the specified
    save mode, the resulting visualization file may differ.
    Args:
        out_fp (str): Where to save the visualization.
        context_frames (int): Number of context frames (needed to add suitable borders around the videos).
        mode (str): A string specifying the save mode: mp4 (uses OpenCV) vs. gif (uses matplotlib, adds titles to the video visualizations).
        **trajs (Any): Any number of videos (keyword becomes video vis title in the coloring/visualization process).
    """
    trajs = {k: v for k, v in trajs.items() if v is not None}  # filter out 'None' trajs
    T, h, w, _ = list(trajs.values())[0].shape
    trajs = add_borders(trajs, context_frames)
    if mode == "gif":  # gif visualizations with matplotlib
        try:
            from matplotlib import pyplot as PLT
            PLT.rcParams.update({'axes.titlesize': 'small'})
            from matplotlib.animation import FuncAnimation
        except ImportError:
            raise ImportError("importing from matplotlib failed "
                              "-> please install matplotlib or use the mp4-mode for visualization.")
        n_trajs = len(trajs)
        plt_scale = 0.01
        plt_cols = math.ceil(math.sqrt(n_trajs))
        plt_rows = math.ceil(n_trajs / plt_cols)
        plt_w = 1.2 * w * plt_scale * plt_cols
        plt_h = 1.4 * h * plt_scale * plt_rows
        fig = PLT.figure(figsize=(plt_w, plt_h), dpi=100)

        def update(t):
            for i, (name, traj) in enumerate(trajs.items()):
                PLT.subplot(plt_rows, plt_cols, i + 1)
                PLT.xticks([])
                PLT.yticks([])
                PLT.title(' '.join(name.split('_')).title())
                PLT.imshow(traj[t])

        anim = FuncAnimation(fig, update, frames=np.arange(T), interval=500)
        anim.save(out_fp, writer="imagemagick", dpi=200)
        PLT.close(fig)
    else:  # mp4 visualizations with opencv and moviepy
        try:
            from moviepy.editor import ImageSequenceClip
        except ImportError:
            raise ImportError("importing from moviepy failed"
                              " -> please install moviepy or use the gif-mode for visualization.")
        combined_traj = np.concatenate(list(trajs.values()), axis=-2)  # put visualizations next to each other
        out_paths = []
        for t, frame in enumerate(list(combined_traj)):
            out_fn = f"{out_fp[:-4]}_t{t}.jpg"
            out_paths.append(out_fn)
            out_frame_BGR = frame[:, :, ::-1]
            cv.imwrite(out_fn, out_frame_BGR)
        clip = ImageSequenceClip(out_paths, fps=2)
        clip.write_videofile(f"{out_fp[:-4]}.mp4", fps=2)
        for out_fn in out_paths:
            os.remove(out_fn)
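A call sketch, assuming two aligned uint8 videos of shape [T, h, w, c] and that `save_vid_vis` (with its `add_borders` dependency) is importable; keyword names become the panel titles in gif mode:

import numpy as np

gt = np.random.randint(0, 255, (10, 64, 64, 3), dtype=np.uint8)
pred = np.random.randint(0, 255, (10, 64, 64, 3), dtype=np.uint8)
save_vid_vis(out_fp="vis.gif", context_frames=4, mode="gif",
             ground_truth=gt, prediction=pred)  # titles: "Ground Truth", "Prediction"
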
def visualize_vid(dataset, context_frames: int, pred_frames: int, model, device: str,
                  out_path: Path, vis_idx: Iterable[int], vis_mode: str):
    r"""
    Extracts certain data points from the given dataset, uses the given model to obtain predictions for these
    sequences and saves visualizations of these ground truth/predicted sequences.
    Args:
        dataset (VPDataset): The dataset the data is taken from.
        context_frames (int): Number of input/context frames.
        pred_frames (int): Number of frames to predict.
        model (VPModel): The prediction model.
        device (str): The device that should be used for visualization creation (GPU vs. CPU).
        out_path (Path): A path object containing the directory where the visualizations should be saved.
        vis_idx (Iterable[int]): An iterable of dataset indices which should be used to obtain the data points used for vis.
        vis_mode (str): A string specifying the save mode: mp4 (uses OpenCV) vs. gif (uses matplotlib, adds titles to the video visualizations).
    """
    out_fn_template = "vis_{}." + vis_mode
    data_unpack_config = {"device": device, "context_frames": context_frames, "pred_frames": pred_frames}
    if vis_idx is None or any(x >= len(dataset) for x in vis_idx):
        raise ValueError(f"invalid vis_idx provided for visualization "
                         f"(dataset len = {len(dataset)}): {vis_idx}")
    for i, n in enumerate(vis_idx):
        # prepare input and ground truth sequence
        input_vis, pred_vis = get_vis_from_model(dataset, dataset[n], model,
                                                 data_unpack_config, pred_frames)
        # visualize
        out_filename = str(out_path / out_fn_template.format(str(i)))
        save_vid_vis(out_fp=out_filename, context_frames=context_frames,
                     GT=input_vis, Pred=pred_vis, mode=vis_mode)
def save_frame_compare_img(out_filename: str, context_frames: int, ground_truth_vis: np.ndarray,
                           preds_vis: List[np.ndarray], vis_context_frame_idx: Iterable[int]):
    r"""
    Given a ground truth frame sequence as well as prediction sequences, creates and saves
    a large image file that displays the ground truth sequence in the first row and the predictions each in a
    row below (for them, only the predicted frames are put into the graphic).
    As specified by vis_context_frame_idx, only the selected input frames are put onto the visualization image.
    Args:
        out_filename (str): Output filename.
        context_frames (int): Number of input/context frames.
        ground_truth_vis (np.ndarray): The ground truth frame sequence.
        preds_vis (List[np.ndarray]): The predicted frame sequences.
        vis_context_frame_idx (Iterable[int]): A list of indices for the context frames. For the ground truth row, only these context frames will be displayed to unclutter the visualization.
    """
    border = 2
    all_seqs = [ground_truth_vis] + preds_vis
    T, h, w, c = ground_truth_vis.shape
    hb, wb = h + border, w + border  # img sizes with borders
    H = (hb * len(all_seqs)) - border  # height of resulting vis.
    W_context = wb * len(vis_context_frame_idx)  # width of context part of resulting vis.
    W_pred = wb * (T - context_frames)  # width of prediction part of resulting vis.
    # left part of seq vis: only first row is populated (with context frames)
    large_img_context = np.ones((H, W_context, c), dtype=np.uint8) * 255
    for n_frame, context_i in enumerate(vis_context_frame_idx):
        w_start = n_frame * wb
        large_img_context[:h, w_start:w_start + w] = ground_truth_vis[context_i]
    # right part of seq vis: display predictions below ground truth frame-by-frame
    large_img_pred = np.ones((H, W_pred, c), dtype=np.uint8) * 255
    for n_seq, seq in enumerate(all_seqs):
        for t in range(context_frames, T):
            h_start = n_seq * hb
            w_start = (t - context_frames) * wb + border
            large_img_pred[h_start:h_start + h, w_start:w_start + w, :] = seq[t]
    large_img = np.concatenate([large_img_context, large_img_pred], axis=-2)
    cv.imwrite(out_filename, cv.cvtColor(large_img, cv.COLOR_RGB2BGR))
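The canvas dimensions follow directly from the constants above; a quick check of the arithmetic for T=20 frames, 64x64 images, 10 context frames of which 3 are displayed, and 2 prediction rows:

border, T, h, w = 2, 20, 64, 64
context_frames, n_shown_context, n_preds = 10, 3, 2
hb, wb = h + border, w + border
H = hb * (1 + n_preds) - border                    # ground truth row + one row per model
W = wb * n_shown_context + wb * (T - context_frames)
print(H, W)  # 196 858
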
def visualize_sequences(dataset, context_frames, pred_frames, models, device,
                        out_path, vis_idx, vis_context_frame_idx, vis_vid_mode):
    r"""
    Extracts certain data points from the given dataset, uses each given model to obtain predictions for these
    sequences and saves visualizations of these ground truth/predicted sequences. Also creates a large graphic
    comparing the visualizations of the different models against the ground truth sequence using
    :meth:`save_frame_compare_img`.
    Args:
        dataset (VPDataset): The dataset the data is taken from.
        context_frames (int): Number of input/context frames.
        pred_frames (int): Number of frames to predict.
        models (Iterable[VPModel]): The prediction models.
        device (str): The device that should be used for visualization creation (GPU vs. CPU).
        out_path (Path): A path object containing the directory where the visualizations should be saved.
        vis_idx (Iterable[int]): An iterable of dataset indices which should be used to obtain the data points used for vis.
        vis_context_frame_idx (Iterable[int]): A list of indices for the context frames. For the ground truth row, only these context frames will be displayed to unclutter the visualization.
        vis_vid_mode (str): A string specifying the save mode: mp4 (uses OpenCV) vs. gif (uses matplotlib, adds titles to the video visualizations).
    """
    data_unpack_config = {"device": device, "context_frames": context_frames, "pred_frames": pred_frames}
    info_file_lines = [f"DATASET: {dataset.NAME}", f"chosen dataset idx: {vis_idx}",
                       f"Displayed context frames: {list(range(context_frames))}, ({vis_context_frame_idx} in seq_img)",
                       f"Displayed pred frames: {list(range(context_frames, context_frames + pred_frames))}",
                       "Displayed rows (from top):", " - Ground Truth"]
    if vis_idx is None or any(x >= len(dataset) for x in vis_idx):
        raise ValueError(f"invalid vis_idx provided for visualization "
                         f"(dataset len = {len(dataset)}): {vis_idx}")
    for i, n in enumerate(vis_idx):
        data = dataset[n]  # [T, c, h, w]
        ground_truth_vis = None
        preds_vis = []
        for j, model in enumerate(models):
            if model.model_dir is None:  # skip baseline models such as CopyLastFrame
                continue
            input_vis, pred_vis = get_vis_from_model(dataset, data, model,
                                                     data_unpack_config, pred_frames)
            if ground_truth_vis is None:
                ground_truth_vis = input_vis
            preds_vis.append(pred_vis)
            # visualize as vid
            vis_vid_out_fn = str(out_path / f"vis_{i}_model_{j}.{vis_vid_mode}")
            save_vid_vis(out_fp=vis_vid_out_fn, context_frames=context_frames,
                         GT=input_vis, Pred=pred_vis, mode=vis_vid_mode)
            if i == 0:
                info_file_lines.append(f" - model {j}: {model.NAME} (model dir: {model.model_dir})")
        # visualize as img if context frame idx are given
        if vis_context_frame_idx is not None:
            vis_img_out_fn = str((out_path / f"vis_{i}.png").resolve())
            save_frame_compare_img(vis_img_out_fn, context_frames, ground_truth_vis,
                                   preds_vis, vis_context_frame_idx)
        info_file_lines.append(f"vis {i} (idx {n}) origin: {data['origin']}")
    vis_info_fn = str((out_path / "vis_info.txt").resolve())
    with open(vis_info_fn, "w") as vis_info_file:
        vis_info_file.writelines(line + '\n' for line in info_file_lines)
def save_arr_hist(diff: np.ndarray, diff_id: int):
    r"""
    Given a numpy array containing values, creates and saves a histogram figure that shows the distribution
    of values within the array, as well as min, max and average.
    Args:
        diff (np.ndarray): Input array containing the values to visualize.
        diff_id (int): An id used in the save name.
    """
    avg_diff, min_diff, max_diff = np.average(diff), np.min(diff), np.max(diff)
    plt.hist(diff.flatten(), bins=1000, log=True)
    plt.suptitle(f"np.abs(their_pred - our_pred)\n"
                 f"min: {min_diff}, max: {max_diff}, avg: {avg_diff}")
    plt.xlabel("Value")
    plt.ylabel("Frequency")
    plt.xscale("log")
    plt.yscale("log")
    plt.savefig(f"diff_{diff_id}.png")
def lstm_model(num_units, rnn_layers, dense_layers=None, learning_rate=0.1, optimizer='Adagrad'):
    """
    Creates a deep model based on:
        * stacked lstm cells
        * optional dense layers
    :param num_units: the size of the cells.
    :param rnn_layers: list of int or dict
        * list of int: the steps used to instantiate the `BasicLSTMCell` cell
        * list of dict: [{steps: int, keep_prob: int}, ...]
    :param dense_layers: list of nodes for each layer
    :param learning_rate: learning rate passed to the optimizer
    :param optimizer: name of the optimizer to use (e.g. 'Adagrad')
    :return: the model definition
    """
    def lstm_cells(layers):
        if isinstance(layers[0], dict):
            return [tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.BasicLSTMCell(layer['num_units'],
                                                                               state_is_tuple=True),
                                                  layer['keep_prob'])
                    if layer.get('keep_prob') else tf.nn.rnn_cell.BasicLSTMCell(layer['num_units'],
                                                                                state_is_tuple=True)
                    for layer in layers]
        return [tf.nn.rnn_cell.BasicLSTMCell(steps, state_is_tuple=True) for steps in layers]

    def dnn_layers(input_layers, layers):
        if layers and isinstance(layers, dict):
            return learn.ops.dnn(input_layers,
                                 layers['layers'],
                                 activation=layers.get('activation'),
                                 dropout=layers.get('dropout'))
        elif layers:
            return learn.ops.dnn(input_layers, layers)
        else:
            return input_layers

    def _lstm_model(X, y):
        stacked_lstm = tf.nn.rnn_cell.MultiRNNCell(lstm_cells(rnn_layers), state_is_tuple=True)
        x_ = tf.unpack(X, axis=1, num=num_units)
        output, layers = tf.nn.rnn(stacked_lstm, x_, dtype=dtypes.float32)
        output = dnn_layers(output[-1], dense_layers)
        prediction, loss = learn.models.linear_regression(output, y)
        train_op = tf.contrib.layers.optimize_loss(
            loss, tf.contrib.framework.get_global_step(), optimizer=optimizer,
            learning_rate=learning_rate)
        return prediction, loss, train_op

    return _lstm_model
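A hedged usage sketch for this legacy `tf.contrib.learn` style API (it targets old TensorFlow 0.x releases, where `tf.unpack`/`tf.nn.rnn` still existed and `learn.Estimator` accepted an old-style `(X, y) -> (prediction, loss, train_op)` model function; the shapes below are assumptions):

model_fn = lstm_model(num_units=10, rnn_layers=[10, 10], dense_layers=[8])
estimator = learn.Estimator(model_fn=model_fn)
# estimator.fit(x=train_x, y=train_y, steps=1000)  # train_x assumed shape: [batch, num_units, n_features]
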
def vrep_pose_from_plane(plane):
    """Creates a V-REP-compatible transformation matrix from a Rhino/Grasshopper
    plane.
    This function might need rework, as the source of the 90-deg Y rotation
    need is not entirely clear to me (related to the RFL model mismatch).
    """
    translation_matrix = rs.XformTranslation((plane[0][0], plane[0][1], plane[0][2]))
    plane_start = rs.PlaneFromFrame(rs.AddPoint(0, 0, 0), rs.AddPoint(1, 0, 0), rs.AddPoint(0, 1, 0))
    plane_end = rs.PlaneFromFrame(rs.AddPoint(0, 0, 0),
                                  rs.AddPoint(plane[1][0], plane[1][1], plane[1][2]),
                                  rs.AddPoint(plane[2][0], plane[2][1], plane[2][2]))
    rotation_matrix = rs.XformRotation1(plane_start, plane_end)
    matrix = rs.XformMultiply(translation_matrix, rotation_matrix)
    return [matrix.M00, matrix.M01, matrix.M02, matrix.M03,
            matrix.M10, matrix.M11, matrix.M12, matrix.M13,
            matrix.M20, matrix.M21, matrix.M22, matrix.M23]
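The returned pose is the first three rows of the 4x4 homogeneous transform, flattened row-major into 12 values. The layout in isolation, assuming an identity rotation with translation (1, 2, 3):

import numpy as np

m = np.eye(4)
m[:3, 3] = [1.0, 2.0, 3.0]          # translation in the last column
pose = m[:3, :].flatten().tolist()  # 12 values: rows M0x, M1x, M2x
print(pose)  # [1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 2.0, 0.0, 0.0, 1.0, 3.0]
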
def find_path(cls, host='127.0.0.1', port=19997, mode='local', **kwargs):
    """Finds a path for the specified scene description. A large number of
    parameters can be passed as `kwargs`. It can run in two modes: *remote* or
    *local*. In remote mode, the `host` and `port` parameters correspond to a
    simulation coordinator, and in local mode, `host` and `port` correspond to
    a V-REP instance.
    Args:
        host (:obj:`str`): IP address of the service (simulation coordinator in `remote`, V-REP in `local` mode).
        port (:obj:`int`): Port of the service.
        mode (:obj:`str`): Execution mode, either ``local`` or ``remote``.
        kwargs: Keyword arguments.
    Returns:
        list: List of configurations representing a path.
    """
    parser = InputParameterParser()
    options = {'robots': []}
    active_robot = None
    if 'robots' in kwargs:
        for i, settings in enumerate(kwargs['robots']):
            if 'robot' not in settings:
                raise KeyError("'robot' not found at kwargs['robots'][%d]" % i)
            robot = {'robot': settings['robot']}
            if 'start' in settings:
                start = parser.get_config_or_pose(settings['start'])
                if start:
                    robot['start'] = start.to_data()
            if 'goal' in settings:
                if not active_robot:
                    active_robot = robot
                    goal = parser.get_config_or_pose(settings['goal'])
                    if goal:
                        robot['goal'] = goal.to_data()
                else:
                    raise ValueError('Multi-move is not (yet) supported. Only one goal can be specified.')
            if 'building_member' in settings:
                robot['building_member'] = mesh_from_guid(Mesh, settings['building_member']).to_data()
            if 'metric_values' in settings:
                robot['metric_values'] = [float(x) for x in settings['metric_values'].split(',')]
            if 'joint_limits' in settings:
                robot['joint_limits'] = settings['joint_limits']
            options['robots'].append(robot)
    if 'collision_meshes' in kwargs:
        mesh_guids = parser.compact_list(kwargs['collision_meshes'])
        options['collision_meshes'] = [mesh_from_guid(Mesh, m).to_data() for m in mesh_guids]
    options['debug'] = kwargs.get('debug')
    options['trials'] = kwargs.get('trials')
    options['shallow_state_search'] = kwargs.get('shallow_state_search')
    options['optimize_path_length'] = kwargs.get('optimize_path_length')
    options['planner_id'] = kwargs.get('planner_id')
    options['resolution'] = kwargs.get('resolution')
    if mode == 'remote':
        LOG.debug('Running remote path planner executor. Host=%s:%d', host, port)
        return SimulationCoordinator.remote_executor(options, host, port)
    else:
        LOG.debug('Running local path planner executor. Host=%s:%d', host, port)
        return SimulationCoordinator.local_executor(options, host, port)
def from_revolute_values(cls, values):
    """Construct a configuration from revolute joint values in radians.
    Parameters
    ----------
    values : :obj:`list` of :obj:`float`
        Joint values expressed in radians.
    Returns
    -------
    :class:`Configuration`
        An instance of :class:`Configuration`.
    """
    values = list(values)
    return cls.from_data({'values': values, 'types': [Joint.REVOLUTE] * len(values)})
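A usage sketch for a six-axis robot, with joint values assumed in radians and `Configuration` taken from the snippet above:

import math

config = Configuration.from_revolute_values([0, math.pi / 2, 0, 0, -math.pi / 2, 0])
print(config.types)  # six times Joint.REVOLUTE
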
def from_prismatic_and_revolute_values(cls, prismatic_values, revolute_values):
    """Construct a configuration from prismatic and revolute joint values.
    Parameters
    ----------
    prismatic_values : :obj:`list` of :obj:`float`
        Positions on the external axis system in meters.
    revolute_values : :obj:`list` of :obj:`float`
        Joint values expressed in radians.
    Returns
    -------
    :class:`Configuration`
        An instance of :class:`Configuration`.
    """
    # Force iterables into lists
    prismatic_values = list(prismatic_values)
    revolute_values = list(revolute_values)
    values = prismatic_values + revolute_values
    types = [Joint.PRISMATIC] * \
        len(prismatic_values) + [Joint.REVOLUTE] * len(revolute_values)
    return cls.from_data({'values': values, 'types': types})
def scale(self, scale_factor):
    """Scales the joint positions of the current configuration.
    Only scalable joints are scaled, i.e. planar and prismatic joints.
    Parameters
    ----------
    scale_factor : float
        Scale factor.
    """
    values_scaled = []
    for value, joint_type in zip(self.values, self.types):
        if joint_type in (Joint.PLANAR, Joint.PRISMATIC):
            value *= scale_factor
        values_scaled.append(value)
    self.values = values_scaled
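A common use is unit conversion: only the prismatic (gantry) values change, while revolute values stay in radians. A sketch assuming the constructors above:

config = Configuration.from_prismatic_and_revolute_values(
    [8300, -1000, -3500],                 # external axes in mm
    [0.0, 1.57, 0.0, 0.0, 0.0, 0.0])      # joints in radians
config.scale(0.001)                       # mm -> m
print(config.values[:3])  # ~[8.3, -1.0, -3.5]; revolute values unchanged
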
def validate_response(response):
    """Raise an exception if the response indicates an error condition."""
    if response.error_code != MoveItErrorCodes.SUCCESS:
        raise RosError(response.error_code.human_readable,
                       int(response.error_code))
def inverse_kinematics_async(self, callback, errback, frame, base_link, group,
                             joint_names, joint_positions, avoid_collisions=True,
                             constraints=None, attempts=8, attached_collision_meshes=None):
    """Asynchronous handler of MoveIt IK service."""
    header = Header(frame_id=base_link)
    pose = Pose.from_frame(frame)
    pose_stamped = PoseStamped(header, pose)
    joint_state = JointState(
        name=joint_names, position=joint_positions, header=header)
    start_state = RobotState(
        joint_state, MultiDOFJointState(header=header))
    if attached_collision_meshes:
        for acm in attached_collision_meshes:
            aco = AttachedCollisionObject.from_attached_collision_mesh(acm)
            start_state.attached_collision_objects.append(aco)
    ik_request = PositionIKRequest(group_name=group,
                                   robot_state=start_state,
                                   constraints=constraints,
                                   pose_stamped=pose_stamped,
                                   avoid_collisions=avoid_collisions,
                                   attempts=attempts)

    def convert_to_positions(response):
        callback(response.solution.joint_state.position)

    self.GET_POSITION_IK(self, (ik_request, ), convert_to_positions, errback)
def forward_kinematics_async(self, callback, errback, joint_positions, base_link,
                             group, joint_names, ee_link):
    """Asynchronous handler of MoveIt FK service."""
    header = Header(frame_id=base_link)
    fk_link_names = [ee_link]
    joint_state = JointState(
        name=joint_names, position=joint_positions, header=header)
    robot_state = RobotState(
        joint_state, MultiDOFJointState(header=header))

    def convert_to_frame(response):
        callback(response.pose_stamped[0].pose.frame)

    self.GET_POSITION_FK(self, (header, fk_link_names,
                                robot_state), convert_to_frame, errback)
Python | def plan_cartesian_motion_async(self, callback, errback, frames, base_link,
ee_link, group, joint_names, joint_types,
start_configuration, max_step, jump_threshold,
avoid_collisions, path_constraints,
attached_collision_meshes):
"""Asynchronous handler of MoveIt cartesian motion planner service."""
header = Header(frame_id=base_link)
waypoints = [Pose.from_frame(frame) for frame in frames]
joint_state = JointState(
header=header, name=joint_names, position=start_configuration.values)
start_state = RobotState(
joint_state, MultiDOFJointState(header=header))
if attached_collision_meshes:
for acm in attached_collision_meshes:
aco = AttachedCollisionObject.from_attached_collision_mesh(acm)
start_state.attached_collision_objects.append(aco)
request = dict(header=header,
start_state=start_state,
group_name=group,
link_name=ee_link,
waypoints=waypoints,
max_step=float(max_step),
jump_threshold=float(jump_threshold),
avoid_collisions=bool(avoid_collisions),
path_constraints=path_constraints)
def convert_to_trajectory(response):
trajectory = JointTrajectory()
trajectory.source_message = response
trajectory.fraction = response.fraction
trajectory.joint_names = response.solution.joint_trajectory.joint_names
trajectory.points = convert_trajectory_points(response.solution.joint_trajectory.points, joint_types)
trajectory.start_configuration = Configuration(response.start_state.joint_state.position, start_configuration.types)
callback(trajectory)
self.GET_CARTESIAN_PATH(self, request, convert_to_trajectory, errback) |
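A hedged usage sketch of the cartesian planner through the public synchronous API (robot and start_configuration are assumed to exist; plan_cartesian_motion is the entry point that eventually delegates to this handler):
from compas.geometry import Frame

frames = [Frame([0.3, 0.1, 0.5], [1, 0, 0], [0, 1, 0]),
          Frame([0.4, 0.3, 0.4], [0, 1, 0], [0, 0, 1])]
trajectory = robot.plan_cartesian_motion(frames, start_configuration,
                                         max_step=0.01, avoid_collisions=True)
print('Completed %.0f%% of the path' % (trajectory.fraction * 100))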
Python | def plan_motion_async(self, callback, errback, goal_constraints, base_link,
ee_link, group, joint_names, joint_types,
start_configuration, path_constraints=None,
trajectory_constraints=None,
planner_id='', num_planning_attempts=8,
allowed_planning_time=2.,
max_velocity_scaling_factor=1.,
max_acceleration_scaling_factor=1.,
attached_collision_meshes=None,
workspace_parameters=None):
"""Asynchronous handler of MoveIt motion planner service."""
# http://docs.ros.org/jade/api/moveit_core/html/utils_8cpp_source.html
# TODO: if list of frames (goals) => receive multiple solutions?
header = Header(frame_id=base_link)
joint_state = JointState(
header=header, name=joint_names, position=start_configuration.values)
start_state = RobotState(
joint_state, MultiDOFJointState(header=header))
if attached_collision_meshes:
for acm in attached_collision_meshes:
aco = AttachedCollisionObject.from_attached_collision_mesh(acm)
start_state.attached_collision_objects.append(aco)
# goal constraints
constraints = Constraints()
for c in goal_constraints:
if c.type == c.JOINT:
constraints.joint_constraints.append(
JointConstraint.from_joint_constraint(c))
elif c.type == c.POSITION:
constraints.position_constraints.append(
PositionConstraint.from_position_constraint(header, c))
elif c.type == c.ORIENTATION:
constraints.orientation_constraints.append(
OrientationConstraint.from_orientation_constraint(header, c))
else:
raise NotImplementedError
goal_constraints = [constraints]
# path constraints
if path_constraints:
constraints = Constraints()
for c in path_constraints:
if c.type == c.JOINT:
constraints.joint_constraints.append(
JointConstraint.from_joint_constraint(c))
elif c.type == c.POSITION:
constraints.position_constraints.append(
PositionConstraint.from_position_constraint(header, c))
elif c.type == c.ORIENTATION:
constraints.orientation_constraints.append(
OrientationConstraint.from_orientation_constraint(header, c))
else:
raise NotImplementedError
path_constraints = constraints
request = dict(start_state=start_state,
goal_constraints=goal_constraints,
path_constraints=path_constraints,
trajectory_constraints=trajectory_constraints,
planner_id=planner_id,
group_name=group,
num_planning_attempts=num_planning_attempts,
allowed_planning_time=allowed_planning_time,
max_velocity_scaling_factor=max_velocity_scaling_factor,
max_acceleration_scaling_factor=max_acceleration_scaling_factor)
# workspace_parameters=workspace_parameters
def convert_to_trajectory(response):
trajectory = JointTrajectory()
trajectory.source_message = response
trajectory.fraction = 1.
trajectory.joint_names = response.trajectory.joint_trajectory.joint_names
trajectory.points = convert_trajectory_points(response.trajectory.joint_trajectory.points, joint_types)
trajectory.start_configuration = Configuration(response.trajectory_start.joint_state.position, start_configuration.types)
trajectory.planning_time = response.planning_time
callback(trajectory)
self.GET_MOTION_PLAN(self, request, convert_to_trajectory, errback) |
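The goal_constraints consumed above are typically built with the Robot helper methods shown further below; a hedged end-to-end sketch (frame and tolerance values are illustrative):
import math
from compas.geometry import Frame

frame = Frame([0.4, 0.3, 0.4], [0, 1, 0], [0, 0, 1])
goal_constraints = robot.constraints_from_frame(frame, 0.001, [math.radians(1)] * 3)
# trajectory = robot.plan_motion(goal_constraints, start_configuration, group)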
Python | def remove_collision_mesh(self, id):
"""Remove a collision mesh from the planning scene."""
co = CollisionObject()
co.id = id
self._collision_object(co, CollisionObject.REMOVE) |
Python | def add_attached_collision_mesh(self, attached_collision_mesh):
"""Add a collision mesh attached to the robot."""
aco = AttachedCollisionObject.from_attached_collision_mesh(
attached_collision_mesh)
self._attached_collision_object(aco, operation=CollisionObject.ADD) |
Python | def remove_attached_collision_mesh(self, id):
"""Add an attached collision mesh from the robot."""
aco = AttachedCollisionObject()
aco.object.id = id
return self._attached_collision_object(aco, operation=CollisionObject.REMOVE) |
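A hedged sketch of the collision-mesh lifecycle these three methods cover (file name, id and link name are illustrative assumptions):
from compas.datastructures import Mesh
from compas_fab.robots import AttachedCollisionMesh, CollisionMesh

# floor = CollisionMesh(Mesh.from_stl('floor.stl'), 'floor')
# client.add_attached_collision_mesh(AttachedCollisionMesh(floor, 'ee_link'))
# client.remove_attached_collision_mesh('floor')
# client.remove_collision_mesh('floor')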
Python | def clean(ctx, docs=True, bytecode=True, builds=True):
"""Cleans the local copy from compiled artifacts."""
if builds:
ctx.run('python setup.py clean')
if bytecode:
for root, dirs, files in os.walk(BASE_FOLDER):
for f in files:
if f.endswith('.pyc'):
os.remove(os.path.join(root, f))
if '.git' in dirs:
dirs.remove('.git')
folders = []
if docs:
folders.append('docs/_build/')
folders.append('docs/reference/generated')
folders.append('dist/')
if bytecode:
folders.append('src/compas_fab/__pycache__')
if builds:
folders.append('build/')
folders.append('src/compas_fab.egg-info/')
for folder in folders:
rmtree(os.path.join(BASE_FOLDER, folder), ignore_errors=True) |
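These maintenance helpers are invoke tasks; a hedged sketch of calling one programmatically instead of through the CLI (assuming the invoke package is installed):
from invoke import Context

# Wipe bytecode and build artifacts but keep the generated docs.
clean(Context(), docs=False, bytecode=True, builds=True)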
Python | def docs(ctx, doctest=False, rebuild=False, check_links=False):
"""Builds package's HTML documentation."""
if rebuild:
clean(ctx)
if doctest:
ctx.run('sphinx-build -b doctest docs dist/docs')
ctx.run('sphinx-build -b html docs dist/docs')
if check_links:
ctx.run('sphinx-build -b linkcheck docs dist/docs') |
Python | def check(ctx):
"""Check the consistency of documentation, coding style and a few other things."""
log.write('Checking MANIFEST.in...')
ctx.run('check-manifest --ignore-bad-ideas=remoteApi.so')
log.write('Checking ReStructuredText formatting...')
ctx.run('python setup.py check --strict --metadata --restructuredtext')
# log.write('Running flake8 python linter...')
# ctx.run('flake8 src setup.py')
# log.write('Checking python imports...')
# ctx.run('isort --check-only --diff --recursive src tests setup.py') |
Python | def release(ctx, release_type):
"""Releases the project in one swift command!"""
if release_type not in ('patch', 'minor', 'major'):
raise Exit('The release type parameter is invalid.\nMust be one of: major, minor, patch')
# Run checks
ctx.run('invoke check test')
# Bump version and git tag it
ctx.run('bump2version %s --verbose' % release_type)
# Build project
ctx.run('python setup.py clean --all sdist bdist_wheel')
# Upload to pypi
if confirm('You are about to upload the release to pypi.org. Are you sure? [y/N]'):
files = ['dist/*.whl', 'dist/*.gz', 'dist/*.zip']
dist_files = ' '.join([pattern for f in files for pattern in glob.glob(f)])
if len(dist_files):
ctx.run('twine upload --skip-existing %s' % dist_files)
else:
raise Exit('No files found to release')
else:
raise Exit('Aborted release') |
Python | def create(self, link=None):
"""Recursive function that triggers the drawing of the robot geometry.
This method delegates the geometry drawing to the :meth:`draw_geometry`
method. It transforms the geometry based on the saved initial
transformation from the robot model.
Parameters
----------
link : :class:`compas.robots.Link`, optional
Link instance to create. Defaults to the robot model's root.
Returns
-------
None
"""
if link is None:
link = self.robot.root
for item in itertools.chain(link.visual, link.collision):
if item.geometry.geo:
color = None
if hasattr(item, 'get_color'):
color = item.get_color()
# create native geometry
item.native_geometry = self.draw_geometry(item.geometry.geo, color)
# transform native geometry based on saved init transform
self.transform(item.native_geometry, item.init_transformation)
item.current_transformation = Transformation()
for child_joint in link.joints:
self.create(child_joint.child_link) |
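create() delegates backend-specific drawing to draw_geometry and transform, which concrete artists override per CAD environment; a minimal subclass sketch (the pass-through drawing is an illustrative assumption, BaseRobotArtist is the assumed base class):
class MinimalRobotArtist(BaseRobotArtist):
    def draw_geometry(self, geometry, color=None):
        # A real artist converts the COMPAS mesh to the CAD-native type here.
        return geometry

    def transform(self, native_geometry, transformation):
        native_geometry.transform(transformation)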
Python | def scale_link(self, link, transformation):
"""Recursive function to apply the scale transformation on each link.
"""
for item in itertools.chain(link.visual, link.collision):
# Some links have only collision geometry, not visual. These meshes
# have not been loaded.
if item.native_geometry:
self.transform(item.native_geometry, transformation)
for child_joint in link.joints:
# Recursive call
self.scale_link(child_joint.child_link, transformation) |
Python | def _apply_transformation_on_transformed_link(self, item, transformation):
"""Applies a transformation on a link that is already transformed.
Calculates the relative transformation and applies it to the link
geometry. This is to prevent the recreation of large meshes.
Parameters
----------
item: :class:`compas.robots.Visual` or :class:`compas.robots.Collision`
The visual or collidable object of a link.
transformation: :class:`Transformation`
The (absolute) transformation to apply onto the link's geometry.
Returns
-------
None
"""
relative_transformation = transformation * item.current_transformation.inverse()
self.transform(item.native_geometry, relative_transformation)
item.current_transformation = transformation |
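The relative transformation satisfies T_rel * T_current = T_new, so applying it to the already-transformed geometry lands it at the new pose without redrawing; a quick numeric sketch with compas:
from compas.geometry import Frame, Transformation

T_current = Transformation.from_frame(Frame([1, 0, 0], [1, 0, 0], [0, 1, 0]))
T_new = Transformation.from_frame(Frame([0, 2, 0], [0, 1, 0], [-1, 0, 0]))
T_rel = T_new * T_current.inverse()
print(T_rel * T_current)  # equals T_new up to floating-point error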
Python | def update(self, configuration, names, visual=True, collision=True):
"""Triggers the update of the robot geometry.
Parameters
----------
configuration : :class:`compas_fab.robots.Configuration`
Instance of the configuration (joint state) to move to.
names : list of string
The names of the configurable joints to update.
visual : bool, optional
``True`` if the visual geometry should be also updated, otherwise ``False``.
Defaults to ``True``.
collision : bool, optional
``True`` if the collision geometry should be also updated, otherwise ``False``.
Defaults to ``True``.
"""
positions = configuration.values
if len(names) != len(positions):
raise ValueError("len(names): %d is not len(positions) %d" % (len(names), len(positions)))
joint_state = dict(zip(names, positions))
transformations = self.robot.compute_transformations(joint_state)
for j in self.robot.iter_joints():
link = j.child_link
if visual:  # honor the visual flag, mirroring the collision branch below
for item in link.visual:
self._apply_transformation_on_transformed_link(item, transformations[j.name])
if collision:
for item in link.collision:
# some links have only collision geometry, not visual. These meshes have not been loaded.
if item.native_geometry:
self._apply_transformation_on_transformed_link(item, transformations[j.name]) |
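A hedged usage sketch: sample a configuration and move only the visual meshes (artist is an assumed instance of a concrete artist subclass):
names = robot.get_configurable_joint_names()
config = robot.random_configuration()
artist.update(config, names, visual=True, collision=False)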
Python | def draw_visual(self):
"""Draws all visual geometry of the robot."""
for link in self.robot.iter_links():
for item in link.visual:
yield item.native_geometry |
Python | def draw_collision(self):
"""Draws all collision geometry of the robot."""
for link in self.robot.iter_links():
for item in link.collision:
if item.native_geometry:
yield item.native_geometry |
Python | def forward_kinematics(self, robot):
"""Calculates forward kinematics to get the current end-effector pose.
Args:
robot (:class:`Robot`): Robot instance.
Examples:
>>> from compas_fab.robots import *
>>> with VrepClient() as client:
... frame = client.forward_kinematics(rfl.Robot('A'))
Returns:
An instance of :class:`Frame`.
"""
assert_robot(robot)
_res, _, pose, _, _ = self.run_child_script('getIkTipPose',
[robot.model.attr['index']],
[], [])
return vrep_pose_to_frame(pose, self.scale) |
Python | def inverse_kinematics(self, robot, goal_frame, metric_values=None, gantry_joint_limits=None, arm_joint_limits=None, max_trials=None, max_results=1):
"""Calculates inverse kinematics to find valid robot configurations for the specified goal frame.
Args:
robot (:class:`Robot`): Robot instance.
goal_frame (:class:`Frame`): Target or goal frame.
metric_values (:obj:`list` of :obj:`float`): List containing one value
per configurable joint. Each value ranges from 0 to 1,
where 1 indicates the axis/joint is blocked and cannot
move during inverse kinematic solving.
gantry_joint_limits (:obj:`list` of `float`): List of 6 floats defining the upper/lower limits of
gantry joints. Use this if you want to restrict the area in which to search for states.
arm_joint_limits (:obj:`list` of `float`): List of 12 floats defining the upper/lower limits of
arm joints. Use this if you want to restrict the working area in which to search for states.
max_trials (:obj:`int`): Number of trials to run. Set to ``None``
to retry infinitely.
max_results (:obj:`int`): Maximum number of result states to return.
Returns:
list: List of :class:`Configuration` objects representing
the collision-free configurations for the ``goal_frame``.
"""
assert_robot(robot)
joints = len(robot.get_configurable_joints())
if not metric_values:
metric_values = [0.1] * joints
self.set_robot_metric(robot, metric_values)
states = self._find_raw_robot_states(robot, frame_to_vrep_pose(goal_frame, self.scale), gantry_joint_limits, arm_joint_limits, max_trials, max_results)
return [config_from_vrep(states[i:i + joints], self.scale)
for i in range(0, len(states), joints)] |
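A hedged usage sketch mirroring the forward_kinematics example above (the goal frame values are illustrative):
from compas.geometry import Frame

goal = Frame([8.0, 5.0, 1.0], [1, 0, 0], [0, 1, 0])
with VrepClient() as client:
    configs = client.inverse_kinematics(rfl.Robot('A'), goal, max_results=5)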
Python | def pick_building_member(self, robot, building_member_mesh, pickup_frame, metric_values=None):
"""Picks up a building member and attaches it to the robot.
Args:
robot (:class:`Robot`): Robot instance to use for pick up.
building_member_mesh (:class:`compas.datastructures.Mesh`): Mesh
of the building member that will be attached to the robot.
pickup_frame (:class:`Frame`): Pickup frame.
metric_values (:obj:`list` of :obj:`float`): List containing one value
per configurable joint. Each value ranges from 0 to 1,
where 1 indicates the axis/joint is blocked and cannot
move during inverse kinematic solving.
Returns:
int: Object handle (identifier) assigned to the building member.
"""
assert_robot(robot)
joints = len(robot.get_configurable_joints())
if not metric_values:
metric_values = [0.1] * joints
self.set_robot_pose(robot, pickup_frame)
return self.add_building_member(robot, building_member_mesh) |
Python | def plan_motion_to_config(self, robot, goal_configs, metric_values=None, collision_meshes=None,
planner_id='rrtconnect', trials=1, resolution=0.02,
gantry_joint_limits=None, arm_joint_limits=None, shallow_state_search=True, optimize_path_length=False):
"""Find a path plan to move the selected robot from its current position to one of the `goal_configs`.
This function is useful when it is required to get a path plan that ends in one
specific goal configuration.
Args:
robot (:class:`Robot`): Robot instance to move.
goal_configs (:obj:`list` of :class:`Configuration`): List of target or goal configurations.
metric_values (:obj:`list` of :obj:`float`): List containing one value
per configurable joint. Each value ranges from 0 to 1,
where 1 indicates the axis/joint is blocked and cannot
move during inverse kinematic solving.
collision_meshes (:obj:`list` of :class:`compas.datastructures.Mesh`): Collision meshes
to be taken into account when calculating the motion plan.
Defaults to ``None``.
planner_id (:obj:`str`): Name of the planner to use. Defaults to ``rrtconnect``.
trials (:obj:`int`): Number of search trials to run. Defaults to ``1``.
resolution (:obj:`float`): Validity checking resolution. This value
is specified as a fraction of the space's extent.
Defaults to ``0.02``.
gantry_joint_limits (:obj:`list` of `float`): List of 6 floats defining the upper/lower limits of
gantry joints. Use this if you want to restrict the working area of the path planner.
arm_joint_limits (:obj:`list` of `float`): List of 12 floats defining the upper/lower limits of
arm joints. Use this if you want to restrict the working area of the path planner.
shallow_state_search (:obj:`bool`): True to search only a minimum of
valid states before searching a path, False to search states intensively.
optimize_path_length (:obj:`bool`): True to search the path with minimal total length among all `trials`,
False to return the first valid path found. It only affects the output if `trials > 1`.
Returns:
list: List of :class:`Configuration` objects representing the
collision-free path to the ``goal_configs``.
"""
assert_robot(robot)
return self._find_path_plan(robot, {'target_type': 'config', 'target': goal_configs},
metric_values, collision_meshes, planner_id, trials, resolution,
gantry_joint_limits, arm_joint_limits, shallow_state_search, optimize_path_length) |
Python | def plan_motion(self, robot, goal_frame, metric_values=None, collision_meshes=None,
planner_id='rrtconnect', trials=1, resolution=0.02,
gantry_joint_limits=None, arm_joint_limits=None, shallow_state_search=True, optimize_path_length=False):
"""Find a path plan to move the selected robot from its current position to the `goal_frame`.
Args:
robot (:class:`Robot`): Robot instance to move.
goal_frame (:class:`Frame`): Target or goal frame.
metric_values (:obj:`list` of :obj:`float`): List containing one value
per configurable joint. Each value ranges from 0 to 1,
where 1 indicates the axis/joint is blocked and cannot
move during inverse kinematic solving.
collision_meshes (:obj:`list` of :class:`compas.datastructures.Mesh`): Collision meshes
to be taken into account when calculating the motion plan.
Defaults to ``None``.
planner_id (:obj:`str`): Name of the planner to use. Defaults to ``rrtconnect``.
trials (:obj:`int`): Number of search trials to run. Defaults to ``1``.
resolution (:obj:`float`): Validity checking resolution. This value
is specified as a fraction of the space's extent.
Defaults to ``0.02``.
gantry_joint_limits (:obj:`list` of `float`): List of 6 floats defining the upper/lower limits of
gantry joints. Use this if you want to restrict the working area of the path planner.
arm_joint_limits (:obj:`list` of `float`): List of 12 floats defining the upper/lower limits of
arm joints. Use this if you want to restrict the working area of the path planner.
shallow_state_search (:obj:`bool`): True to search only a minimum of
valid states before searching a path, False to search states intensively.
optimize_path_length (:obj:`bool`): True to search the path with minimal total length among all `trials`,
False to return the first valid path found. It only affects the output if `trials > 1`.
Returns:
list: List of :class:`Configuration` objects representing the
collision-free path to the ``goal_frame``.
"""
assert_robot(robot)
return self._find_path_plan(robot, {'target_type': 'pose', 'target': goal_frame},
metric_values, collision_meshes, planner_id, trials, resolution,
gantry_joint_limits, arm_joint_limits, shallow_state_search, optimize_path_length) |
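A hedged sketch of a full planning call (robot, goal frame and parameter values are illustrative):
from compas.geometry import Frame

goal = Frame([8.5, 6.0, 1.2], [1, 0, 0], [0, 1, 0])
with VrepClient() as client:
    path = client.plan_motion(rfl.Robot('B'), goal, trials=5, optimize_path_length=True)
    print('Found a path with %d configurations' % len(path))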
Python | def add_building_member(self, robot, building_member_mesh):
"""Adds a building member to the 3D scene and attaches it to the robot.
Args:
robot (:class:`Robot`): Robot instance to attach the building member to.
building_member_mesh (:class:`compas.datastructures.Mesh`): Mesh
of the building member that will be attached to the robot.
Returns:
int: Object handle (identifier) assigned to the building member.
.. note::
All meshes are automatically removed from the scene when the simulation ends.
"""
assert_robot(robot)
handles = self.add_meshes([building_member_mesh])
if len(handles) != 1:
raise VrepError('Expected a single handle, but found=' + str(handles), -1)
handle = handles[0]
parent_handle = self.get_object_handle('customGripper' + robot.name + '_connection')
vrep.simxSetObjectParent(self.client_id, handle, parent_handle, True, DEFAULT_OP_MODE)
return handle |
Python | def add_meshes(self, meshes):
"""Adds meshes to the 3D scene.
Args:
meshes (:obj:`list` of :class:`compas.datastructures.Mesh`): List
of meshes to add to the current simulation scene.
Returns:
list: List of object handles (identifiers) assigned to the meshes.
.. note::
All meshes are automatically removed from the scene when the simulation ends.
"""
mesh_handles = []
for mesh in meshes:
if not mesh.is_trimesh():
raise ValueError('The V-REP client only supports tri-meshes')
vertices, faces = mesh.to_vertices_and_faces()
vrep_packing = (floats_to_vrep([item for sublist in vertices for item in sublist], self.scale) +
[item for sublist in faces for item in sublist])
params = [[len(vertices) * 3, len(faces) * 4], vrep_packing]
handles = self.run_child_script('buildMesh',
params[0],
params[1],
[])[1]
mesh_handles.extend(handles)
self._added_handles.extend(handles)
return mesh_handles |
Python | def remove_objects(self, object_handles):
"""Removes objects from the 3D scene.
Args:
object_handles (:obj:`list` of :obj:`int`): Object handles to remove.
.. note::
Please note there's no need to clean up objects manually after the simulation
has completed, as those will be reset automatically anyway. This method is
only useful if you need to remove objects *during* a simulation.
"""
for handle in object_handles:
vrep.simxRemoveObject(self.client_id, handle, DEFAULT_OP_MODE)
self._added_handles = [h for h in self._added_handles if h not in object_handles] |
"""Removes objects from the 3D scene.
Args:
object_handles (:obj:`list` of :obj:`int`): Object handles to remove.
.. note::
Please note there's no need to clean up objects manually after the simulation
has completed, as those will be reset automatically anyway. This method is
only useful if you need to remove objects *during* a simulation.
"""
for handle in object_handles:
vrep.simxRemoveObject(self.client_id, handle, DEFAULT_OP_MODE)
self._added_handles = filter(lambda x: x not in object_handles, self._added_handles) |
Python | def basic(cls, name, joints=None, links=None, materials=None, **kwargs):
"""Convenience method to create the most basic instance of a robot, based only on a name.
Parameters
----------
name : str
Name of the robot
Returns
-------
:class:`Robot`
Newly created instance of a robot.
"""
model = RobotModel(name, joints=joints or [], links=links or [],
materials=materials or [], **kwargs)
return cls(model, None) |
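A hedged sketch of the convenience constructor; the result has a model but no backend client attached:
robot = Robot.basic('ur5')
print(robot.model.name)  # 'ur5'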
Python | def _get_current_base_frame(self, full_configuration, group):
"""Returns the group's current base frame, if the robot is in full_configuration.
The base_frame of a planning group can change if a parent joint was
transformed. This function performs a forward kinematic request with the
full configuration to retrieve the (possibly) transformed base_frame of
the planning group. This function is only used in plan_motion since other
services, such as ik or plan_cartesian_motion, do not use the
transformed base_frame as the group's local coordinate system.
Parameters
----------
full_configuration : :class:`compas_fab.robots.Configuration`
The (full) configuration from which the group's base frame is
calculated.
group : str
The planning group for which we want to get the transformed base frame.
Returns
-------
:class:`compas.geometry.Frame`
"""
base_link = self.get_base_link_name(group)
# the group's original base_frame
base_frame = self.get_base_frame(group)
joint_names = self.get_configurable_joint_names()
joint_positions = self._get_scaled_joint_positions_from_start_configuration(full_configuration)
# ideally we would call this with the planning group that includes all
# configurable joints, but we cannot be sure that this group exists.
# That's why we have to do the workaround with the Transformation.
joint_state = dict(zip(joint_names, joint_positions))
base_frame_WCF = self.model.forward_kinematics(joint_state, link_name=base_link)
base_frame_RCF = self.represent_frame_in_RCF(base_frame_WCF, group)
base_frame_RCF.point *= self.scale_factor
T = Transformation.from_frame(base_frame)
return base_frame_RCF.transformed(T) |
Python | def init_configuration(self, group=None):
"""Returns the init joint configuration.
Examples
--------
>>> robot.init_configuration('manipulator')
Configuration((0.000, 0.000, 0.000, 0.000, 0.000, 0.000), (0, 0, 0, 0, 0, 0))
"""
types = [joint.type for joint in self.get_configurable_joints(group)]
positions = [0.] * len(types)
return Configuration(positions, types) |
Python | def random_configuration(self, group=None):
"""Returns a random configuration.
Note that no collision checking is involved, so the configuration may be invalid.
"""
configurable_joints = self.get_configurable_joints(group)
values = []
types = [j.type for j in configurable_joints]
for joint in configurable_joints:
if joint.limit:
values.append(joint.limit.lower + (joint.limit.upper - joint.limit.lower) * random.random())
else:
values.append(0)
return Configuration(values, types) |
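A hedged sketch: sample several candidate configurations, keeping in mind that validity still has to be checked by a planner or collision checker:
samples = [robot.random_configuration() for _ in range(10)]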
Python | def _scale_joint_values(self, values, scale_factor, group=None):
"""Scales the scaleable joint values with scale_factor.
"""
joints = self.get_configurable_joints(group)
if len(joints) != len(values):
raise ValueError("Expected %d values for group %s, but received only %d." % (
len(joints), group, len(values)))
values_scaled = []
for v, j in zip(values, joints):
if j.is_scalable():
v *= scale_factor
values_scaled.append(v)
return values_scaled |
Python | def _get_scaled_joint_positions_from_start_configuration(self, start_configuration=None):
"""Checks the start configuration and returns joint_positions.
"""
joint_names = self.get_configurable_joint_names() # full configuration
joint_positions = [0] * len(joint_names)
if start_configuration:
if len(joint_names) != len(start_configuration.values):
raise ValueError("Please pass a configuration with %d values" % len(joint_names))
joint_positions = start_configuration.values
# scale the prismatic joints
joint_positions = self._scale_joint_values(
joint_positions, 1. / self.scale_factor)
return joint_positions |
Python | def _get_current_transformation_WCF_RCF(self, full_configuration, group):
"""Returns the group's current WCF to RCF transformation, if the robot is in full_configuration.
The base_frame of a planning group can change if a parent joint was
transformed. This function performs a forward kinematic request with the
full configuration to retrieve the (possibly) transformed base_frame of
the planning group. This function is only used in plan_motion since other
services, such as ik or plan_cartesian_motion, do not use the
transformed base_frame as the group's local coordinate system.
Parameters
----------
full_configuration : :class:`compas_fab.robots.Configuration`
The (full) configuration from which the group's base frame is
calculated.
group : str
The planning group for which we want to get the transformed base frame.
Returns
-------
:class:`compas.geometry.Transformation`
"""
base_frame = self._get_current_base_frame(full_configuration, group)
return Transformation.from_frame_to_frame(base_frame, Frame.worldXY()) |
Python | def orientation_constraint_from_frame(self, frame_WCF, tolerances_axes,
group=None):
"""Returns an orientation constraint on the group's end-effector link.
Parameters
----------
frame_WCF: :class:`compas.geometry.Frame`
The frame from which we create the orientation constraint.
tolerances_axes: list of float
Error tolerances t_i for each of the frame's axes in radians. If only
one value is passed it will be used for all 3 axes.
group: str
The planning group for which we specify the constraint. Defaults to
the robot's main planning group.
Examples
--------
>>> frame = Frame([0.4, 0.3, 0.4], [0, 1, 0], [0, 0, 1])
>>> tolerances_axes = [math.radians(1)] * 3
>>> group = robot.main_group_name
>>> robot.orientation_constraint_from_frame(frame, tolerances_axes, group=group)
OrientationConstraint('ee_link', [0.5, 0.5, 0.5, 0.5], [0.017453292519943295, 0.017453292519943295, 0.017453292519943295], 1.0)
Notes
-----
If you specify the tolerances_axes vector with [0.01, 0.01, 6.3], it
means that the frame's x-axis and y-axis are allowed to rotate about the
z-axis by an angle of 6.3 radians, whereas the z-axis would only rotate
by 0.01.
"""
ee_link = self.get_end_effector_link_name(group)
tolerances_axes = list(tolerances_axes)
if len(tolerances_axes) == 1:
tolerances_axes *= 3
elif len(tolerances_axes) != 3:
raise ValueError("Must give either one or 3 values")
return OrientationConstraint(ee_link, frame_WCF.quaternion, tolerances_axes) |
Python | def position_constraint_from_frame(self, frame_WCF, tolerance_position, group=None):
"""Returns a position and orientation constraint on the group's end-effector link.
Parameters
----------
frame_WCF : :class:`compas.geometry.Frame`
The frame from which we create the position constraint.
tolerance_position : float
The allowed tolerance to the frame's position. (Defined in the
robot's units)
group: str
The planning group for which we specify the constraint. Defaults to
the robot's main planning group.
Examples
--------
>>> frame = Frame([0.4, 0.3, 0.4], [0, 1, 0], [0, 0, 1])
>>> tolerance_position = 0.001
>>> robot.position_constraint_from_frame(frame, tolerance_position)
PositionConstraint('ee_link', BoundingVolume(2, Sphere(Point(0.400, 0.300, 0.400), 0.001)), 1.0)
Notes
-----
There are many other ways to create position and
orientation constraints. Check out :class:`compas_fab.robots.PositionConstraint`
and :class:`compas_fab.robots.OrientationConstraint`.
"""
ee_link = self.get_end_effector_link_name(group)
sphere = Sphere(frame_WCF.point, tolerance_position)
return PositionConstraint.from_sphere(ee_link, sphere) |
Python | def constraints_from_frame(self, frame_WCF, tolerance_position, tolerances_axes, group=None):
"""Returns a position and orientation constraint on the group's end-effector link.
Parameters
----------
frame_WCF: :class:`compas.geometry.Frame`
The frame from which we create position and orientation constraints.
tolerance_position: float
The allowed tolerance to the frame's position. (Defined in the
robot's units)
tolerances_axes: list of float
Error tolerances t_i for each of the frame's axes in radians. If only
one value is passed it will be used for all 3 axes.
group: str
The planning group for which we specify the constraint. Defaults to
the robot's main planning group.
Examples
--------
>>> frame = Frame([0.4, 0.3, 0.4], [0, 1, 0], [0, 0, 1])
>>> tolerance_position = 0.001
>>> tolerances_axes = [math.radians(1)]
>>> group = robot.main_group_name
>>> robot.constraints_from_frame(frame, tolerance_position, tolerances_axes, group)
[PositionConstraint('ee_link', BoundingVolume(2, Sphere(Point(0.400, 0.300, 0.400), 0.001)), 1.0), OrientationConstraint('ee_link', [0.5, 0.5, 0.5, 0.5], [0.017453292519943295, 0.017453292519943295, 0.017453292519943295], 1.0)]
Notes
-----
There are many other ways to create position and
orientation constraints. Check out :class:`compas_fab.robots.PositionConstraint`
and :class:`compas_fab.robots.OrientationConstraint`.
"""
pc = self.position_constraint_from_frame(frame_WCF, tolerance_position, group)
oc = self.orientation_constraint_from_frame(frame_WCF, tolerances_axes, group)
return [pc, oc]
Python | def constraints_from_configuration(self, configuration, tolerances, group=None):
"""Returns joint constraints on all joints of the configuration.
Parameters
----------
configuration: :class:`compas_fab.robots.Configuration`
The target configuration.
tolerances: list of float
The tolerances (as +/-) on each joint, defining the bounds in radians
to be achieved. If only one value is passed, it will be used to create
bounds for all joint constraints.
group: str, optional
The planning group for which we specify the constraint. Defaults to
the robot's main planning group.
Examples
--------
>>> configuration = Configuration.from_revolute_values([-0.042, 4.295, -4.110, -3.327, 4.755, 0.])
>>> tolerances = [math.radians(5)] * 6
>>> group = robot.main_group_name
>>> robot.constraints_from_configuration(configuration, tolerances, group)
[JointConstraint('shoulder_pan_joint', -0.042, 0.08726646259971647, 1.0), JointConstraint('shoulder_lift_joint', 4.295, 0.08726646259971647, 1.0), JointConstraint('elbow_joint', -4.11, 0.08726646259971647, 1.0), JointConstraint('wrist_1_joint', -3.327, 0.08726646259971647, 1.0), JointConstraint('wrist_2_joint', 4.755, 0.08726646259971647, 1.0), JointConstraint('wrist_3_joint', 0.0, 0.08726646259971647, 1.0)]
Raises
------
ValueError
If the passed configuration does not correspond to the group.
ValueError
If the passed tolerances have a different length than the configuration.
Notes
-----
Make sure to use the correct tolerance units for prismatic and revolute
joints.
"""
if not group:
group = self.main_group_name
joint_names = self.get_configurable_joint_names(group)
if len(joint_names) != len(configuration.values):
raise ValueError("The passed configuration has %d values, but group %s needs %d" % (
len(configuration.values), group, len(joint_names)))
if len(tolerances) == 1:
tolerances = tolerances * len(joint_names)
elif len(tolerances) != len(configuration.values):
raise ValueError("The passed configuration has %d values, but %d tolerances were given" % (
len(configuration.values), len(tolerances)))
constraints = []
for name, value, tolerance in zip(joint_names, configuration.values, tolerances):
constraints.append(JointConstraint(name, value, tolerance))
return constraints
Python | def inverse_kinematics(self, frame_WCF, start_configuration=None,
group=None, avoid_collisions=True,
constraints=None, attempts=8,
attached_collision_meshes=None):
"""Calculate the robot's inverse kinematics for a given frame.
Parameters
----------
frame_WCF: :class:`compas.geometry.Frame`
The frame to calculate the inverse kinematics for.
start_configuration: :class:`compas_fab.robots.Configuration`, optional
If passed, the inverse will be calculated such that the calculated
joint positions differ the least from the start_configuration.
Defaults to the init configuration.
group: str, optional
The planning group used for calculation. Defaults to the robot's
main planning group.
avoid_collisions: bool, optional
Whether or not to avoid collisions. Defaults to True.
constraints: list of :class:`compas_fab.robots.Constraint`, optional
A set of constraints that the request must obey. Defaults to None.
attempts: int, optional
The maximum number of inverse kinematic attempts. Defaults to 8.
attached_collision_meshes: list of :class:`compas_fab.robots.AttachedCollisionMesh`
Defaults to None.
Raises
------
compas_fab.backends.exceptions.BackendError
If no configuration can be found.
Returns
-------
:class:`compas_fab.robots.Configuration`
The planning group's configuration.
Examples
--------
>>> frame_WCF = Frame([0.3, 0.1, 0.5], [1, 0, 0], [0, 1, 0])
>>> start_configuration = robot.init_configuration()
>>> group = robot.main_group_name
>>> robot.inverse_kinematics(frame_WCF, start_configuration, group) # doctest: +SKIP
Configuration((4.045, 5.130, -2.174, -6.098, -5.616, 6.283), (0, 0, 0, 0, 0, 0)) # doctest: +SKIP
"""
self.ensure_client()
if not group:
group = self.main_group_name # ensure semantics
base_link = self.get_base_link_name(group)
joint_names = self.get_configurable_joint_names()
joint_positions = self._get_scaled_joint_positions_from_start_configuration(
start_configuration)
# represent in RCF
frame_RCF = self.represent_frame_in_RCF(frame_WCF, group)
frame_RCF.point /= self.scale_factor # must be in meters
joint_positions = self.client.inverse_kinematics(frame_RCF, base_link,
group, joint_names, joint_positions,
avoid_collisions, constraints, attempts,
attached_collision_meshes)
joint_positions = self._scale_joint_values(joint_positions, self.scale_factor)
# full configuration # TODO group config?
configuration = Configuration(joint_positions, self.get_configurable_joint_types())
return self.get_group_configuration(group, configuration)
Python | def forward_kinematics(self, configuration, group=None, backend=None, link_name=None):
"""Calculate the robot's forward kinematics.
Parameters
----------
configuration : :class:`compas_fab.robots.Configuration`
The configuration to calculate the forward kinematic for.
group : str, optional
The planning group used for the calculation. Defaults to the robot's
main planning group.
backend : None or str
If `None`, the forward kinematics are calculated with the client if one
exists, otherwise with the robot model. If 'model', the robot model is
used. Other backends (e.g. 'kdl', 'ikfast') are open for implementation.
link_name : str, optional
The name of the link to calculate the forward kinematics for.
Defaults to the group's end effector link.
Returns
-------
:class:`Frame`
The frame in the robot's coordinate system (RCF).
Examples
--------
>>> configuration = Configuration.from_revolute_values([-2.238, -1.153, -2.174, 0.185, 0.667, 0.000])
>>> group = robot.main_group_name
>>> frame_RCF_c = robot.forward_kinematics(configuration, group)
>>> frame_RCF_m = robot.forward_kinematics(configuration, group, backend='model')
>>> frame_RCF_c == frame_RCF_m
True
>>> frame_WCF = robot.represent_frame_in_WCF(frame_RCF_m, group)
>>> frame_WCF
Frame(Point(0.300, 0.100, 0.500), Vector(1.000, -0.000, -0.000), Vector(0.000, 1.000, -0.000))
"""
if not group:
group = self.main_group_name
if link_name is None:
link_name = self.get_end_effector_link_name(group)
else:
# check
if link_name not in self.get_link_names(group):
raise ValueError("Link name %s does not exist in planning group" % link_name)
full_configuration = self.merge_group_with_full_configuration(configuration, self.init_configuration(), group)
full_joint_positions = self._get_scaled_joint_positions_from_start_configuration(full_configuration)
full_joint_names = self.get_configurable_joint_names()
base_link_name = self.get_base_link_name(group)
group_configuration = self.get_group_configuration(group, full_configuration)
group_joint_names = self.get_configurable_joint_names(group)
group_joint_state = dict(zip(group_joint_names, group_configuration.values)) # assuming configuration is group configuration
if not backend:
if self.client:
frame_RCF = self.client.forward_kinematics(full_joint_positions, base_link_name, group, full_joint_names, link_name)
frame_RCF.point *= self.scale_factor
else:
frame_WCF = self.model.forward_kinematics(group_joint_state, link_name)
frame_RCF = self.represent_frame_in_RCF(frame_WCF, group)
elif backend == 'model':
frame_WCF = self.model.forward_kinematics(group_joint_state, link_name)
frame_RCF = self.represent_frame_in_RCF(frame_WCF, group)
else:
# pass to backend, kdl, ikfast,...
raise NotImplementedError
return frame_RCF
Python | def forward_kinematics_robot_model(self, configuration, group=None, link_name=None):
"""Calculate the robot's forward kinematics using the robot model.
Parameters
----------
configuration : :class:`compas_fab.robots.Configuration`
The configuration to calculate the forward kinematic for.
group : str, optional
The planning group used for the calculation. Defaults to the robot's
main planning group.
link_name : str
The name of the link to calculate the forward kinematics for.
Defaults to the group's end effector link.
Examples
--------
>>> configuration = Configuration.from_revolute_values([-2.238, -1.153, -2.174, 0.185, 0.667, 0.000])
>>> group = robot.main_group_name
>>> frame_WCF = robot.forward_kinematics_robot_model(configuration, group)
>>> frame_WCF
Frame(Point(0.300, 0.100, 0.500), Vector(1.000, -0.000, -0.000), Vector(0.000, 1.000, -0.000))
"""
if link_name is None:
link_name = self.get_end_effector_link_name(group)
joint_names = self.get_configurable_joint_names(group)
joint_state = dict(zip(joint_names, configuration.values))
return self.model.forward_kinematics(joint_state, link_name)
Python | def plan_cartesian_motion(self, frames_WCF, start_configuration=None,
max_step=0.01, jump_threshold=1.57,
avoid_collisions=True, group=None,
path_constraints=None,
attached_collision_meshes=None):
"""Calculates a cartesian motion path (linear in tool space).
Parameters
----------
frames_WCF: list of :class:`compas.geometry.Frame`
The frames through which the path is defined.
start_configuration: :class:`Configuration`, optional
The robot's configuration at the starting position. Defaults to the
zero configuration.
max_step: float
The approximate distance between the calculated points. (Defined in
the robot's units)
jump_threshold: float
The maximum allowed distance of joint positions between consecutive
points. If the distance is found to be above this threshold, the
path computation fails. It must be specified in relation to max_step.
If this threshold is 0, 'jumps' might occur, resulting in an invalid
cartesian path. Defaults to pi/2.
avoid_collisions: bool, optional
Whether or not to avoid collisions. Defaults to True.
group: str, optional
The planning group used for calculation. Defaults to the robot's
main planning group.
path_constraints: list of :class:`compas_fab.robots.Constraint`, optional
Optional constraints that can be imposed along the solution path.
Note that path calculation won't work if the start_configuration
violates these constraints. Defaults to None.
attached_collision_meshes: list of :class:`compas_fab.robots.AttachedCollisionMesh`
Defaults to None.
Examples
--------
>>> frames = [Frame([0.3, 0.1, 0.5], [1, 0, 0], [0, 1, 0]),\
Frame([0.4, 0.3, 0.4], [0, 1, 0], [0, 0, 1])]
>>> start_configuration = Configuration.from_revolute_values([-0.042, 4.295, -4.110, -3.327, 4.755, 0.])
>>> group = robot.main_group_name
>>> trajectory = robot.plan_cartesian_motion(frames,\
start_configuration,\
max_step=0.01,\
jump_threshold=1.57,\
avoid_collisions=True,\
group=group)
>>> type(trajectory)
<class 'compas_fab.robots.trajectory.JointTrajectory'>
"""
self.ensure_client()
if not group:
group = self.main_group_name # ensure semantics
frames_RCF = []
for frame_WCF in frames_WCF:
# represent in RCF
frame_RCF = self.represent_frame_in_RCF(frame_WCF, group)
frame_RCF.point /= self.scale_factor
frames_RCF.append(frame_RCF)
base_link = self.get_base_link_name(group)
joint_names = self.get_configurable_joint_names()
joint_types = self.get_configurable_joint_types(group)
start_configuration = start_configuration.copy() if start_configuration else self.init_configuration()
start_configuration.scale(1. / self.scale_factor)
ee_link = self.get_end_effector_link_name(group)
max_step_scaled = max_step / self.scale_factor
T = self.transformation_WCF_RCF(group)
if path_constraints:
path_constraints_RCF_scaled = []
for c in path_constraints:
cp = c.copy()
cp.transform(T)
if c.type == Constraint.JOINT:
joint = self.get_joint_by_name(c.joint_name)
if joint.is_scalable():
cp.scale(self.scale_factor)
else:
cp.scale(self.scale_factor)
path_constraints_RCF_scaled.append(cp)
else:
path_constraints_RCF_scaled = None
trajectory = self.client.plan_cartesian_motion(frames_RCF, base_link,
ee_link, group, joint_names,
joint_types, start_configuration,
max_step_scaled, jump_threshold,
avoid_collisions,
path_constraints_RCF_scaled,
attached_collision_meshes)
# Scale everything back to robot's scale
for pt in trajectory.points:
pt.scale(self.scale_factor)
trajectory.start_configuration.scale(self.scale_factor)
return trajectory
Python | def plan_motion(self, goal_constraints, start_configuration=None,
group=None, path_constraints=None, planner_id='RRT',
num_planning_attempts=1, allowed_planning_time=2.,
max_velocity_scaling_factor=1.,
max_acceleration_scaling_factor=1.,
attached_collision_meshes=None):
"""Calculates a motion path.
Parameters
----------
goal_constraints: list of :class:`compas_fab.robots.Constraint`
The goal to be achieved, defined in a set of constraints.
Constraints can be very specific, for example defining value domains
for each joint, such that the goal configuration is included,
or defining a volume in space, to which a specific robot link (e.g.
the end-effector) is required to move.
start_configuration: :class:`compas_fab.robots.Configuration`, optional
The robot's configuration at the starting position. Defaults to the
all-zero configuration.
group: str, optional
The name of the group to plan for. Defaults to the robot's main
planning group.
path_constraints: list of :class:`compas_fab.robots.Constraint`, optional
Optional constraints that can be imposed along the solution path.
Note that path calculation won't work if the start_configuration
violates these constraints. Defaults to None.
planner_id: str
The name of the algorithm used for path planning. Defaults to 'RRT'.
num_planning_attempts: int, optional
Normally, if one motion plan is needed, one motion plan is computed.
However, for algorithms that use randomization in their execution
(like 'RRT'), it is likely that different planner executions will
produce different solutions. Setting this parameter to a value above
1 will run many additional motion plans, and will report the
shortest solution as the final result. Defaults to 1.
allowed_planning_time: float
The number of seconds allowed to perform the planning. Defaults to 2.
max_velocity_scaling_factor: float
Defaults to 1.
max_acceleration_scaling_factor: float
Defaults to 1.
attached_collision_meshes: list of :class:`compas_fab.robots.AttachedCollisionMesh`
Defaults to None.
Examples
--------
>>> # Example with position and orientation constraints
>>> frame = Frame([0.4, 0.3, 0.4], [0, 1, 0], [0, 0, 1])
>>> tolerance_position = 0.001
>>> tolerances_axes = [math.radians(1)] * 3
>>> start_configuration = Configuration.from_revolute_values([-0.042, 4.295, 0, -3.327, 4.755, 0.])
>>> group = robot.main_group_name
>>> goal_constraints = robot.constraints_from_frame(frame, tolerance_position, tolerances_axes, group)
>>> trajectory = robot.plan_motion(goal_constraints, start_configuration, group, planner_id='RRT')
>>> trajectory.fraction
1.0
>>> # Example with joint constraints (to the UP configuration)
>>> configuration = Configuration.from_revolute_values([0.0, -1.5707, 0.0, -1.5707, 0.0, 0.0])
>>> tolerances = [math.radians(5)] * 6
>>> group = robot.main_group_name
>>> goal_constraints = robot.constraints_from_configuration(configuration, tolerances, group)
>>> trajectory = robot.plan_motion(goal_constraints, start_configuration, group, planner_id='RRT')
>>> trajectory.fraction
1.0
>>> type(trajectory)
<class 'compas_fab.robots.trajectory.JointTrajectory'>
"""
# TODO: for the motion plan request a list of possible goal constraints
# can be passed, from which the planner will try to find a path that
# satisfies at least one of the specified goal constraints. For now only
# one set of goal constraints is supported.
# TODO: add workspace_parameters
self.ensure_client()
if not group:
group = self.main_group_name # ensure semantics
# Transform goal constraints to RCF and scale
T = self._get_current_transformation_WCF_RCF(start_configuration, group)
goal_constraints_RCF_scaled = []
for c in goal_constraints:
cp = c.copy()
cp.transform(T)
if c.type == Constraint.JOINT:
joint = self.get_joint_by_name(c.joint_name)
if joint.is_scalable():
cp.scale(self.scale_factor)
else:
cp.scale(self.scale_factor)
goal_constraints_RCF_scaled.append(cp)
# Transform path constraints to RCF and scale
if path_constraints:
path_constraints_RCF_scaled = []
for c in path_constraints:
cp = c.copy()
cp.transform(T)
if c.type == Constraint.JOINT:
joint = self.get_joint_by_name(c.joint_name)
if joint.is_scalable():
cp.scale(self.scale_factor)
else:
cp.scale(self.scale_factor)
path_constraints_RCF_scaled.append(cp)
else:
path_constraints_RCF_scaled = None
joint_names = self.get_configurable_joint_names()
joint_types = self.get_configurable_joint_types(group)
start_configuration = start_configuration.copy() if start_configuration else self.init_configuration()
start_configuration.scale(1. / self.scale_factor)
kwargs = {}
kwargs['goal_constraints'] = goal_constraints_RCF_scaled
kwargs['base_link'] = self.get_base_link_name(group)
kwargs['ee_link'] = self.get_end_effector_link_name(group)
kwargs['group'] = group
kwargs['joint_names'] = joint_names
kwargs['joint_types'] = joint_types
kwargs['start_configuration'] = start_configuration
kwargs['path_constraints'] = path_constraints_RCF_scaled
kwargs['trajectory_constraints'] = None
kwargs['planner_id'] = planner_id
kwargs['num_planning_attempts'] = num_planning_attempts
kwargs['allowed_planning_time'] = allowed_planning_time
kwargs['max_velocity_scaling_factor'] = max_velocity_scaling_factor
kwargs['max_acceleration_scaling_factor'] = max_acceleration_scaling_factor
kwargs['attached_collision_meshes'] = attached_collision_meshes
kwargs['workspace_parameters'] = None
trajectory = self.client.plan_motion(**kwargs)
# Scale everything back to robot's scale
for pt in trajectory.points:
pt.scale(self.scale_factor)
trajectory.start_configuration.scale(self.scale_factor)
return trajectory
Python | def transformed_frames(self, configuration, group=None):
"""Returns the robot's transformed frames."""
joint_names = self.get_configurable_joint_names(group)
joint_state = dict(zip(joint_names, configuration.values))
return self.model.transformed_frames(joint_state)
Python | def transformed_axes(self, configuration, group=None):
"""Returns the robot's transformed axes."""
joint_names = self.get_configurable_joint_names(group)
joint_state = dict(zip(joint_names, configuration.values))
return self.model.transformed_axes(joint_state)
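# Hedged sketch (assumes a `robot` and a matching configuration, as in the
# doctests above): both helpers are typically used to drive visualisation.
#
#     config = Configuration.from_revolute_values([0.0, -1.5707, 0.0, -1.5707, 0.0, 0.0])
#     frames = robot.transformed_frames(config)  # one frame per link
#     axes = robot.transformed_axes(config)      # one axis per configurable joint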
Python | def scale_factor(self):
"""The robot's scale factor."""
if self.artist:
return self.artist.scale_factor
else:
return self._scale_factor
Python | def info(self):
"""Prints information about the robot.
"""
print("The robot's name is '%s'." % self.name)
if self.semantics:
print("The planning groups are:", self.group_names)
print("The main planning group is '%s'." % self.main_group_name)
configurable_joints = self.get_configurable_joints(
self.main_group_name)
else:
configurable_joints = self.get_configurable_joints()
print("The end-effector's name is '%s'." %
self.get_end_effector_link_name())
print("The base link's name is '%s'" % self.get_base_link_name())
print("The base_frame is:", self.get_base_frame())
print("The robot's joints are:")
for joint in configurable_joints:
info = "\t* '%s' is of type '%s'" % (
joint.name, list(Joint.SUPPORTED_TYPES)[joint.type])
if joint.limit:
info += " and has limits [%.3f, %.3f]" % (
joint.limit.lower, joint.limit.upper)
print(info)
print("The robot's links are:")
print([l.name for l in self.model.links])
Python | def xform_from_transformation(transformation):
"""Creates a Rhino Transform instance from a :class:`Transformation`.
Args:
transformation (:class:`Transformation`): the transformation.
Returns:
(:class:`Rhino.Geometry.Transform`)
"""
transform = Rhino.Geometry.Transform(1.0)
for i in range(0, 4):
for j in range(0, 4):
transform[i, j] = transformation[i, j]
return transform
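# Hedged usage sketch (RhinoCommon environment assumed; `mesh` is any
# Rhino.Geometry.GeometryBase instance):
#
#     T = Transformation.from_frame(target_frame)  # a compas Transformation
#     mesh.Transform(xform_from_transformation(T))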
Python | def time_from_start(self):
"""Effectively, time from start for the last point in the trajectory.
"""
if not self.points:
return 0.
return self.points[-1].time_from_start.seconds
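# Hedged sketch: after planning, this property gives the duration in seconds
# of the whole trajectory (0. if it has no points).
#
#     trajectory = robot.plan_motion(goal_constraints, start_configuration)
#     print("duration: %.2f s" % trajectory.time_from_start)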
Python | def filter_multiples(
mult: _Iterable[int],
xs: _Iterable[int]
) -> _Iterable[int]:
"""Return the list of numbers within 'xs' which are multiples of any numbers
in 'mult'."""
return [x for x in xs if _x_is_multiple_of_any(x, mult)]
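# Hedged example (assuming _x_is_multiple_of_any performs the obvious modulo
# test):
#
#     filter_multiples([3, 5], [1, 3, 5, 7, 9, 10])  # -> [3, 5, 9, 10]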
Python | def fibonacci() -> _Iterable[int]:
"""Return a generator for Fibonacci numbers, starting with 1 and 2."""
a: int = 1
b: int = 2
while True:
yield a
a, b = b, a + b
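# A short, self-contained sketch combining the two helpers above (assuming
# _x_is_multiple_of_any is in scope): sum the even Fibonacci numbers below
# four million, Project-Euler style.
import itertools
even_fibs = filter_multiples([2], itertools.takewhile(lambda n: n < 4_000_000, fibonacci()))
print(sum(even_fibs))  # expected: 4613732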
Python | def make_seq_errors_genotype_model(g, error_probs):
"""
Given an empirically estimated error probability matrix, resample for a particular
variant. Determine variant frequency and true genotype (g0, g1, or g2),
then return observed genotype based on row in error_probs with nearest
frequency. Treat each pair of alleles as a diploid individual.
"""
m = g.shape[0]
frequency = np.sum(g) / m
closest_row = (np.abs(error_probs['freq']-frequency)).idxmin()
closest_freq = error_probs.iloc[closest_row - 1].values[1:]
w = np.copy(g)
# Make diploid (iterate each pair of alleles)
genos = np.reshape(w,(-1,2))
# Record the true genotypes (0,0=>0; 1,0=>1; 0,1=>2, 1,1=>3)
count = np.sum(np.array([1,2]) * genos,axis=1)
base_genotypes = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
genos[count==0,:]=base_genotypes[
np.random.choice(4,sum(count==0), p=closest_freq[[0, 1, 1, 2]]*[1,0.5,0.5,1]),:]
genos[count==1,:]=base_genotypes[[0,1,3],:][
np.random.choice(3,sum(count==1), p=closest_freq[[3, 4, 5]]),:]
genos[count==2,:]=base_genotypes[[0,2,3],:][
np.random.choice(3,sum(count==2), p=closest_freq[[3, 4, 5]]),:]
genos[count==3,:]=base_genotypes[
np.random.choice(4,sum(count==3), p=closest_freq[[6, 7, 7, 8]]*[1,0.5,0.5,1]),:]
return(np.reshape(genos,-1))
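# Hedged sketch (assumes `error_probs` is loaded as in add_errors below):
# resample one biallelic variant of six haploid genotypes (three diploids).
#
#     g = np.array([0, 1, 0, 0, 1, 1])
#     g_observed = make_seq_errors_genotype_model(g, error_probs)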
Python | def add_errors(sample_data, ancestral_allele_error=0, random_seed=None, **kwargs):
"""
Return a new sample_data file with added sequencing and ancestral_state error
"""
if random_seed is not None:
np.random.seed(random_seed)
if sample_data.num_samples % 2 != 0:
raise ValueError("Must have an even number of samples to inject error")
error_probs = pd.read_csv(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data/EmpiricalErrorPlatinum1000G.csv"
),
index_col=0,
)
n_variants = 0
aa_error_by_site = np.zeros(sample_data.num_sites, dtype=bool)  # np.bool is deprecated in NumPy
if ancestral_allele_error > 0:
assert ancestral_allele_error <= 1
n_bad_sites = round(ancestral_allele_error*sample_data.num_sites)
# This gives *exactly* a proportion aa_error of bad sites
# NB - to do this probabilistically, use np.random.binomial(1, e, ts.num_sites)
aa_error_by_site[0:n_bad_sites] = True
np.random.shuffle(aa_error_by_site)
new_sd = sample_data.copy(**kwargs)
genotypes = new_sd.data["sites/genotypes"][:] # Could be big
alleles = new_sd.data["sites/alleles"][:]
for i, (ancestral_allele_error, v) in enumerate(zip(
aa_error_by_site, sample_data.variants())):
if ancestral_allele_error and len(v.site.alleles)==2:
genotypes[i, :] = 1-v.genotypes
alleles[i] = list(reversed(alleles[i]))
genotypes[i, :] = make_seq_errors_genotype_model(
genotypes[i, :], error_probs)
new_sd.data["sites/genotypes"][:] = genotypes
new_sd.data["sites/alleles"][:] = alleles
new_sd.finalise()
return new_sd
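# Hedged usage sketch (tsinfer-style API assumed; extra keywords such as
# `path` are passed through to sample_data.copy; the filenames are
# illustrative):
#
#     sd = tsinfer.load("chr20.samples")
#     sd_err = add_errors(sd, ancestral_allele_error=0.01, random_seed=123,
#                         path="chr20.error.samples")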
Python | def remapped_ts(ts, bad_ancestral_states=None, save_site_changes=None):
"""
If you want to save a CSV file of changed sites, provide save_site_changes as a path
to a file.
"""
tables = ts.dump_tables()
sites = tables.sites
mutations = tables.mutations
sites.clear()
mutations.clear()
v_iter = ts.variants()
changed_states = 0
corrected_AS = 0
miscorrected_AS = 0
if save_site_changes is not None:
save_site_changes = open(save_site_changes, "wt")
print("site_id,position,old_AS,new_AS", file=save_site_changes)
for tree in ts.trees():
for s in tree.sites():
v = next(v_iter)
new_anc_state, muts = tree.map_mutations(v.genotypes, v.alleles)
site_id = sites.add_row(
position=s.position,
ancestral_state=new_anc_state,
metadata=s.metadata,
)
mapping = {-1:-1}
for i, m in enumerate(muts):
mapping[i] = mutations.add_row(
site=site_id,
node=m.node,
time=m.time,
derived_state=m.derived_state,
parent=mapping[m.parent],
metadata=m.metadata)
if s.ancestral_state != new_anc_state:
changed_states += 1
try:
if s.id in bad_ancestral_states:
# we might have corrected a bad AS
if bad_ancestral_states[s.id] == new_anc_state:
# Hurrah
corrected_AS += 1
else:
# we wrongly changed a good AS
miscorrected_AS += 1
except TypeError:
# bad_ancestral_states is None: we didn't have an original ts
pass
if save_site_changes is not None:
print(
f"{s.id},{s.position},{s.ancestral_state},{new_anc_state}",
file=save_site_changes,
)
logger.info(
f"{mutations.num_rows} new mutations vs {ts.num_mutations} old ones")
if bad_ancestral_states is not None:
logger.info(
f"{corrected_AS}/{len(bad_ancestral_states)} bad ancestral states corrected "
f"({miscorrected_AS}/{ts.num_sites-len(bad_ancestral_states)} miscorrected)"
)
if save_site_changes is not None:
logger.info(
f"{changed_states}/{ts.num_sites} ancestral states changed: "
f"saved to {save_site_changes.name}")
if 'user_data' in tables.metadata and 'muts' in tables.metadata['user_data']:
metadata = tables.metadata.copy()
user_meta = metadata['user_data']
assert 'old_muts' not in user_meta
assert 'old_ts_bytes' not in user_meta
user_meta.update({
'old_muts': user_meta['muts'],
'muts':mutations.num_rows,
'old_ts_bytes': user_meta['ts_bytes'],
'ts_bytes':tables.nbytes,
})
tables.metadata = metadata
return tables.tree_sequence()
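# Hedged sketch: re-estimate ancestral states by parsimony on an inferred
# tree sequence and log which sites changed (the filename is illustrative).
#
#     new_ts = remapped_ts(inferred_ts, save_site_changes="site_changes.csv")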
Python | def randomly_split_polytomies(
self,
*,
epsilon=None,
squash_edges=True,
record_provenance=True,
random_seed=None,
):
"""
Modifies the table collection in place, adding extra nodes and edges
so that any node with greater than 2 children (i.e. a multifurcation
or "polytomy") is resolved into successive bifurcations. This is identical
to :meth:`TreeSequence.randomly_split_polytomies` but acts *in place* to
alter the data in this :class:`TableCollection`. Please see
:meth:`TreeSequence.randomly_split_polytomies` for a fuller description,
and details of parameters.
"""
if epsilon is None:
epsilon = 1e-10
rng = np.random.default_rng(seed=random_seed)
def resolve_polytomy(parent_node_id, child_ids, new_nodes_by_time_desc):
"""
For a polytomy and list of child node ids, return a list of (child, parent)
tuples, describing a bifurcating tree, rooted at parent_node_id, where the
new_nodes_by_time_desc have been used to break polytomies. All possible
topologies should be equiprobable.
"""
assert len(child_ids) == len(new_nodes_by_time_desc) + 2
# Polytomies broken by sequentially splicing onto edges, so an initial edge
# is required. This will always remain above the top node & is removed later
edges = [
[child_ids[0], None],
]
# We know beforehand how many random ints are needed: generate them all now
edge_choice = rng.integers(0, np.arange(1, len(child_ids) * 2 - 1, 2))
tmp_new_node_lab = [parent_node_id] + new_nodes_by_time_desc
assert len(edge_choice) == len(child_ids) - 1
for node_lab, child_id, target_edge_id in zip(
tmp_new_node_lab, child_ids[1:], edge_choice
):
target_edge = edges[target_edge_id]
# Insert in the right place, to keep edges in parent time order
edges.insert(target_edge_id, [child_id, node_lab])
edges.insert(target_edge_id, [target_edge[0], node_lab])
target_edge[0] = node_lab
top_edge = edges.pop() # remove the edge above the top node
assert top_edge[1] is None
# Re-map the internal nodes IDs so they are used in time order
real_node = iter(new_nodes_by_time_desc)
node_map = {c: c for c in child_ids}
node_map[edges[-1][1]] = parent_node_id # last edge == oldest parent
for e in reversed(edges):
# Reversing along the edges, parents are in inverse time order
for idx in (1, 0): # look at parent (1) then child (0)
if e[idx] not in node_map:
node_map[e[idx]] = next(real_node)
e[idx] = node_map[e[idx]]
assert len(node_map) == len(new_nodes_by_time_desc) + len(child_ids) + 1
return edges
edge_table = self.edges
node_table = self.nodes
# Store existing left, so we can change it if the edge is split
existing_edges_left = edge_table.left
# Keep other edge arrays etc. for fast read access
existing_edges_right = edge_table.right
existing_edges_parent = edge_table.parent
existing_edges_child = edge_table.child
existing_node_time = node_table.time
# We can save a lot of effort if we don't need to check the time of mutations
# We definitely don't need to check on the first iteration, a
check_mutations = np.any(
np.logical_not(tskit.is_unknown_time(self.mutations.time))
)
ts = self.tree_sequence() # Only needed to check mutations
tree_iter = ts.trees() # ditto
edge_table.clear()
edges_from_node = collections.defaultdict(set) # Active descendant edge ids
nodes_changed = set()
for interval, e_out, e_in in ts.edge_diffs(include_terminal=True):
pos = interval[0]
prev_tree = None if pos == 0 else next(tree_iter)
for edge in itertools.chain(e_out, e_in):
if edge.parent != tskit.NULL:
nodes_changed.add(edge.parent)
oldest_mutation_for_node = {}
if check_mutations and prev_tree is not None:
# It would also help if mutations were sorted such that all mutations
# above the same node appeared consecutively, with oldest first.
for site in prev_tree.sites():
for mutation in site.mutations:
if not tskit.is_unknown_time(mutation.time):
if mutation.node in oldest_mutation_for_node:
oldest_mutation_for_node[mutation.node] = max(
oldest_mutation_for_node[mutation.node], mutation.time
)
else:
oldest_mutation_for_node[mutation.node] = mutation.time
for parent_node in nodes_changed:
child_edge_ids = edges_from_node[parent_node]
if len(child_edge_ids) >= 3:
# We have a previous polytomy to break
parent_time = existing_node_time[parent_node]
new_nodes = []
child_ids = existing_edges_child[list(child_edge_ids)]
left = None
max_time = 0
# Split existing edges
for edge_id, child_id in zip(child_edge_ids, child_ids):
max_time = max(max_time, existing_node_time[child_id])
if check_mutations and child_id in oldest_mutation_for_node:
max_time = max(max_time, oldest_mutation_for_node[child_id])
if left is None:
left = existing_edges_left[edge_id]
else:
assert left == existing_edges_left[edge_id]
if existing_edges_right[edge_id] > interval[0]:
# make sure we carry on the edge after this polytomy
existing_edges_left[edge_id] = pos
# Arbitrarily, if epsilon is not small enough, use half the min dist
dt = min((parent_time - max_time) / (len(child_ids) * 2), epsilon)
# Break this N-degree polytomy. This requires N-2 extra nodes to be
# introduced: create them here in order of decreasing time
new_nodes = [
node_table.add_row(time=parent_time - (i * dt))
for i in range(1, len(child_ids) - 1)
]
# print("New nodes:", new_nodes, node_table.time[new_nodes])
for new_edge in resolve_polytomy(parent_node, child_ids, new_nodes):
edge_table.add_row(
left=left,
right=pos,
child=new_edge[0],
parent=new_edge[1],
)
# print("new_edge: left={}, right={}, child={}, parent={}"
# .format(left, pos, new_edge[0], new_edge[1]))
else:
# Previous node was not a polytomy - just add the edges_out
for edge_id in child_edge_ids:
if existing_edges_right[edge_id] == pos: # is an out edge
edge_table.add_row(
left=existing_edges_left[edge_id],
right=pos,
parent=parent_node,
child=existing_edges_child[edge_id],
)
for edge in e_out:
if edge.parent != tskit.NULL:
# print("REMOVE", edge.id)
edges_from_node[edge.parent].remove(edge.id)
for edge in e_in:
if edge.parent != tskit.NULL:
# print("ADD", edge.id)
edges_from_node[edge.parent].add(edge.id)
# Chop if we have created a polytomy: the polytomy itself will be resolved
# at a future iteration, when any edges move into or out of the polytomy
while nodes_changed:
node = nodes_changed.pop()
edge_ids = edges_from_node[node]
# print("Looking at", node)
if len(edge_ids) == 0:
del edges_from_node[node]
# if this node has changed *to* a polytomy, we need to cut all of the
# child edges that were previously present by adding the previous
# segment and left-truncating
elif len(edge_ids) >= 3:
for edge_id in edge_ids:
if existing_edges_left[edge_id] < interval[0]:
self.edges.add_row(
left=existing_edges_left[edge_id],
right=interval[0],
parent=existing_edges_parent[edge_id],
child=existing_edges_child[edge_id],
)
existing_edges_left[edge_id] = interval[0]
assert len(edges_from_node) == 0
self.sort()
if squash_edges:
self.edges.squash()
self.sort() # Bug: https://github.com/tskit-dev/tskit/issues/808
if record_provenance:
parameters = {"command": "randomly_split_polytomies"}
self.provenances.add_row(
record=json.dumps(provenance.get_provenance_dict(parameters))
        )
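
# Editor's sketch (not from the source): a quick sanity check on the maths
# behind resolve_polytomy() above. Breaking an N-child polytomy needs N-2 new
# internal nodes, and the number of distinct labelled resolutions is the
# double factorial (2N-3)!!, which is exactly the product of the range sizes
# drawn by rng.integers(0, np.arange(1, N * 2 - 1, 2)), so all topologies are
# equiprobable.
import numpy as np

def num_resolutions(n):
    # (2n-3)!! for n >= 2 children
    return int(np.prod(np.arange(1, 2 * n - 2, 2)))

assert num_resolutions(3) == 3   # a trifurcation has three resolutions
assert num_resolutions(4) == 15
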
def branches_pnorm(bl1, bl2, p):
"""
Robinson-Foulds family of L^p distances.
"""
clades1 = bl1.keys()
clades2 = bl2.keys()
v = np.fromiter(
itertools.chain(
(bl1[cl] for cl in clades1 - clades2),
(bl2[cl] for cl in clades2 - clades1),
(bl1[cl] - bl2[cl] for cl in clades1 & clades2),
),
dtype=float,
)
    return np.linalg.norm(v, ord=p)
def branches_l2(bl1, bl2):
"""
L2 (Euclidean) distance.
This is the shortest path between two points in an embedding of
BHV treespace into Euclidean space. Note that this path is
frequently outside BHV treespace.
Amenta et al. (2007) lower bound on BHV treespace geodesic.
http://doi.org/10.1016/j.ipl.2007.02.008
See also the "Branch Score" or BLD (Branch Length Distance),
Kuhner & Felsenstein (1994).
http://doi.org/10.1093/oxfordjournals.molbev.a040126
"""
    return branches_pnorm(bl1, bl2, 2)
def branches_BHVub(bl1, bl2):
"""
Distance through a strict consensus tree.
Amenta et al. (2007) upper bound on BHV treespace geodesic.
http://doi.org/10.1016/j.ipl.2007.02.008
"""
clades1 = bl1.keys()
clades2 = bl2.keys()
u1 = np.fromiter((bl1[cl] for cl in clades1 - clades2), dtype=float)
u2 = np.fromiter((bl2[cl] for cl in clades2 - clades1), dtype=float)
u3 = np.fromiter((bl1[cl] - bl2[cl] for cl in clades1 & clades2), dtype=float)
v = [(np.linalg.norm(u1) + np.linalg.norm(u2)), np.linalg.norm(u3)]
    return np.linalg.norm(v)
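
# Editor's usage sketch (not from the source): the branch-length dictionaries
# are keyed by clade encodings; plain strings stand in here for the packed-bit
# encodings produced in rf_distance() below.
bl_a = {"AB": 1.0, "ABC": 0.5}
bl_b = {"AB": 1.5, "AC": 0.5}
d2 = branches_l2(bl_a, bl_b)     # sqrt(0.5**2 + 0.5**2 + 0.5**2)
ub = branches_BHVub(bl_a, bl_b)  # upper bound on the BHV geodesic
assert d2 <= ub                  # lower bound <= upper bound
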
def rf_distance(ts1, ts2, dist_func):
"""
Robinson-Foulds family of distances, calculated between two tree sequences.
"""
    assert ts1.num_samples == ts2.num_samples
assert ts1.sequence_length == ts2.sequence_length
ed1 = ts1.edge_diffs()
ed2 = ts2.edge_diffs()
try:
(interval1, edges_out1, edges_in1) = next(ed1)
(interval2, edges_out2, edges_in2) = next(ed2)
except StopIteration:
return 0
def branches(tree, node_enc):
b = dict()
for u, enc in enumerate(node_enc):
branch_length = tree.branch_length(u)
if branch_length:
b[enc.tobytes()] = branch_length
return b
trees1 = ts1.trees()
trees2 = ts2.trees()
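    # Editor's note: node_encodings() is defined elsewhere in this module; it
    # presumably returns, for each tree, a 0/1 matrix marking which samples
    # descend from each node, so np.packbits yields a compact per-node clade
    # encoding whose bytes serve as dictionary keys below.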
nodes1 = iter(np.packbits(node_encodings(ts1), axis=-1))
nodes2 = iter(np.packbits(node_encodings(ts2), axis=-1))
branches1 = branches(next(trees1), next(nodes1))
branches2 = branches(next(trees2), next(nodes2))
def overlap(it1, it2):
if it1[1] < it2[0] or it2[1] < it1[0]:
return -1
lo = max(it1[0], it2[0])
hi = min(it1[1], it2[1])
return hi - lo
d = 0
while True:
span = overlap(interval1, interval2)
assert span > 0
d += span * dist_func(branches1, branches2)
x = interval2[1] - interval1[1]
if x >= 0:
# advance through ts1
try:
(interval1, edges_out1, edges_in1) = next(ed1)
except StopIteration:
break
# TODO update dict based on edges in/out
branches1 = branches(next(trees1), next(nodes1))
if x <= 0:
# advance through ts2
try:
(interval2, edges_out2, edges_in2) = next(ed2)
except StopIteration:
break
# TODO update dict based on edges in/out
branches2 = branches(next(trees2), next(nodes2))
d /= ts1.sequence_length
    return d
def run(params):
"""
Run a single inference, with the specified rates
"""
rho = params.rec_rate[1:]
base_rec_prob = np.quantile(rho, 0.5)
ma_mis_rate = ms_mis_rate = 1.0
if params.precision is None:
# Smallest recombination rate
min_rho = int(np.ceil(-np.min(np.log10(rho))))
# Smallest mean
av_min = int(np.ceil(-np.log10(
min(1, ma_mis_rate, ms_mis_rate) * base_rec_prob)))
precision = max(min_rho, av_min) + 3
else:
precision = params.precision
ma_mis = base_rec_prob * ma_mis_rate
ms_mis = base_rec_prob * ms_mis_rate
print(
f"Starting {params.cutoff_power}, trim_oldest={params.trim_oldest}",
f"with base rho {base_rec_prob:.5g}",
f"(mean {np.mean(rho):.4g} median {np.quantile(rho, 0.5):.4g}",
f"min {np.min(rho):.4g}, 2.5% quantile {np.quantile(rho, 0.025):.4g})",
f"precision {precision}")
prefix = None
if params.sample_data.path is not None:
assert params.sample_data.path.endswith(".samples")
prefix = params.sample_data.path[0:-len(".samples")]
inf_prefix = "{}_rma{}_rms{}_N{}_{}_p{}".format(
prefix,
ma_mis_rate,
ms_mis_rate,
params.cutoff_power,
"trim" if params.trim_oldest else "norm",
precision)
start_time = time.process_time()
anc = tsinfer.generate_ancestors(
params.sample_data,
cutoff_power=params.cutoff_power,
trim_oldest=params.trim_oldest,
num_threads=params.num_threads,
path=None if inf_prefix is None else inf_prefix + ".ancestors",
)
print(f"GA done (rel_ma_mis:{ma_mis_rate}, rel_ms_mis:{ms_mis_rate})")
inferred_anc_ts = tsinfer.match_ancestors(
params.sample_data,
anc,
num_threads=params.num_threads,
precision=precision,
recombination_rate=params.rec_rate,
mismatch_rate=ma_mis,
)
inferred_anc_ts.dump(path=inf_prefix + ".atrees")
print(f"MA done: abs_ma_mis rate = {ma_mis}")
inferred_ts = tsinfer.match_samples(
params.sample_data,
inferred_anc_ts,
num_threads=params.num_threads,
precision=precision,
recombination_rate=params.rec_rate,
mismatch_rate=ms_mis)
process_time = time.process_time() - start_time
ts_path = inf_prefix + ".trees"
inferred_ts.dump(path=ts_path)
print(f"MS done: abs_ms_mis rate = {ms_mis}")
simplified_inferred_ts = inferred_ts.simplify() # Remove unary nodes
# Calculate mean num children (polytomy-measure) for internal nodes
nc_sum = 0
nc_sum_sq = 0
nc_tot = 0
root_lengths = collections.defaultdict(float)
for tree in simplified_inferred_ts.trees():
for n in tree.nodes():
n_children = tree.num_children(n)
if n_children > 0: # exclude leaves/samples
nc_sum += n_children * tree.span
nc_sum_sq += (n_children ** 2) * tree.span
nc_tot += tree.span
nc_mean = nc_sum/nc_tot
nc_var = nc_sum_sq / nc_tot - (nc_mean ** 2) # can't be bothered to adjust for n
# Calculate span of root nodes in simplified tree
# Calculate KC
try:
kc = simplified_inferred_ts.kc_distance(tskit.load(prefix+".trees"))
except FileNotFoundError:
kc = None
return Results(
abs_ma_mis=ma_mis,
abs_ms_mis=ms_mis,
rel_ma_mis=ma_mis_rate,
rel_ms_mis=ms_mis_rate,
cutoff_power=params.cutoff_power,
trim_oldest=params.trim_oldest,
precision=precision,
edges=inferred_ts.num_edges,
muts=inferred_ts.num_mutations,
num_trees=inferred_ts.num_trees,
kc=kc,
mean_node_children=nc_mean,
var_node_children=nc_var,
process_time=process_time,
ts_size=os.path.getsize(ts_path),
        ts_path=ts_path)
def run_replicate(rep, args):
"""
The main function that runs a parameter set
"""
seed = rep+args.random_seed
if len(args.precision) == 0:
precision = [None]
else:
precision = args.precision
nt = 0 if args.num_threads is None else args.num_threads
if args.sample_file is None:
# Simulate
sim = simulate_human(seed)
samples, rho, prefix, ts = setup_simulation(
*sim,
random_seed=seed,
cheat_recombination=args.cheat_breakpoints,
err=args.error)
else:
samples, rho, prefix, ts = setup_sample_file(args)
if ts is not None:
ts.dump(prefix + ".trees")
# Set up the range of params for multiprocessing
cutoff_powers = np.array([np.inf, 6, 5, 4, 3, 2])
trim_oldest = np.array([True, False])
param_iter = (
Params(samples, rho, c, t, p, nt)
for c in cutoff_powers for t in trim_oldest for p in precision)
with open(prefix + ".results", "wt") as file:
print("\t".join(Results._fields), file=file, flush=True)
with multiprocessing.Pool(40) as pool:
for result in pool.imap_unordered(run, param_iter):
# Save to a results file.
# NB this can be pasted into R and plotted using
# d <- read.table(stdin(), header=T)
# d$rel_ma <- factor(d$ma_mis / d$ms_mis)
# ggplot() + geom_line(data = d, aes(x = ms_mis, y = edges+muts, color = rel_ma)) + scale_x_continuous(trans='log10')
print("\t".join(str(r) for r in result), file=file, flush=True) | def run_replicate(rep, args):
"""
The main function that runs a parameter set
"""
seed = rep+args.random_seed
if len(args.precision) == 0:
precision = [None]
else:
precision = args.precision
nt = 0 if args.num_threads is None else args.num_threads
if args.sample_file is None:
# Simulate
sim = simulate_human(seed)
samples, rho, prefix, ts = setup_simulation(
*sim,
random_seed=seed,
cheat_recombination=args.cheat_breakpoints,
err=args.error)
else:
samples, rho, prefix, ts = setup_sample_file(args)
if ts is not None:
ts.dump(prefix + ".trees")
# Set up the range of params for multiprocessing
cutoff_powers = np.array([np.inf, 6, 5, 4, 3, 2])
trim_oldest = np.array([True, False])
param_iter = (
Params(samples, rho, c, t, p, nt)
for c in cutoff_powers for t in trim_oldest for p in precision)
with open(prefix + ".results", "wt") as file:
print("\t".join(Results._fields), file=file, flush=True)
with multiprocessing.Pool(40) as pool:
for result in pool.imap_unordered(run, param_iter):
# Save to a results file.
# NB this can be pasted into R and plotted using
# d <- read.table(stdin(), header=T)
# d$rel_ma <- factor(d$ma_mis / d$ms_mis)
# ggplot() + geom_line(data = d, aes(x = ms_mis, y = edges+muts, color = rel_ma)) + scale_x_continuous(trans='log10')
print("\t".join(str(r) for r in result), file=file, flush=True) |
def mean_rate(self):
"""
Return the weighted mean across all windows of the entire map.
"""
window_sizes = self.position[1:] - self.position[:-1]
weights = window_sizes / self.sequence_length
if self.map_start != 0:
weights[0] = 0
        return np.average(self.rate, weights=weights)
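
# Editor's usage sketch, assuming the RateMap constructor seen in
# read_hapmap() below (n positions, n - 1 rates) and that mean_rate is a
# property: a map of [0, 10) at rate 1e-8 and [10, 20) at rate 3e-8 averages
# to 2e-8.
rm = RateMap([0, 10, 20], [1e-8, 3e-8])
assert np.isclose(rm.mean_rate, 2e-8)
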
def slice(self, start=None, end=None, trim=False): # noqa: A003
"""
Returns a subset of this rate map between the specified end
points. If start is None, it defaults to 0. If end is None, it defaults
to the end of the map. If trim is True, remove the flanking
zero rate regions such that the sequence length of the
new rate map is end - start.
"""
        i = 0
        j = len(self.position)
        if start is None:
            start = 0
        if end is None:
            end = self.position[-1]
if (
start < 0
or end < 0
or start > self.position[-1]
or end > self.position[-1]
or start > end
):
raise IndexError(f"Invalid subset: start={start}, end={end}")
if start != 0:
i = np.searchsorted(self.position, start, side="left")
if start < self.position[i]:
i -= 1
if end != self.position[-1]:
j = i + np.searchsorted(self.position[i:], end, side="right")
if end > self.position[j - 1]:
j += 1
position = self.position[i:j].copy()
rate = self.rate[i : j - 1].copy()
position[0] = start
position[-1] = end
map_start = 0
if trim:
position -= start
else:
# Prepend or extend zero-rate region at start of map.
if position[0] != 0:
map_start = position[0] # TODO: is this what we want here?
if rate[0] == 0:
position[0] = 0
else:
position = np.insert(position, 0, 0)
rate = np.insert(rate, 0, 0)
# Append or extend zero-rate region at end of map.
if position[-1] != self.position[-1]:
if rate[-1] == 0:
position[-1] = self.position[-1]
else:
position = np.append(position, self.position[-1])
rate = np.append(rate, 0)
        return self.__class__(position, rate, map_start=map_start)
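
# Editor's usage sketch (same RateMap assumption as above): without trim the
# sliced map keeps the original coordinate system, padding the flanks with
# zero-rate regions; with trim=True it is re-anchored at position zero.
rm = RateMap([0, 10, 20], [1e-8, 3e-8])
inner = rm.slice(start=5, end=15)               # sequence_length stays 20
trimmed = rm.slice(start=5, end=15, trim=True)  # sequence_length becomes 10
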
def uniform_map(cls, length, rate, num_loci=None):
"""
Returns a :class:`.RecombinationMap` instance in which the recombination
rate is constant over a chromosome of the specified length.
The legacy ``num_loci`` option is no longer supported and should not be used.
:param float length: The length of the chromosome.
:param float rate: The rate of recombination per unit of sequence length
along this chromosome.
:param int num_loci: This parameter is no longer supported.
"""
        return cls([0, length], [rate, 0], num_loci=num_loci)
def mean_recombination_rate(self):
"""
Return the weighted mean recombination rate
across all windows of the entire recombination map.
"""
        return self.map.mean_rate
def read_hapmap(filename):
# Black barfs with an INTERNAL_ERROR trying to reformat this docstring,
# so we explicitly disable reformatting here.
# fmt: off
"""
Parses the specified file in HapMap format. These files must be
white-space-delimited, and contain a single header line (which is
ignored), and then each subsequent line contains the starting position
and recombination rate for the segment from that position (inclusive)
to the starting position on the next line (exclusive). Starting
positions of each segment are given in units of bases, and
recombination rates in centimorgans/Megabase. The first column in this
file is ignored, as are additional columns after the third (Position is
assumed to be the second column, and Rate is assumed to be the third).
If the first starting position is not equal to zero, then a
zero-recombination region is inserted at the start of the chromosome.
A sample of this format is as follows::
Chromosome Position(bp) Rate(cM/Mb) Map(cM)
chr1 55550 2.981822 0.000000
chr1 82571 2.082414 0.080572
chr1 88169 2.081358 0.092229
chr1 254996 3.354927 0.439456
chr1 564598 2.887498 1.478148
...
chr1 182973428 2.512769 122.832331
chr1 183630013 0.000000 124.482178
:param str filename: The name of the file to be parsed. This may be
in plain text or gzipped plain text.
:return: A RateMap object.
"""
# fmt: on
hapmap = np.loadtxt(filename, skiprows=1, usecols=(1, 2))
position = hapmap[:, 0]
# Rate is expressed in centimorgans per megabase, which
# we convert to per-base rates
rate = 1e-8 * hapmap[:, 1]
map_start = position[0]
if map_start != 0:
position = np.insert(position, 0, 0)
rate = np.insert(rate, 0, 0)
if rate[-1] != 0:
raise ValueError("The last rate provided in the recombination map must be zero")
    return RateMap(position, rate[:-1], map_start=map_start)
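
# Editor's usage sketch: write a tiny HapMap-style file and parse it. The
# final row must carry rate zero, marking the end of the map.
import os
import tempfile

hapmap_text = (
    "Chromosome Position(bp) Rate(cM/Mb) Map(cM)\n"
    "chr1 0 2.0 0.0\n"
    "chr1 1000 0.0 0.002\n"
)
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(hapmap_text)
rate_map = read_hapmap(f.name)  # rates are converted from cM/Mb to per-base
os.unlink(f.name)
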
def run(params):  # a second run() variant, taking explicit relative mismatch rates via params
"""
Run a single inference, with the specified rates
"""
rho = params.rec_rate
av_rho = np.quantile(rho, 0.5)
ma_mis = av_rho * params.ma_mis_rate
ms_mis = av_rho * params.ms_mis_rate
if params.precision is None:
# Smallest nonzero recombination rate
min_rho = int(np.ceil(-np.min(np.log10(rho[rho > 0]))))
# Smallest mean
av_min = int(np.ceil(-np.log10(min(ma_mis, ms_mis))))
precision = max(min_rho, av_min) + 3
else:
precision = params.precision
print(
f"Starting {params.ma_mis_rate} {params.ms_mis_rate}",
f"with av rho {av_rho:.5g}",
f"(mean {np.mean(rho):.4g}, median {np.quantile(rho, 0.5):.4g}, ",
f"nonzero min {np.min(rho[rho > 0]):.4g}, ",
f"2.5% quantile {np.quantile(rho, 0.025):.4g}) precision {precision}")
prefix = None
if params.sample_data.path is not None:
assert params.sample_data.path.endswith(".samples")
prefix = params.sample_data.path[0:-len(".samples")]
inf_prefix = "{}_ma{}_ms{}_N{}_p{}".format(
prefix,
params.ma_mis_rate,
params.ms_mis_rate,
params.cutoff_exponent,
precision)
start_time = time.process_time()
extra_params = dict(num_threads=params.num_threads)
if params.cutoff_exponent is not None:
extra_params['cutoff_power'] = params.cutoff_exponent
anc = tsinfer.generate_ancestors(
params.sample_data,
path=None if inf_prefix is None else inf_prefix + ".ancestors",
progress_monitor=tsinfer.cli.ProgressMonitor(1, 1, 0, 0, 0),
**extra_params,
)
print(f"GA done (cutoff exponent: {params.cutoff_exponent}")
extra_params = dict(
num_threads=params.num_threads,
recombination_rate=rho,
precision=precision,
)
inferred_anc_ts = tsinfer.match_ancestors(
params.sample_data,
anc,
mismatch_rate=ma_mis,
progress_monitor=tsinfer.cli.ProgressMonitor(1, 0, 1, 0, 0),
**extra_params,
)
inferred_anc_ts.dump(path=inf_prefix + ".atrees")
print(f"MA done (ma_mis:{ma_mis}")
inferred_ts = tsinfer.match_samples(
params.sample_data,
inferred_anc_ts,
mismatch_rate=ms_mis,
progress_monitor=tsinfer.cli.ProgressMonitor(1, 0, 0, 0, 1),
**extra_params,
)
process_time = time.process_time() - start_time
ts_path = inf_prefix + ".trees"
inferred_ts.dump(path=ts_path)
print(f"MS done: ms_mis rate = {ms_mis})")
simplified_inferred_ts = inferred_ts.simplify() # Remove unary nodes
# Calculate mean num children (polytomy-measure) for internal nodes
nc_sum = 0
nc_sum_sq = 0
nc_tot = 0
root_lengths = collections.defaultdict(float)
for tree in simplified_inferred_ts.trees():
for n in tree.nodes():
n_children = tree.num_children(n)
if n_children > 0: # exclude leaves/samples
nc_sum += n_children * tree.span
nc_sum_sq += (n_children ** 2) * tree.span
nc_tot += tree.span
nc_mean = nc_sum/nc_tot
nc_var = nc_sum_sq / nc_tot - (nc_mean ** 2) # can't be bothered to adjust for n
# Calculate span of root nodes in simplified tree
# Calculate KC
try:
kc = simplified_inferred_ts.kc_distance(tskit.load(prefix+".trees"))
except FileNotFoundError:
kc = None
return Results(
abs_ma_mis=ma_mis,
abs_ms_mis=ms_mis,
rel_ma_mis=params.ma_mis_rate,
rel_ms_mis=params.ms_mis_rate,
precision=precision,
edges=inferred_ts.num_edges,
muts=inferred_ts.num_mutations,
num_trees=inferred_ts.num_trees,
kc=kc,
cutoff_exponent=params.cutoff_exponent,
mean_node_children=nc_mean,
var_node_children=nc_var,
process_time=process_time,
ts_size=os.path.getsize(ts_path),
        ts_path=ts_path)
def validate(self):
"""Validates the configuration by verifying the mandatory fields are
present and in the correct format. If the validation fails, a
ConfigurationValidationError is raised. Otherwise nothing will happen.
"""
configuration_errors = []
for key in configuration_mandatory_fields:
if key not in self.endpoint:
configuration_errors.append(key)
if "expectation" in self.endpoint:
            if not isinstance(self.endpoint["expectation"], list) or (
                len(self.endpoint["expectation"]) == 0
            ):
                configuration_errors.append("endpoint.expectation")
for key, message in self.messages.items():
if not isinstance(message, str):
configuration_errors.append(f"message.{key}")
if len(configuration_errors) > 0:
raise ConfigurationValidationError(
"Endpoint [%s] failed validation. Missing keys: %s" % (self.endpoint, ", ".join(configuration_errors))
            )
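
# For context (editor's assumption based on the checks above): an endpoint
# configuration is a dict along the lines of
#   {"url": ..., "method": ..., "expectation": [...], ...}
# and configuration_mandatory_fields, defined elsewhere, lists the keys that
# must always be present.
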
def evaluate(self):
"""Sends the request to the URL set in the configuration and executes
each one of the expectations, one by one. The status will be updated
according to the expectation results.
"""
try:
if self.endpoint_header is None:
self.request = requests.request(self.endpoint_method, self.endpoint_url, timeout=self.endpoint_timeout)
else:
self.request = requests.request(
self.endpoint_method, self.endpoint_url, timeout=self.endpoint_timeout, headers=self.endpoint_header,
verify=not self.endpoint['insecure'] if 'insecure' in self.endpoint else True
)
self.current_timestamp = int(time.time())
except requests.ConnectionError:
self.message = "The URL is unreachable: %s %s" % (self.endpoint_method, self.endpoint_url)
self.logger.warning(self.message)
self.status = st.ComponentStatus.PARTIAL_OUTAGE
return
except requests.HTTPError:
self.message = "Unexpected HTTP response"
self.logger.exception(self.message)
self.status = st.ComponentStatus.PARTIAL_OUTAGE
return
except (requests.Timeout, requests.ConnectTimeout):
self.message = "Request timed out"
self.logger.warning(self.message)
self.status = st.ComponentStatus.PERFORMANCE_ISSUES
return
# We initially assume the API is healthy.
self.status = st.ComponentStatus.OPERATIONAL
self.message = ""
for expectation in self.expectations:
status: ComponentStatus = expectation.get_status(self.request)
# The greater the status is, the worse the state of the API is.
if status.value > self.status.value:
self.status = status
self.message = expectation.get_message(self.request)
                self.logger.info(self.message)
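
# Editor's note: the loop above is effectively a max() over expectation
# statuses; presumably ComponentStatus is an enum whose values grow with
# severity, so e.g. OPERATIONAL is overridden by PERFORMANCE_ISSUES, which in
# turn is overridden by PARTIAL_OUTAGE, and the worst outcome's message wins.
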
def if_trigger_update(self):
"""
Checks if update should be triggered - trigger it for all operational states
and only for non-operational ones above the configured threshold (allowed_fails).
"""
if self.status != st.ComponentStatus.OPERATIONAL:
self.current_fails = self.current_fails + 1
self.logger.warning(f"Failure #{self.current_fails} with threshold set to {self.allowed_fails}")
if self.current_fails <= self.allowed_fails:
self.trigger_update = False
return
self.current_fails = 0
        self.trigger_update = True
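
# Editor's worked example: with allowed_fails = 2, the first two consecutive
# non-operational evaluations set trigger_update = False; the third failure
# exceeds the threshold, resets the counter, and lets the update go out.
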
def push_status(self):
"""Pushes the status of the component to the cachet server. It will update the component
status based on the previous call to evaluate().
"""
if self.previous_status == self.status:
# We don't want to keep spamming if there's no change in status.
self.logger.info(f"No changes to component status.")
self.trigger_update = False
return
self.previous_status = self.status
if not self.trigger_update:
return
api_component_status = self.client.get_component_status(self.component_id)
if self.status == api_component_status:
return
component_request = self.client.push_status(self.component_id, self.status)
if component_request.ok:
# Successful update
self.logger.info(f"Component update: status [{self.status}]")
else:
# Failed to update the API status
self.logger.warning(
f"Component update failed with HTTP status: {component_request.status_code}. API"
f" status: {self.status}"
            )
def push_metrics(self):
"""Pushes the total amount of seconds the request took to get a response from the URL.
It only will send a request if the metric id was set in the configuration.
In case of failed connection trial pushes the default metric value.
"""
if self.metric_id and hasattr(self, "request"):
# We convert the elapsed time from the request, in seconds, to the configured unit.
metrics_request = self.client.push_metrics(
self.metric_id, self.latency_unit, self.request.elapsed.total_seconds(), self.current_timestamp
)
if metrics_request.ok:
# Successful metrics upload
self.logger.info("Metric uploaded: %.6f %s" % (self.request.elapsed.total_seconds(), self.latency_unit))
else:
self.logger.warning(f"Metric upload failed with status [{metrics_request.status_code}]") | def push_metrics(self):
"""Pushes the total amount of seconds the request took to get a response from the URL.
It only will send a request if the metric id was set in the configuration.
In case of failed connection trial pushes the default metric value.
"""
if self.metric_id and hasattr(self, "request"):
# We convert the elapsed time from the request, in seconds, to the configured unit.
metrics_request = self.client.push_metrics(
self.metric_id, self.latency_unit, self.request.elapsed.total_seconds(), self.current_timestamp
)
if metrics_request.ok:
# Successful metrics upload
self.logger.info("Metric uploaded: %.6f %s" % (self.request.elapsed.total_seconds(), self.latency_unit))
else:
self.logger.warning(f"Metric upload failed with status [{metrics_request.status_code}]") |
def push_incident(self):
"""If the component status has changed, we create a new incident (if this is the first time it becomes unstable)
or updates the existing incident once it becomes healthy again.
"""
if not self.trigger_update:
return
if hasattr(self, "incident_id") and self.status == st.ComponentStatus.OPERATIONAL:
incident_request = self.client.push_incident(
self.status,
self.public_incidents,
self.component_id,
self.get_incident_title(),
previous_incident_id=self.incident_id,
)
if incident_request.ok:
                # Successful incident update
self.logger.info(
f'Incident updated, API healthy again: component status [{self.status}], message: "{self.message}"'
)
del self.incident_id
else:
self.logger.warning(
f'Incident update failed with status [{incident_request.status_code}], message: "{self.message}"'
)
self.trigger_webhooks()
elif not hasattr(self, "incident_id") and self.status != st.ComponentStatus.OPERATIONAL:
incident_request = self.client.push_incident(
self.status, self.public_incidents, self.component_id, self.get_incident_title(), message=self.message
)
if incident_request.ok:
# Successful incident upload.
self.incident_id = incident_request.json()["data"]["id"]
self.logger.info(
f'Incident uploaded, API unhealthy: component status [{self.status}], message: "{self.message}"'
)
else:
self.logger.warning(
f'Incident upload failed with status [{incident_request.status_code}], message: "{self.message}"'
)
            self.trigger_webhooks()
def adjacency_matrix_to_deps(mat: np.array, topo: List[str], keep_empty: bool = False) -> List:
"""
    Convert an adjacency matrix to a list of edges, where each edge is a
    (source, destination) tuple. With keep_empty=True, nodes that have no
    outgoing edges are appended as (node, []) entries.
:param mat: the adjacency matrix as a 2D numpy array
:param topo: a list of nodes sorted in topological order
:param keep_empty: a flag specifying whether to add nodes with no outgoing edges as well
:return: a list of dependencies
"""
edges = [(topo[r], topo[c]) for r, c in np.argwhere(mat > 0)]
if keep_empty:
        used_nodes = set(src for src, target in edges)
        edges += [(node, []) for node in topo if node not in used_nodes]
    return edges

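A quick, self-contained check on a toy graph (the node names and matrix are made
up for illustration):

import numpy as np

topo = ["a", "b", "c", "d"]
mat = np.array([
    [0, 1, 0, 0],  # a -> b
    [0, 0, 1, 0],  # b -> c
    [0, 0, 0, 0],  # c: no outgoing edges
    [0, 0, 0, 0],  # d: no outgoing edges
])

print(adjacency_matrix_to_deps(mat, topo))
# [('a', 'b'), ('b', 'c')]
print(adjacency_matrix_to_deps(mat, topo, keep_empty=True))
# [('a', 'b'), ('b', 'c'), ('c', []), ('d', [])]
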
def adjacency_matrix_to_deps2(df_mat: pd.DataFrame, row_per_edge: bool = True, keep_empty: bool = False) -> List:
"""
Convert an adjacency matrix to a list of dependencies.
A dependency itself is a list of length 2, where the first element is the source node and
the second element is a list of destination nodes.
:param df_mat: the adjacency matrix as pandas DataFrame
:return: a list of dependencies
"""
dep_list = [
[src, s_adj[s_adj > 0].index.tolist()]
for src, s_adj in df_mat.iterrows()
if keep_empty or sum(s_adj) > 0
]
if not row_per_edge:
return dep_list
edgelist = [(src, target) for src, targets in dep_list for target in targets]
    return edgelist

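The same kind of toy graph as a labelled DataFrame shows the two output shapes
(again purely illustrative):

import pandas as pd

nodes = ["a", "b", "c"]
df_mat = pd.DataFrame(
    [[0, 1, 1],
     [0, 0, 1],
     [0, 0, 0]],
    index=nodes, columns=nodes,
)

print(adjacency_matrix_to_deps2(df_mat, row_per_edge=False))
# [['a', ['b', 'c']], ['b', ['c']]]
print(adjacency_matrix_to_deps2(df_mat))
# [('a', 'b'), ('a', 'c'), ('b', 'c')]
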
def mutate_edge_flip(ind: Individual, indpb: float) -> Tuple[Individual]:
"""Mutates the individual by flipping edges in the adjacency matrix.
The probability of an edge being flipped is independently applied to all edges in
the upper triangular matrix, so it remains a valid DAG.
:param ind: individual to be mutated.
    :param indpb: independent probability for each edge (upper-triangular entry) to be flipped.
:returns: a tuple of one individual
"""
triang_indices = np.triu_indices_from(ind.mat, k=1)
for row, col in zip(*triang_indices):
if random.random() < indpb:
ind.mat[(row, col)] = int(not ind.mat[(row, col)])
    return (ind,)

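A bare-bones stand-in for Individual (only the mat attribute is needed here)
makes the DAG invariant easy to check:

import random
import numpy as np

class _FakeIndividual:
    def __init__(self, mat):
        self.mat = mat

random.seed(0)
ind = _FakeIndividual(np.zeros((4, 4), dtype=int))
(mutated,) = mutate_edge_flip(ind, indpb=0.5)
# Only entries strictly above the diagonal can flip, so the matrix
# remains the adjacency matrix of a DAG under the current topology.
assert np.all(np.tril(mutated.mat) == 0)
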
def ordered_crossover(
ind1: Individual, ind2: Individual
) -> Tuple[Individual, Individual]:
"""
Mate two individuals by recombining their respective topology order. This operator
produces two offsprings, where each inherits the unchanged adjacency matrix of a parent.
:param ind1: The first individual participating in the crossover.
:param ind2: The second individual participating in the crossover.
:returns: A tuple of two offsprings.
"""
if ind1 == ind2:
return ind1, ind2
# turn list of topologies into a list of indices
cols = sorted(ind1.topology)
idx_map = {n: i for i, n in enumerate(cols)}
p1_nodes_order = [idx_map[n] for n in ind1.topology]
p2_nodes_order = [idx_map[n] for n in ind2.topology]
    # the actual crossover is delegated to DEAP's ordered crossover (tools.cxOrdered)
ch1_node_order, ch2_node_order = tools.cxOrdered(p1_nodes_order, p2_nodes_order)
    # map the crossed index orders back to node names on the resulting offspring
ind1.topology = [cols[i] for i in ch1_node_order]
ind2.topology = [cols[i] for i in ch2_node_order]
    return ind1, ind2

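A sketch of a single mating, again with a minimal stand-in (only the topology
attribute is exercised here; the real Individual also carries the adjacency matrix):

import random

class _FakeIndividual:
    def __init__(self, topology):
        self.topology = topology

random.seed(0)
p1 = _FakeIndividual(["a", "b", "c", "d", "e"])
p2 = _FakeIndividual(["e", "d", "c", "b", "a"])
c1, c2 = ordered_crossover(p1, p2)
# Both children remain valid permutations of the same node set.
assert sorted(c1.topology) == sorted(c2.topology) == ["a", "b", "c", "d", "e"]
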
def learn_causal_structure(
toolbox: base.Toolbox,
pop_size: int = 10,
crossover_pr: float = 1,
mutation_pr: float = 0.2,
num_elites: int = 1,
max_gens: int = 50,
):
"""
Perform the structur learning task using a genetic algorithm
:param toolbox: registry of tools provided by DEAP
:param pop_size: the number of individuals per generation
:param crossover_pr: the crossover rate for every (monogamous) couple
:param mutation_pr: the mutation rate for every individual
:param num_elites:
:param max_gens: the maximum number of generations
:return:
"""
# initialize a collection of instrumentation utilities to facilitate later analysis
instrumentation = initialize_instrumentation()
# ====== 0️⃣ initialize population ======
population = toolbox.population(n=pop_size)
# ====== 1️⃣ Evaluate the entire population ======
n_evals = evaluate_population(population, toolbox)
# Log initial stats for later analysis
log_generation_stats(0, population, n_evals, **instrumentation)
    # ====== 2️⃣ evolution loop; reaching max_gens is the only termination criterion ======
for gen in range(max_gens):
elites = get_fittest_individuals(population, num_elites)
# ====== 3️⃣ Parent selection ======
# Select the next generation individuals
offspring = toolbox.select(population, len(population))
# Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# ====== 4️⃣ Apply crossover and mutation on the offspring ======
for child1, child2 in zip(offspring[::2], offspring[1::2]):
# crossover probability applies to every couple
if random.random() < crossover_pr:
toolbox.mate(child1, child2)
                # mark the children's fitness as stale so they get re-evaluated in step 5
                child1.fitness = np.nan
                child2.fitness = np.nan
# mutation probability applies to every individual
for mutant in offspring:
if random.random() < mutation_pr:
toolbox.mutate(mutant)
mutant.fitness = np.nan
# ====== 5️⃣ Evaluate the individuals with an invalid fitness ======
n_evals = evaluate_population(offspring, toolbox)
# Log intermediary stats for later analysis
        log_generation_stats(gen + 1, population, n_evals, **instrumentation)
# ====== 6️⃣ Replacement ======
# The population is entirely replaced by the offspring, except for the top elites
fittest_offsprings = get_fittest_individuals(offspring, pop_size - num_elites)
population[:] = elites + fittest_offsprings
# ====== 7️⃣ Return final population ======
    return population, instrumentation

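One possible wiring of the operators above into a DEAP toolbox; make_individual
and score_structure stand in for the project's real individual factory and fitness
function and are not defined here:

import random
from deap import base, tools

toolbox = base.Toolbox()
toolbox.register("individual", make_individual)  # hypothetical factory
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", score_structure)    # hypothetical fitness function
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("mate", ordered_crossover)
toolbox.register("mutate", mutate_edge_flip, indpb=0.05)

random.seed(42)
final_pop, logs = learn_causal_structure(toolbox, pop_size=20, max_gens=30)

Tournament selection is only one reasonable choice here; any DEAP selection
operator with the (individuals, k) calling convention fits the select slot.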