Python
def broadcast(
    network: TRTNetwork,
    a: TRTTensor,
    b: TRTTensor,
    a_name: str,
    b_name: str,
    preset_diff: int = 0
) -> Tuple[TRTTensor, TRTTensor]:
    """
    Broadcast two TensorRT tensors to the same number of dimensions by
    prepending 1s to the tensor with fewer dimensions.

    Args:
        network (TRTNetwork): TensorRT network object.
        a (TRTTensor): A TensorRT ITensor.
        b (TRTTensor): A TensorRT ITensor.
        a_name (str): Name of tensor a.
        b_name (str): Name of tensor b.
        preset_diff (int): The difference in number of dimensions after broadcast.
            A positive number means that, after broadcast, tensor `a` will have
            `preset_diff` more dimensions than `b`. This is used in matmul, since
            we need to broadcast tensors but not always to the same number of
            dimensions. The reason is that matmul supports Matrix x Vector, and in
            that case the broadcasted vector should have one dimension fewer than
            the matrix tensor.

    Returns:
        Two TensorRT ITensors that are broadcast to the same number of dimensions.
    """
    a_shape = tuple(a.shape)
    b_shape = tuple(b.shape)

    diff = len(a_shape) - len(b_shape) - preset_diff
    if diff > 0:
        b = prepend_ones(network, b, f"{b_name}_broadcast", diff)
    elif diff < 0:
        a = prepend_ones(network, a, f"{a_name}_broadcast", -diff)

    return a, b
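For intuition, the rank alignment performed above can be sketched in plain NumPy: prepend singleton dimensions to the lower-rank array until the ranks differ by exactly `preset_diff`. This is only an illustration of the semantics, not the TensorRT code path.

Python

import numpy as np

def align_ranks(a: np.ndarray, b: np.ndarray, preset_diff: int = 0):
    # Prepend singleton dims so that a.ndim - b.ndim == preset_diff.
    diff = a.ndim - b.ndim - preset_diff
    if diff > 0:
        b = b.reshape((1,) * diff + b.shape)
    elif diff < 0:
        a = a.reshape((1,) * (-diff) + a.shape)
    return a, b

x, y = align_ranks(np.zeros((2, 3, 4)), np.zeros(4))
print(x.shape, y.shape)  # (2, 3, 4) (1, 1, 4)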
Python
def add_binary_elementwise_layer(
    network: TRTNetwork,
    lhs_val: Union[int, float, TRTTensor, torch.Tensor],
    rhs_val: Union[int, float, TRTTensor, torch.Tensor],
    op_type: trt.ElementWiseOperation,
    target: Target,
    name: str
) -> TRTTensor:
    """
    This function adds a TensorRT elementwise layer. We allow at most one
    operand to not be a trt tensor; otherwise the op should be const folded
    first. If any operand is not a trt tensor, we make it a trt constant layer
    with the same type as the other trt tensor. Then we broadcast these two
    inputs to the same number of dimensions.

    Limitation:
        If we are using implicit batch dim mode, the operand that is not a trt
        tensor is not allowed to have a larger rank than the trt tensor operand.

    Args:
        network (TRTNetwork): TensorRT network object.
        lhs_val (TRTTensor): Left operand of the binary operation. Could be a
            TensorRT tensor, a PyTorch tensor or a simple value.
        rhs_val (TRTTensor): Right operand of the binary operation. Similar to lhs_val.
        op_type (trt.ElementWiseOperation): Type of the TensorRT elementwise binary operation.
        target (Target): Target of fx node.
        name (str): The name we want to assign to the created TensorRT layer.

    Returns:
        The output of TensorRT Elementwise layer.
    """
    dtype = None
    is_lhs_trt_tensor = False
    is_rhs_trt_tensor = False

    if isinstance(lhs_val, TRTTensor):
        dtype = torch_dtype_from_trt(lhs_val.dtype)
        is_lhs_trt_tensor = True
    if isinstance(rhs_val, TRTTensor):
        dtype = torch_dtype_from_trt(rhs_val.dtype)
        is_rhs_trt_tensor = True

    if not is_lhs_trt_tensor and not is_rhs_trt_tensor:
        raise RuntimeError(
            f"Both operands of the binary elementwise op {name} "
            "are constant. In this case, please consider constant folding the model first."
        )

    lhs_val = get_trt_tensor(network, lhs_val, f"{name}_lhs", dtype)
    rhs_val = get_trt_tensor(network, rhs_val, f"{name}_rhs", dtype)

    # Check the limitation in the doc string.
    if network.has_implicit_batch_dimension:
        if is_lhs_trt_tensor and not is_rhs_trt_tensor:
            assert len(lhs_val.shape) >= len(rhs_val.shape), f"{lhs_val.shape} >= {rhs_val.shape}"
        elif not is_lhs_trt_tensor and is_rhs_trt_tensor:
            assert len(rhs_val.shape) >= len(lhs_val.shape), f"{rhs_val.shape} >= {lhs_val.shape}"

    lhs_val, rhs_val = broadcast(
        network, lhs_val, rhs_val, f"{name}_lhs", f"{name}_rhs"
    )
    layer = network.add_elementwise(lhs_val, rhs_val, op_type)
    set_layer_name(layer, target, name)
    return layer.get_output(0)
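For intuition only, the dtype policy above (promote the non-tensor operand to the tensor operand's dtype, then rely on broadcasting) can be mimicked with NumPy arrays; this sketch illustrates the policy and is not the TensorRT path.

Python

import numpy as np

def binary_elementwise_sketch(lhs, rhs, op=np.add):
    # Promote the plain-scalar operand to the array operand's dtype.
    if isinstance(lhs, np.ndarray) and not isinstance(rhs, np.ndarray):
        rhs = np.asarray(rhs, dtype=lhs.dtype)
    elif isinstance(rhs, np.ndarray) and not isinstance(lhs, np.ndarray):
        lhs = np.asarray(lhs, dtype=rhs.dtype)
    return op(lhs, rhs)

out = binary_elementwise_sketch(np.ones((2, 3), dtype=np.float16), 2)
print(out.dtype)  # float16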
Python
def add_unary_layer(
    network: TRTNetwork,
    input_val: TRTTensor,
    operation_type: trt.UnaryOperation,
    target: Target,
    name: str,
) -> TRTTensor:
    """
    Add a TensorRT Unary layer to `network`.

    Args:
        network (TRTNetwork): TensorRT network object.
        input_val (TRTTensor): Input to the unary op. Must be a TensorRT tensor.
        operation_type (trt.UnaryOperation): Type of the TensorRT unary operation.
        target (Target): Target of fx node.
        name (str): The name we want to assign to the created TensorRT layer.

    Returns:
        The output of TensorRT Unary layer.
    """
    if not isinstance(input_val, TRTTensor):
        raise RuntimeError(
            f"{operation_type} received input {input_val} that is not part "
            "of the TensorRT region!"
        )
    layer = network.add_unary(input_val, operation_type)
    set_layer_name(layer, target, name)
    return layer.get_output(0)
Python
def add_activation_layer(
    network: TRTNetwork,
    input_val: TRTTensor,
    operation_type: trt.ActivationType,
    target: Target,
    name: str,
    alpha: Optional[Any] = None,
    beta: Optional[Any] = None,
) -> TRTTensor:
    """
    Add a TensorRT Activation layer to `network`.

    Args:
        network (TRTNetwork): TensorRT network object.
        input_val (TRTTensor): Input to the activation op. Must be a TensorRT tensor.
        operation_type (trt.ActivationType): Type of the TensorRT activation operation.
        target (Target): Target of fx node.
        name (str): The name we want to assign to the created TensorRT layer.
        alpha (Optional[Any]): If not None, we will use it to set the alpha
            attribute of the created TensorRT activation layer.
        beta (Optional[Any]): If not None, we will use it to set the beta
            attribute of the created TensorRT activation layer.

    Returns:
        The output of TensorRT Activation layer.
    """
    if not isinstance(input_val, TRTTensor):
        raise RuntimeError(
            f"{operation_type} received input {input_val} that is not part "
            "of the TensorRT region!"
        )
    layer = network.add_activation(input_val, operation_type)
    if alpha:
        layer.alpha = alpha
    if beta:
        layer.beta = beta
    set_layer_name(layer, target, name)
    return layer.get_output(0)
Python
def trunc_div(
    input: TRTTensor,
    other: TRTTensor,
    network: TRTNetwork,
    target: Target,
    name: str
) -> TRTTensor:
    """
    Perform trunc divide on Tensor: the result of the division is rounded
    toward zero. This means positive quotients are floored and negative
    quotients are ceiled. Example: [2.1, 0.8, -3.2] -> [2, 0, -3].

    Args:
        input: dividend.
        other: divisor.
        network: INetworkDefinition.
        target: node target.
        name: namespace for the op

    Returns:
        A TensorRT tensor representing the result of trunc divide.
    """
    prod_output = add_binary_elementwise_layer(network, input, other,
                                               trt.ElementWiseOperation.PROD,
                                               target, f"{name}_prod")
    sign_output = sign(network, prod_output, target, name)

    # Convert constant input into ITensor for UnaryOperation
    if not isinstance(input, trt.tensorrt.ITensor):
        input = get_trt_tensor(network, input, f"{name}_input")
    if not isinstance(other, trt.tensorrt.ITensor):
        other = get_trt_tensor(network, other, f"{name}_other",
                               dtype=torch_dtype_from_trt(input.dtype))

    abs_input_output = add_unary_layer(network, input, trt.UnaryOperation.ABS,
                                       target, f"{name}_abs_input")
    abs_other_output = add_unary_layer(network, other, trt.UnaryOperation.ABS,
                                       target, f"{name}_abs_other")
    abs_floor_output = add_binary_elementwise_layer(network, abs_input_output, abs_other_output,
                                                    trt.ElementWiseOperation.FLOOR_DIV,
                                                    target, f"{name}_floor_div")
    output = add_binary_elementwise_layer(network, abs_floor_output, sign_output,
                                          trt.ElementWiseOperation.PROD,
                                          target, f"{name}_output")

    return output
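The decomposition used above (multiply the floored absolute quotient by the sign of the product) can be sanity-checked in plain PyTorch against torch.div with rounding_mode="trunc"; this is a semantic sketch, independent of TensorRT.

Python

import torch

a = torch.tensor([7.0, -7.0, 7.0, -7.0])
b = torch.tensor([3.0, 3.0, -3.0, -3.0])

# sign(a * b) * floor(|a| / |b|) reproduces rounding toward zero.
decomposed = torch.sign(a * b) * torch.floor(torch.abs(a) / torch.abs(b))
reference = torch.div(a, b, rounding_mode="trunc")
print(decomposed)                          # tensor([ 2., -2., -2.,  2.])
print(torch.equal(decomposed, reference))  # True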
Python
def fuse(self, quantizer: QuantizerCls, load_arg: Callable,
         root_node: Node, matched_node_pattern: NodePattern,
         fuse_custom_config_dict: Dict[str, Any],
         fuser_method_mapping: Optional[Dict[Pattern, Union[torch.nn.Sequential, Callable]]]) -> Node:
    additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
    assert root_node.op == "call_module", "Expecting module node to be a call_module Node"
    root_module = quantizer.modules[root_node.target]
    assert len(additional_fuser_method_mapping) == 0, (
        "Fusion implementation is undergoing changes, "
        "additional_fuser_method_mapping is not supported currently."
    )

    def get_module(n):
        if n.op == "call_module":
            return quantizer.modules[n.target]
        elif n.op == "call_function" and n.target == torch.nn.functional.relu:
            relu = torch.nn.ReLU()
            relu.training = root_module.training
            return relu
        return MatchAllNode

    matched_modules = tuple(map(get_module, matched_node_pattern))
    # since relu can be used multiple times, we'll need to create a relu module for each match

    def get_type(m):
        return type(m)

    matched_module_types = tuple(map(get_type, matched_modules))
    module_parent_name, module_name = _parent_name(root_node.target)
    fuser_method = get_fuser_method_new(matched_module_types, fuser_method_mapping)
    # TODO: change the signature for fuser_method to take matched module patterns
    # as input
    fused_module = fuser_method(*matched_modules)
    setattr(quantizer.modules[module_parent_name], module_name, fused_module)
    return quantizer.fused_graph.node_copy(root_node, load_arg)
Python
def _init_from_local_shards_and_global_metadata(
    cls,
    local_shards: List[Shard],
    sharded_tensor_metadata: ShardedTensorMetadata,
    process_group=None,
    init_rrefs=False,
):
    """
    Initialize a ShardedTensor with local shards and a global
    ShardedTensorMetadata built on each rank.

    Warning: This API is experimental and subject to change. It does not do
    cross rank validation, and fully relies on the user for the correctness
    of sharded_tensor_metadata on each rank.
    """
    process_group = (
        process_group
        if process_group is not None
        else distributed_c10d._get_default_group()
    )
    current_rank = dist.get_rank(process_group)

    shards_metadata = sharded_tensor_metadata.shards_metadata
    tensor_properties = sharded_tensor_metadata.tensor_properties

    if len(shards_metadata) == 0:
        raise ValueError("shards_metadata must not be empty!")

    if tensor_properties.layout != torch.strided:
        raise ValueError('Only torch.strided layout is currently supported')

    sharded_tensor = cls.__new__(cls)
    sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)

    sharded_tensor._metadata = sharded_tensor_metadata

    local_shard_metadatas = []

    def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False):
        tensor_property_or_metadata = "tensor property" if is_property else "local ShardMetadata"
        if expected != actual:
            raise ValueError(f"Local shards' tensor {prop_name} property is incompatible with "
                             f"{tensor_property_or_metadata} on rank {rank}: "
                             f"{tensor_property_or_metadata} {prop_name}={expected}, "
                             f"local shard tensor {prop_name}={actual}.")

    # collect local shard metadatas from the global sharded_tensor_metadata
    for shard_metadata in shards_metadata:  # type: ignore[attr-defined]
        rank, local_device = _parse_and_validate_remote_device(
            sharded_tensor._process_group, shard_metadata.placement)

        if current_rank == rank:
            local_shard_metadatas.append(shard_metadata)

    if len(local_shards) != len(local_shard_metadatas):
        raise RuntimeError(
            f'Number of local shards ({len(local_shards)}) does not match number of local '
            f'shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) '
            f'on rank ({current_rank}) '
        )

    for shard in local_shards:
        shard_meta = shard.metadata
        local_shard_tensor = shard.tensor
        rank, local_device = _parse_and_validate_remote_device(
            sharded_tensor._process_group, shard_meta.placement)

        # validate if shard_meta in the metadatas collected from sharded_tensor_metadata
        assert shard_meta in local_shard_metadatas, \
            "local shard metadata not in sharded_tensor_metadata!"

        _raise_if_mismatch(tensor_properties.layout, local_shard_tensor.layout, "layout", current_rank, True)
        if not local_shard_tensor.is_contiguous():
            raise ValueError('Only torch.contiguous_format memory_format is currently supported')

        _raise_if_mismatch(shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank)
        _raise_if_mismatch(tensor_properties.pin_memory, local_shard_tensor.is_pinned(), "pin_memory", current_rank, True)
        _raise_if_mismatch(local_device, local_shard_tensor.device, "device", current_rank)
        _raise_if_mismatch(tensor_properties.dtype, local_shard_tensor.dtype, "dtype", current_rank, True)
        _raise_if_mismatch(
            tensor_properties.requires_grad, local_shard_tensor.requires_grad, "requires_grad", current_rank, True)

    # check if shards_metadata have overlap shards
    validate_non_overlapping_shards_metadata(shards_metadata)

    # check if the shards_metadata is compatible with overall size of the sharded tensor.
    check_tensor(shards_metadata, list(sharded_tensor_metadata.size))

    # done validation, add local_shards
    sharded_tensor._local_shards = local_shards
    # make a EnumerableShardingSpec for sharded tensors that initialized from this API.
    # TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list.
    #       see issue https://github.com/pytorch/pytorch/issues/67244
    sharded_tensor._sharding_spec = EnumerableShardingSpec(shards_metadata)

    # run post initialization, i.e. map registration, rpc initialization
    sharded_tensor._post_init()
    return sharded_tensor
Python
def train(self, **kwargs):
    """
    Train the model.

    Loads the whole training set in memory for faster operations.

    Args:
        **kwargs (dict): keyword arguments that specify the model hyper parameters
    """
    # Load and rescale data
    # x_target, y_input_condition, x_target_val, y_input_condition_val = self.dataset

    self.callback_manager.on_train_begin()

    # Start training
    print("Start training")
    for epoch in range(self.epochs):
        self.callback_manager.on_epoch_begin(epoch)
        avg_gen_loss = 0
        avg_disc_loss = 0

        for batch, (x_full_batch, x_sketch_batch) in enumerate(
                self.dataset.random_batch_generator(self.batch_size, data_type="train"), start=1):
            self.callback_manager.on_batch_begin(batch)

            # Create a batch to feed the discriminator model
            x_disc, y_disc = self.get_disc_batch(x_full_batch, x_sketch_batch,
                                                 # alternate between generated and real samples
                                                 generate=batch % 2 == 0)

            # Update the discriminator
            disc_loss = self.discriminator.train_on_batch(x_disc, y_disc)
            avg_disc_loss += disc_loss

            # Create a batch to feed the generator model
            x_gen_target, x_gen = next(self.dataset.random_batch_generator(self.batch_size, data_type="train"))
            y_gen = np.zeros((x_gen.shape[0], 2), dtype=np.uint8)
            y_gen[:, 1] = 1

            if self.info_gan:
                # Sample code
                code = np.random.uniform(0, 1, self.img_shape[:-2])
                # np.concatenate expects a sequence of arrays
                x_gen = np.concatenate((x_gen, code), axis=2)

            # Freeze the discriminator
            self.discriminator.trainable = False
            gen_loss = self.model.train_on_batch(x_gen, [x_gen_target, y_gen])
            avg_gen_loss += gen_loss[0]

            # Train the auxiliary
            if self.info_gan:
                gen_loss = self.discriminator.auxiliary.train_on_batch(x_gen, [x_gen_target, code])
                # self.variational_mutual_information_regularizer(code, x_gen)

            # Unfreeze the discriminator
            self.discriminator.trainable = True

            # Save images for visualization
            # if batch % (self.n_batch_per_epoch / 2) == 0:
            #     # Get new images from validation
            #     self.plot_generated_batch(x_full_batch, x_sketch_batch, "training", epoch)
            #     x_full_batch, x_sketch_batch = next(
            #         map_data_utils.random_batch(x_target_val, y_input_condition_val, self.batch_size))
            #     self.plot_generated_batch(x_full_batch, x_sketch_batch, "validation", epoch)

            self.callback_manager.on_batch_end(batch, logs={"g_loss": gen_loss[0],
                                                            "d_loss": disc_loss,
                                                            "g_l1_loss": gen_loss[1],
                                                            "g_log_loss": gen_loss[2],
                                                            "size": self.batch_size})
            if batch >= self.n_batch_per_epoch:
                break

        avg_disc_loss = avg_disc_loss / self.n_batch_per_epoch
        avg_gen_loss = avg_gen_loss / self.n_batch_per_epoch

        self.callback_manager.on_epoch_end(epoch + 1, logs={"g_loss": avg_gen_loss,
                                                            "d_loss": avg_disc_loss})

    self.callback_manager.on_train_end()
Python
def format_image(img_path, size, nb_channels):
    """
    Load img with opencv and reshape
    """
    if nb_channels == 1:
        img = cv2.imread(img_path, 0)
        img = np.expand_dims(img, axis=-1)
    else:
        img = cv2.imread(img_path)
        img = img[:, :, ::-1]  # BGR to RGB

    w = img.shape[1]

    # Slice image in 2 to get both parts
    img_full = img[:, :w // 2, :]
    img_sketch = img[:, w // 2:, :]

    img_full = cv2.resize(img_full, (size, size), interpolation=cv2.INTER_AREA)
    img_sketch = cv2.resize(img_sketch, (size, size), interpolation=cv2.INTER_AREA)

    if nb_channels == 1:
        img_full = np.expand_dims(img_full, -1)
        img_sketch = np.expand_dims(img_sketch, -1)

    img_full = np.expand_dims(img_full, 0).transpose(0, 3, 1, 2)
    img_sketch = np.expand_dims(img_sketch, 0).transpose(0, 3, 1, 2)

    return img_full, img_sketch
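The last two lines convert each half from HWC to a batched NCHW layout; the reshaping can be checked with synthetic data, no image file needed, as in this small sketch:

Python

import numpy as np

img = np.zeros((256, 512, 3), dtype=np.uint8)   # stand-in for a loaded side-by-side image
left, right = img[:, :256, :], img[:, 256:, :]  # the two halves
nchw = np.expand_dims(left, 0).transpose(0, 3, 1, 2)
print(nchw.shape)  # (1, 3, 256, 256)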
Python
def build_hdf5(source, name="maps", file_name="maps_data.h5", size=256, max_samples=None):
    """
    Gather the data in a single HDF5 file.
    """
    pro_data_dir = odin.check_or_create_dir(odin.data_dir)
    hdf5_file = os.path.join(pro_data_dir, file_name)

    if os.path.isfile(hdf5_file):
        return hdf5_file

    path = download_and_unwrap_tarball(source=source, name=name)

    nb_channels = 3

    with h5py.File(hdf5_file, "w") as hfw:
        for dset_type in ["train", "test", "val"]:
            list_img = [str(img) for img in Path(path).glob('%s/*.jpg' % dset_type)]
            list_img.extend(map(str, Path(path).glob('%s/*.png' % dset_type)))
            list_img = np.array(list_img)
            num_files = len(list_img)

            if num_files == 0:
                print("No files in %s" % dset_type)
                continue

            data_full = hfw.create_dataset("%s_data_full" % dset_type,
                                           (0, nb_channels, size, size),
                                           maxshape=(max_samples, 3, size, size),
                                           dtype=np.uint8)
            data_sketch = hfw.create_dataset("%s_data_sketch" % dset_type,
                                             (0, nb_channels, size, size),
                                             maxshape=(max_samples, 3, size, size),
                                             dtype=np.uint8)

            chunk_size = 100
            # Ensure at least one chunk even when there are fewer than chunk_size files.
            num_chunks = max(1, num_files // chunk_size)
            arr_chunks = np.array_split(np.arange(num_files), num_chunks)

            for chunk_idx in tqdm(arr_chunks):
                list_img_path = list_img[chunk_idx].tolist()
                output = parmap.map(format_image, list_img_path, size, nb_channels, pm_parallel=False)

                arr_img_full = np.concatenate([o[0] for o in output], axis=0)
                arr_img_sketch = np.concatenate([o[1] for o in output], axis=0)

                # Resize HDF5 dataset
                data_full.resize(data_full.shape[0] + arr_img_full.shape[0], axis=0)
                data_sketch.resize(data_sketch.shape[0] + arr_img_sketch.shape[0], axis=0)

                data_full[-arr_img_full.shape[0]:] = arr_img_full.astype(np.uint8)
                data_sketch[-arr_img_sketch.shape[0]:] = arr_img_sketch.astype(np.uint8)

    # Plot result
    check_hdf5(path, nb_channels)

    return hdf5_file
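To verify the resulting layout, the datasets can be read back with h5py; `hdf5_path` below is a placeholder for the path returned by `build_hdf5`.

Python

import h5py

hdf5_path = "maps_data.h5"  # placeholder: use the path returned by build_hdf5
with h5py.File(hdf5_path, "r") as hf:
    full, sketch = hf["train_data_full"], hf["train_data_sketch"]
    print(full.shape, sketch.shape)  # (N, 3, size, size) each, dtype uint8
    first_full, first_sketch = full[0], sketch[0]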
Python
def check_hdf5(jpeg_dir, nb_channels):
    """
    Plot images with landmarks to check the processing
    """
    # Get hdf5 file
    file_name = os.path.basename(jpeg_dir.rstrip("/"))
    hdf5_file = os.path.join(odin.data_dir, "%s_data.h5" % file_name)

    max_plots = 20

    with h5py.File(hdf5_file, "r") as hf:
        data_full = hf["train_data_full"]
        data_sketch = hf["train_data_sketch"]

        for i in range(min(data_full.shape[0], max_plots)):
            oplt.figure()
            img = data_full[i, :, :, :].transpose(1, 2, 0)
            img2 = data_sketch[i, :, :, :].transpose(1, 2, 0)
            img = np.concatenate((img, img2), axis=1)
            if nb_channels == 1:
                oplt.imshow(img[:, :, 0], cmap="gray")
            else:
                oplt.imshow(img)
            oplt.show()
            oplt.clf()
            oplt.close()
Python
def on_batch_begin(self, batch, logs=None):
    """Called right before processing a batch.

    # Arguments
        batch: integer, index of batch within the current epoch.
        logs: dictionary of logs.
    """
    logs = logs or {}
    t_before_callbacks = time.time()
    for callback in self.callbacks:
        callback.on_batch_begin(batch, logs)
    self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
    delta_t_median = np.median(self._delta_ts_batch_begin)
    if (self._delta_t_batch > 0. and
            delta_t_median > 0.95 * self._delta_t_batch and
            delta_t_median > 0.1):
        warnings.warn('Method on_batch_begin() is slow compared '
                      'to the batch update (%f). Check your callbacks.'
                      % delta_t_median)
    self._t_enter_batch = time.time()
Python
def on_batch_end(self, batch, logs=None):
    """Called at the end of a batch.

    # Arguments
        batch: integer, index of batch within the current epoch.
        logs: dictionary of logs.
    """
    logs = logs or {}
    if not hasattr(self, '_t_enter_batch'):
        self._t_enter_batch = time.time()
    self._delta_t_batch = time.time() - self._t_enter_batch
    t_before_callbacks = time.time()
    for callback in self.callbacks:
        callback.on_batch_end(batch, logs)
    self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
    delta_t_median = np.median(self._delta_ts_batch_end)
    if (self._delta_t_batch > 0. and
            (delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1)):
        warnings.warn('Method on_batch_end() is slow compared '
                      'to the batch update (%f). Check your callbacks.'
                      % delta_t_median)
Python
def dummy_batch(self, batch_size, dtype=np.float32):
    """
    Get a batch of the same shape as the dataset but with only zeros.
    For testing purposes.

    :param batch_size:
    :param dtype:
    :return:
    """
    return np.zeros(shape=(batch_size,) + self.sample_shape, dtype=dtype)
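A quick shape check, with a hypothetical `sample_shape` of `(3, 256, 256)`, shows what callers receive:

Python

import numpy as np

sample_shape = (3, 256, 256)     # hypothetical dataset sample shape
batch = np.zeros(shape=(8,) + sample_shape, dtype=np.float32)
print(batch.shape, batch.dtype)  # (8, 3, 256, 256) float32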
Python
def default_arguments_and_behavior():
    """
    Sets up a basic environment for chainer and arguments for most actions

    :return: args, model_wrapper
    """
    print("----SETUP----")
    ap = argparse.ArgumentParser()
    ap.add_argument("-m", "--model", required=False,
                    help="model to do action on")
    ap.add_argument("--new-model", "--new_model", required=False, type=bool,
                    help="load or start from scratch")
    ap.add_argument("-p", '--prefix', type=str, default='default',
                    help="An additional sub label for your model")
    ap.add_argument("-e", '--experiment', type=str, required=False, dest="experiment",
                    help="An additional experiment label for your model")
    ap.add_argument("-a", "--action", dest="action", required=False, nargs="?",
                    choices=odin.actions.action_map.keys(),
                    help="action keyword")
    ap.add_argument("-f", "--file-prefix", required=False, default="save",
                    help="prefix for the files of computed values")
    ap.add_argument('--gpu', type=int, default=-1)
    ap.add_argument('--epochs', type=int, required=False)
    ap.add_argument('--batch_size', type=int, required=False)
    ap.add_argument('--snapshot', type=int, default=10)
    ap.add_argument('-n', "--no-large-files", type=bool, default=False,
                    help="To prevent out of memory on test run")
    ap.add_argument("--ask", "--prompt", type=bool, dest="prompt", default=True,
                    help="Whether to ask before downloading.")

    # Model options
    ap.add_argument('--unit', '-u', type=int, default=1000, help='Number of units')
    ap.add_argument("--use_bias", "--use-bias", required=False, type=bool, default=False,
                    help="Use a bias vector")
    ap.add_argument('--max', type=float, required=False, help='Maximum; action specific.')
    ap.add_argument('--num', type=int, required=False, help='Count; action specific.')

    # Plot and print
    ap.add_argument("--plot", dest="plot", action="store_true",
                    help="Plot the results.")
    ap.add_argument("-v", "--verbose", dest="verbose", action="store_true",
                    help="Print detailed information. May slow down execution.")

    # optimization
    ap.add_argument('--opt', type=str, default='MomentumSGD',
                    choices=['MomentumSGD', 'Adam', 'AdaGrad'])
    ap.add_argument('--weight_decay', type=float, default=0.0001)
    ap.add_argument('--alpha', type=float, default=0.001)
    ap.add_argument('--lr', type=float, required=False)
    ap.add_argument('--lr_decay_freq', type=int, required=False)
    ap.add_argument('--lr_decay_ratio', type=float, required=False)
    ap.add_argument('--validate_freq', type=int, default=1)
    ap.add_argument('--seed', type=int, default=1701)
    ap.add_argument('--frequency', type=int, default=-1)
    ap.add_argument('--gradclip', type=float, required=False,
                    help='Gradient norm threshold to clip')
    ap.add_argument('--bproplen', type=int, required=False,
                    help='Number of words in each mini-batch '
                         '(= length of truncated BPTT)')
    ap.add_argument('--regularizing', type=str, default="sq", choices=["sq", "abs"],
                    help="Which regularizer to use in lambda optimizer")

    # RNN text generation
    ap.add_argument('--primetext', type=str, required=False,
                    help='base text data, used for text generation')

    ap.add_argument('--available_cores', type=int, default=4,
                    help='The number of CPU cores that can be used for computation')

    ap.add_argument('--config', '-c', type=str, required=False, dest="config",
                    help='A config file describing the experiment.')

    args = ap.parse_args()
    kwargs = vars(args)

    if args.config:
        config = odin.load_yaml_config(args.config)
        kwargs.update(config)

    co.update_args(args)

    np.random.seed(args.seed)

    # Remove None values and use programmatic defaults
    kwargs = {a: b for a, b in kwargs.items() if b is not None}

    prepare_logging(kwargs)
    odin.update_config(kwargs)

    print(kwargs)
    print("----END-SETUP----")

    return kwargs
Python
def sample(self):
    """Return a sample batch of images."""
    z = self.next_batch(self.z)
    x = self.generator(z, test=True)

    # [-1, 1] -> [0, 1]
    x += 1.0
    x /= 2
    return x
Python
def put_group(self, group, elements, experiment=None):
    """
    Save a group of data associated with this model.

    :param experiment:
    :param group:
    :param elements:
    :return:
    """
    if group not in self._elements.keys():
        if experiment:
            # The group does not exist yet: create it and register the experiment.
            self._elements[group] = {}
            self._elements[group][experiment] = elements
        else:
            self._elements[group] = elements
    else:
        if experiment:
            if type(self._elements[group]) != dict:
                # Promote a plain group to a per-experiment mapping.
                a = {"default": self._elements[group]}
                self._elements[group] = a
            if experiment not in self._elements[group].keys():
                self._elements[group][experiment] = elements
            else:
                self._elements[group][experiment].update(elements)
        else:
            self._elements[group].update(elements)

    return co.store_elements(group_name=group, model_wrapper=self, elements=elements,
                             experiment=experiment)
Python
def calc_inter_layer_covariance(self, model_wrapper):
    """
    Calculate the covariance matrix for each layer in the network

    :param model_wrapper:
    :return:
    """
    pass
Python
def load_elements(self, group_name, model_wrapper, experiment=None):
    """
    Load a group of previously computed values.

    :param group_name: directory where the elements are placed
    :param model_wrapper:
    :param experiment: An extra label
    :return:
    """
    # Build the glob pattern to match the layout used by store_elements:
    # model_path/group_name[/experiment]/*.npy
    path = os.path.join(model_wrapper.model_path, group_name)
    if experiment:
        path = os.path.join(path, experiment)
    path = os.path.join(path, "*.npy")

    group = {}
    for f in glob(path):
        element = self.xp.load(f)
        name = f.split("/")[-1].split(".")[0]
        group[name] = element

    return group
Python
def store_elements(self, elements, group_name, model_wrapper, experiment=None):
    """
    Store a group of elements in the results_dir

    :param elements: a map of values and their names. The name will result in their filename.
    :param group_name: The name of the group which will be used as a directory name.
    :param model_wrapper:
    :param experiment:
    :return:
    """
    path = os.path.join(model_wrapper.model_path, group_name)
    if experiment:
        path = os.path.join(model_wrapper.model_path, group_name, experiment)
    if not os.path.isdir(path):
        os.makedirs(path)
    for key in elements:
        self.xp.save(os.path.join(path, key), elements[key])
    logging.info("Stored %s of %s for %s in '%s'"
                 % (elements.keys(), group_name, model_wrapper.model_name, path))
    return elements
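Assuming `self.xp` is NumPy, the store/load pair amounts to one `.npy` file per element under `model_path/group_name[/experiment]`; a minimal standalone sketch of that round trip:

Python

import os
from glob import glob
import numpy as np

base = "results/my_model/inter_layer_covariance"  # hypothetical model_path/group_name
os.makedirs(base, exist_ok=True)

np.save(os.path.join(base, "cov"), np.eye(3))     # one .npy file per element

loaded = {os.path.splitext(os.path.basename(f))[0]: np.load(f)
          for f in glob(os.path.join(base, "*.npy"))}
print(loaded["cov"].shape)  # (3, 3)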
Python
def calc_index_set(**kwargs):
    """
    [WIP] Calculate the new architecture given already computed layer widths.

    Dependency: calc_dof

    :param kwargs:
    :return:
    """
    model_wrapper = load_model(kwargs.get("model"), **kwargs)
    method = "Newton-CG"
    r_dof = model_wrapper.get_group("range_test")
    # bounds = r_dof["rho_range"]
    layer_widths_list = r_dof["all_layer_widths"]
    # lambdas = r_dof["all_lambdas"]

    data = model_wrapper.get_group("inter_layer_covariance")
    cov_list = data["cov"]
    # eigen_values = data["eigen_values"]

    loss_before = []
    loss_after = []
    num = kwargs.get("num") or 1
    layer_widths = layer_widths_list[num]  # TODO this is not what we want
    logging.info("Finding the new architecture for %s" % layer_widths)
    print("Finding the new architecture for %s" % layer_widths)

    architecture.calculate_index_set(model_wrapper=model_wrapper,
                                     layer_widths=layer_widths,
                                     cov_list=cov_list)
Python
def measure_goodness(**kwargs):
    """
    [WIP] Test the compressed model against the original model.

    :param kwargs:
    :return:
    """
    original_model = load_model(kwargs.get("model"), **kwargs)
    kwargs['prefix'] = kwargs['prefix'] + "_compressed"
    compressed_model = load_model(kwargs.get("model"), **kwargs)

    loss_original = original_model.test()
    loss_compressed = compressed_model.test()

    print(loss_original, loss_compressed)

    # Fine tune
    print("Fine tuning")
    compressed_model.train()
    loss_compressed_fine_tuned = compressed_model.test()
    print(loss_compressed_fine_tuned)

    compressed_model.prefix += "_fine_tuned"
    compressed_model.save()
Python
def calc_dof(**kwargs):
    """
    This computes the layer widths for a range of hyper parameters for the given model.

    Dependency: calc_eigs

    :param kwargs:
    :return:
    """
    model_wrapper = load_model(kwargs.get("model"), **kwargs)
    l_opt = lambda_param.LambdaOptimizer(model_wrapper)
    method = "Newton-CG"
    line = co.xp.linspace(5.0, 100.0, num=100)
    lambdas, layer_widths, success = l_opt.range_lambda_dof(line, method=method)

    co.store_elements(model_wrapper=model_wrapper,
                      experiment=kwargs.get("experiment"),
                      elements={"lambdas": lambdas,
                                "bounds": line,
                                "layer_widths": layer_widths,
                                "success": success},
                      group_name="range_dof_%s" % method)
Python
def plot_dof(**kwargs):
    """
    Makes a plot of layer widths given different hyper parameters
    Dependency: calc_dof
    :param kwargs:
    :return:
    """
    model_wrapper = load_model(kwargs.get("model"), **kwargs)

    # lambdas, optimal = l_opt.optimize(bound=0.1, debug=True)
    # l_arr, layer_widths = l_opt.n_sphere_lambda_dof()
    # lambda_param.plot_lambdas(l_arr, layer_widths, optimal=optimal, prefix="sphere")

    method = "Newton-CG"
    r_dof = model_wrapper.get_group("range_dof_%s" % method)
    bounds = r_dof["bounds"]
    layer_widths = r_dof["layer_widths"]
    lambdas = r_dof["lambdas"]

    lambda_param.plot_lambdas(lambdas, bounds, layer_widths, prefix=method)
Python
def calc_eigs(**kwargs):
    """
    Calculates the inter layer covariance and corresponding eigen values
    and stores them as 'inter_layer_covariance'.
    Dependency: train_model
    :param kwargs:
    :return:
    """
    model_wrapper = odin.model_wrapper = load_model(kwargs.get("model"), **kwargs)

    co.calc_inter_layer_covariance(model_wrapper=model_wrapper, **kwargs)

    datastore = model_wrapper.get_group("inter_layer_covariance")
    cov = datastore["cov"]
    eigen_values = datastore["eigen_values"]

    if kwargs["plot"]:
        for i, eigs in enumerate(eigen_values):
            l = len(eigs)
            eigs = abs(eigs)
            oplt.plot_eigen_values(eigs, title="%s layer %d (%d)" % (
                model_wrapper.model_name, i, l))
            oplt.save("eigs(%d)" % i)
            oplt.plot_matrix(cov[i], title="Covariance (%d)" % i)
            oplt.save("cov(%d)" % i)

    return cov, eigen_values
Python
def train_model(**kwargs):
    """
    Train the specified model. Commandline arguments will be passed to the
    training function.
    :param kwargs:
    :return:
    """
    # Remove None values
    kwargs = {a: b for a, b in kwargs.items() if b is not None}
    kwargs['new_model'] = True

    model_wrapper = load_model(kwargs.get("model"), **kwargs)

    model_wrapper.train(**kwargs)

    model_wrapper.save()

    if model_wrapper.history:
        model_wrapper.put_group("training_history",
                                {"history": model_wrapper.history.history})

    if kwargs["plot"]:
        oplt.plot_model_history(model_wrapper)
        oplt.save("loss")
        oplt.show()

    return model_wrapper
Python
def clamp(self, lower=-0.01, upper=0.01):
    """Clamp all parameters, including the batch normalization parameters."""
    for params in self.params():
        params_clipped = F.clip(params, lower, upper)
        params.data = params_clipped.data
Python
def _get_encoded_list(items: List[str]) -> str:
    """
    Return Python list encoded in a string
    """
    list_contents = ''
    if items:
        list_contents = ', '.join(["'{}'".format(item) for item in items])

    return '[{}]'.format(list_contents)
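For illustration, a minimal usage sketch (not part of the original module, and assuming `_get_encoded_list` is in scope) showing what the helper returns; the expected outputs follow directly from the implementation above:

# Hypothetical usage sketch; the sample nameserver addresses are made-up values.
print(_get_encoded_list([]))                      # prints: []
print(_get_encoded_list(['8.8.8.8', '8.8.4.4']))  # prints: ['8.8.8.8', '8.8.4.4']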
Python
def start(self, addNodesRequest: dict, dbSession: Session,
          dbHardwareProfile: HardwareProfile,
          dbSoftwareProfile: Optional[SoftwareProfile] = None) \
        -> List[Node]:
    """
    Create Azure virtual machine to map to a Tortuga node.

    Called when nodes are added to Tortuga.
    """
    self._logger.debug(
        'start(): addNodesRequest=[%s], dbSession=[%s],'
        ' dbHardwareProfile=[%s], dbSoftwareProfile=[%s]',
        addNodesRequest, dbSession, dbHardwareProfile, dbSoftwareProfile
    )

    result = super().start(addNodesRequest, dbSession, dbHardwareProfile,
                           dbSoftwareProfile)

    if 'nodeDetails' in addNodesRequest and \
            addNodesRequest['nodeDetails']:
        # Instances already exist, create node records
        if 'metadata' in addNodesRequest['nodeDetails'][0] and \
                'instance_id' in \
                addNodesRequest['nodeDetails'][0]['metadata']:

            config = self.__get_config(addNodesRequest, dbHardwareProfile)
            azure_session = AzureSession(config=config)

            # inserting nodes based on metadata
            node = self.__insert_node(
                azure_session,
                dbSession,
                dbHardwareProfile,
                dbSoftwareProfile,
                addNodesRequest,
                addNodesRequest.get('resource_adapter_configuration')
            )

            dbSession.commit()

            return [node]

    start_time = datetime.datetime.utcnow()

    nodes = self.__add_active_nodes(
        addNodesRequest,
        dbSession,
        dbHardwareProfile,
        dbSoftwareProfile
    )

    if len(nodes) < addNodesRequest['count']:
        self._logger.warning(
            '%s node(s) requested, only %s launched successfully',
            addNodesRequest['count'], len(nodes)
        )

    # This is a necessary evil for the time being, until there's
    # a proper context manager implemented.
    self.addHostApi.clear_session_nodes(nodes)

    end_time = datetime.datetime.utcnow()

    time_delta = end_time - start_time

    self._logger.debug(
        'start() session [%s] completed in %.2f seconds',
        self.addHostSession,
        time_delta.seconds + time_delta.microseconds / 1000000.0
    )

    result.extend(nodes)

    return result
Python
def __get_scale_set_parameters(self, session, name):  # pylint: disable=no-self-use
    """
    Create the VM parameters structure.
    """
    ssh_public_key = session.config['ssh_key_value']

    storage_profile = {
        'os_disk': {
            'os_type': 'Linux',
            'caching': 'ReadWrite',
            'create_option': 'fromImage',
        }
    }

    if session.config['use_managed_disks']:
        # Managed disk
        if session.config['ssd']:
            storage_profile['os_disk']['managed_disk'] = {
                'storage_account_type': 'Premium_LRS',
            }
    else:
        # Regular (unmanaged) disk

        # Build unique URI for VHD
        vhd_uri = 'https://{}.blob.core.windows.net/vhds/{}.vhd'.format(
            session.config['storage_account'], name)

        storage_profile['os_disk']['vhd'] = {
            'uri': vhd_uri,
        }

    if 'image_reference' in session.config:
        storage_profile['image_reference'] = \
            session.config['image_reference']
    else:
        # Look up id of image
        image_id = session.compute_client.images.get(
            session.config['resource_group'],
            session.config['image']).id

        storage_profile['image_reference'] = {
            'id': image_id,
        }

    if 'storage_account_type' in session.config and \
            session.config['storage_account_type']:
        storage_profile['os_disk']['managed_disk'] = {
            'storage_account_type': session.config['storage_account_type']
        }

    subnet = \
        session.network_client.subnets.get(
            session.config['resource_group'],
            session.config['virtual_network_name'],
            session.config['subnet_name'][0])

    ip_config = {
        'name': name + 'IpConfig',
        'subnet': {
            'id': subnet.id
        }
    }

    if session.config['allocate_public_ip']:
        ip_config['public_ip_address_configuration'] = {
            'name': 'pub1',
            'idle_timeout_in_minutes': 15,
        }

    virtualMachineProfile = {
        'os_profile': {
            'admin_username': session.config['default_login'],
            'linux_configuration': {
                'disable_password_authentication': True,
                'ssh': {
                    'public_keys': [
                        {
                            'path': '/home/%s/.ssh/authorized_keys' % (
                                session.config['default_login']),
                            'key_data': ssh_public_key,
                        }
                    ],
                },
            },
        },
        'hardware_profile': {
            'vm_size': session.config['size']
        },
        'storage_profile': storage_profile,
        'network_profile': {
            'network_interface_configurations': [
                {
                    'name': name + 'Nic',
                    'primary': True,
                    'ip_configurations': [
                        ip_config
                    ]
                }
            ]
        }
    }

    result = {
        'sku': {
            'tier': 'Standard',
            'capacity': 0,
            'name': session.config['size'],
        },
        'location': session.config['location'],
        'properties': {
            'overprovision': True,
            'upgradePolicy': {
                'mode': 'Manual'
            },
            'virtualMachineProfile': virtualMachineProfile,
        },
        'tags': {},
    }

    return result
Python
def create_scale_set(self, name: str, resourceAdapterProfile: str,
                     hardwareProfile: str, softwareProfile: str,
                     minCount: int, maxCount: int, desiredCount: int,
                     adapter_args: dict):
    """
    Create a scale set in Azure

    :raises InvalidArgument:
    """
    config = self.get_config(resourceAdapterProfile)
    az_session = AzureSession(config=config)

    tags = self.get_initial_tags(config, hardwareProfile, softwareProfile)

    parameters = self.__get_scale_set_parameters(az_session, name)
    parameters['sku']['capacity'] = desiredCount
    parameters['properties']['virtualMachineProfile']['os_profile']['computerNamePrefix'] = name
    parameters['tags'] = patch_managed_tags(tags)

    priority = adapter_args.get('priority')
    if priority is not None:
        parameters['properties']['virtualMachineProfile']['priority'] = priority

    evictionPolicy = adapter_args.get('evictionPolicy')
    if evictionPolicy is not None:
        parameters['properties']['virtualMachineProfile']['eviction_policy'] = evictionPolicy

    insertnode_request = {
        'softwareProfile': softwareProfile,
        'hardwareProfile': hardwareProfile,
        'resource_adapter_configuration': resourceAdapterProfile,
    }

    encrypted_insertnode_request = encrypt_insertnode_request(
        self._cm.get_encryption_key(), insertnode_request)

    custom_data = self.__get_custom_data(
        az_session.config,
        insertnode_request=encrypted_insertnode_request
    )

    if custom_data is not None:
        parameters['properties']['virtualMachineProfile']['os_profile']['custom_data'] = \
            base64.b64encode(custom_data.encode()).decode()

    az_session.compute_client.virtual_machine_scale_sets.create_or_update(
        az_session.config['resource_group'], name, parameters)
Python
def __insert_node(self, session: AzureSession,
                  dbSession: Session,
                  dbHardwareProfile: HardwareProfile,
                  dbSoftwareProfile: SoftwareProfile,
                  addNodesRequest: dict,
                  resourceAdapter: str
                  ) -> Node:
    """
    Directly insert nodes with pre-existing Azure instances

    This is primarily used for supporting spot instances where an
    Azure instance exists before the Tortuga associated node record.
    """
    self._logger.info(
        'Inserting %d node', 1
    )

    nodeDetail = addNodesRequest['nodeDetails'][0]

    instance_id: Optional[int] = \
        nodeDetail['metadata']['instance_id'] \
        if 'metadata' in nodeDetail and \
        'instance_id' in nodeDetail['metadata'] else None

    if instance_id is None:
        # TODO: currently not handled
        self._logger.error(
            'instance_id not set in metadata. Unable to insert Azure nodes'
            ' with invalid metadata: %s', nodeDetail
        )
        return None

    internal_ip: Optional[str] = \
        nodeDetail['metadata']['private_ip'] \
        if 'metadata' in nodeDetail and \
        'private_ip' in nodeDetail['metadata'] else None

    if not internal_ip:
        # TODO: currently not handled
        self._logger.error(
            'private_ip not set in metadata. Unable to insert Azure nodes'
            ' without passed ip address'
        )
        return None

    scale_set_name: Optional[str] = \
        nodeDetail['metadata']['scale_set_name'] \
        if 'metadata' in nodeDetail and \
        'scale_set_name' in nodeDetail['metadata'] else None

    if scale_set_name == "":
        scale_set_name = None

    instance = self.__azure_get_vm(
        session,
        get_vm_name(nodeDetail['name']),
        scale_set_name,
        instance_id
    )

    if not instance:
        self._logger.warning(
            'Error inserting node [%s]. Azure instance [%s] does not exist',
            nodeDetail['name'],
            get_vm_name(nodeDetail['name']),
        )
        return None

    node_created = False
    node = self.__get_node_by_instance(dbSession, nodeDetail['name'])

    if node is None:
        try:
            config = self.__get_config(addNodesRequest, dbHardwareProfile)
            azure_session = AzureSession(config=config)
            node = self.__build_nodes(azure_session,
                                      addNodesRequest,
                                      dbSession,
                                      dbHardwareProfile,
                                      dbSoftwareProfile)[0]
            node_created = True
            node.state = state.NODE_STATE_PROVISIONED
        except InvalidArgument:
            self._logger.exception(
                'Error creating new node record in insert workflow'
            )
            raise
    else:
        self._logger.debug(
            'Found existing node record [%s] for instance id [%s]',
            node.name, nodeDetail['name']
        )

    # set node properties
    node.nics.append(Nic(ip=internal_ip, boot=True))

    # Call pre-add-host to set up DNS record
    self._pre_add_host(
        node.name,
        node.hardwareprofile.name,
        node.softwareprofile.name,
        internal_ip,
    )

    vm_name = get_vm_name(node.name)

    node.instance = InstanceMapping(
        instance=nodeDetail['name'],
        instance_metadata=[
            InstanceMetadata(key='vm_name', value=vm_name),
            InstanceMetadata(key='resource_group',
                             value=session.config['resource_group']),
        ],
        resource_adapter_configuration=self.load_resource_adapter_config(
            dbSession, resourceAdapter
        )
    )

    if node_created:
        # only fire the new node event if creating the record for the
        # first time
        self.fire_provisioned_event(node)

    return node
Python
def __cleanup_node(self, session, node): \
        # pylint: disable=no-self-use
    """Remove Nodes and associated nics from database"""

    # Ensure session node cache entry is removed for failed launch
    self.addHostApi.clear_session_node(node)

    for nic in node.nics:
        session.delete(nic)

    session.delete(node)
Python
def __init_node_request_queue(self, nodes): \
        # pylint: disable=no-self-use
    """Construct a lookup table of instances, nodes, and VPN IDs, keyed
    on the instance
    """
    node_request_queue = []

    for node in nodes:
        node_request = {
            'node': node,
            'status': 'pending',
        }

        node_request_queue.append(node_request)

    return node_request_queue
Python
def __get_cloud_init_custom_data(self, configDict):
    """Process cloud-init template using Jinja2 templating language"""

    srcpath, srcfile = os.path.split(
        configDict['cloud_init_script_template'])

    env = Environment(loader=FileSystemLoader(srcpath))

    template = env.get_template(srcfile)

    tmpl_vars = self.__get_common_tmpl_vars(configDict)

    tmpl_vars.update({
        'installer': self.installer_public_hostname,
        'installer_ip_address': self.installer_public_ipaddress,
    })

    return template.render(tmpl_vars)
Python
def __get_common_tmpl_vars(self, configDict: dict) -> dict:
    """Returns dict containing common template variables shared between
    user-data script template and cloud-init template.
    """
    dns_domain = configDict.get('dns_domain', None)
    if dns_domain:
        dns_domain = f"'{dns_domain}'"

    return {
        'override_dns_domain': configDict.get('override_dns_domain', False),
        'dns_domain': dns_domain,
        'dns_nameservers':
            _get_encoded_list(configDict.get('dns_nameservers'))
    }
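As a hedged illustration (the domain and nameserver values below are made-up samples, not adapter defaults), this is the mapping the helper above performs on a resource adapter config:

# Illustrative sketch only; 'example.com' and the addresses are invented.
configDict = {
    'override_dns_domain': True,
    'dns_domain': 'example.com',
    'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
}
# __get_common_tmpl_vars(configDict) would return:
# {
#     'override_dns_domain': True,
#     'dns_domain': "'example.com'",
#     'dns_nameservers': "['8.8.8.8', '8.8.4.4']",
# }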
Python
def generate_startup_script(self, configDict: Dict[str, str],
                            node: Optional[Node] = None,
                            insertnode_request: Optional[bytes] = None) \
        -> str:
    """Generate node-specific custom data from template"""

    self._logger.info(
        'Using cloud-init script template [%s]',
        configDict['user_data_script_template']
    )

    installerIp = self.installer_public_ipaddress

    if node is not None:
        installerIp = node.hardwareprofile.nics[0].ip \
            if node.hardwareprofile.nics else installerIp

    with open(configDict['user_data_script_template']) as fp:
        result = ''

        settings_dict = self.__get_common_tmpl_vars(configDict)

        settings_dict.update({
            'installerHostName': self.installer_public_hostname,
            'installerIp': installerIp,
            'adminport': self._cm.getAdminPort(),
            'cfmuser': self._cm.getCfmUser(),
            'cfmpassword': self._cm.getCfmPassword(),
            'insertnode_request': insertnode_request,
        })

        for inp in fp.readlines():
            if inp.startswith('### SETTINGS'):
                result += '''\
installerHostName = '%(installerHostName)s'
installerIpAddress = '%(installerIp)s'
port = %(adminport)d
override_dns_domain = %(override_dns_domain)s
dns_search = %(dns_domain)s
dns_domain = %(dns_domain)s
dns_nameservers = %(dns_nameservers)s
# Insert_node
insertnode_request = %(insertnode_request)s
''' % settings_dict
            else:
                result += inp

    return result
Python
def create_vm_parameters(self, session, nic_id, vm_name,
                         custom_data=None, tags=None): \
        # pylint: disable=no-self-use
    """Create the VM parameters structure.
    """
    ssh_public_key = session.config['ssh_key_value']

    vhd_name = vm_name

    storage_profile = {
        'os_disk': {
            'os_type': 'Linux',
            'name': '%s-os-disk' % (vm_name),
            'caching': 'ReadWrite',
            'create_option': 'fromImage',
        }
    }

    if session.config['use_managed_disks']:
        # Managed disk
        if session.config['ssd']:
            storage_profile['os_disk']['managed_disk'] = {
                'storage_account_type': 'Premium_LRS',
            }
    else:
        # Regular (unmanaged) disk

        # Build unique URI for VHD
        vhd_uri = 'https://{}.blob.core.windows.net/vhds/{}.vhd'.format(
            session.config['storage_account'], vhd_name)

        storage_profile['os_disk']['vhd'] = {
            'uri': vhd_uri,
        }

    if 'image_reference' in session.config:
        storage_profile['image_reference'] = \
            session.config['image_reference']
    else:
        # Look up id of image
        image_id = session.compute_client.images.get(
            session.config['resource_group'],
            session.config['image']).id

        storage_profile['image_reference'] = {
            'id': image_id,
        }

    if 'storage_account_type' in session.config and \
            session.config['storage_account_type']:
        storage_profile['os_disk']['managed_disk'] = {
            'storage_account_type': session.config['storage_account_type']
        }

    result = {
        'location': session.config['location'],
        'os_profile': {
            'computer_name': vm_name,
            'admin_username': session.config['default_login'],
            'linux_configuration': {
                'disable_password_authentication': True,
                'ssh': {
                    'public_keys': [
                        {
                            'path': '/home/%s/.ssh/authorized_keys' % (
                                session.config['default_login']),
                            'key_data': ssh_public_key,
                        }
                    ],
                },
            },
        },
        'hardware_profile': {
            'vm_size': session.config['size']
        },
        'storage_profile': storage_profile,
        'network_profile': {
            'network_interfaces': [{
                'id': nic_id,
                'primary': True,
            }]
        },
    }

    if tags:
        result['tags'] = patch_managed_tags(tags)

    if custom_data is not None:
        result['os_profile']['custom_data'] = \
            base64.b64encode(custom_data.encode()).decode()

    return result
Python
def __azure_delete_network_interface(self, session, interface_id):
    """Delete network interface and all associated ip configurations

    Raises:
        msrestazure.azure_exceptions.CloudError
    """
    try:
        network_interface_obj = \
            session.network_client.network_interfaces.get(
                session.config['resource_group'], interface_id)
    except azure_exceptions.CloudError as exc:
        if exc.status_code == 404:
            # Quietly ignore "not found" error
            return

        # Re-raise all other exceptions
        raise

    self._logger.debug('Deleting network interface [%s]', interface_id)

    retries = 0
    while retries < 5:
        total_wait_time = 0

        try:
            delete_network_interface_request = \
                session.network_client.network_interfaces.delete(
                    session.config['resource_group'], interface_id)

            while total_wait_time < 300 and \
                    not delete_network_interface_request.done():
                # delete_network_interface_request.wait()

                gevent.sleep(5)

                total_wait_time += 5

            if total_wait_time < 300:
                # Break out of retry loop
                break
        except Exception:
            # TODO: ensure non-recoverable errors are handled
            self._logger.warning(
                'Failure attempting to delete network interface'
                ' %s', interface_id
            )

        retries += 1

        # Wait 10s before reattempting failed delete network interface
        gevent.sleep(10)

    if retries == 5:
        self._logger.error(
            'unable to delete network interface [%s]', interface_id
        )

        return False

    # Iterate over ip configurations, deleting any public ip address
    # configurations
    for ip_configuration in network_interface_obj.ip_configurations:
        if not ip_configuration.public_ip_address:
            # Ignore any interfaces without public ip address
            continue

        self.__azure_delete_ip_configuration(
            session,
            os.path.basename(ip_configuration.public_ip_address.id))

    return True
Python
def deleteNode(self, nodes: List[Node]) -> None:
    """Delete Azure VMs associated with nodes"""

    reqs = []

    # Iterate over nodes requested to be deleted getting vm_name
    # and Azure session
    for node, azure_session, vm_name in \
            self.__iter_vm_name_and_session_tuples(nodes):
        # Initialize delete request
        delete_request = self.__init_delete_request(
            node, vm_name, azure_session)

        # Perform pre-delete operation
        self.__pre_delete_node(node, azure_session)

        # Spawn one greenlet per node being deleted
        reqs.append(gevent.spawn(self.__azure_delete_vm_req, delete_request))

    # Complete delete request
    self.__common_delete_nodes(reqs)
Python
def __wait_for_async_request(self, async_request, tag: str = None,
                             max_sleep_time: int = 7000,
                             sleep_interval: int = 2000,
                             initial_sleep_time: int = 7000):
    """
    Generic routine for waiting on an async Azure request

    :param max_sleep_time: maximum sleep time (in milliseconds)
    :param sleep_interval: time between polling intervals (in milliseconds)
    :param initial_sleep_time: initial sleep time (in milliseconds)

    :return: result from async request

    Raises:
        AzureOperationTimeout
    """
    logmsg_prefix = '{0}: '.format(tag) if tag else ''

    total_sleep_time = 0

    for retries in itertools.count(0):
        if async_request.done():
            break

        if retries == 0:
            sleeptime = initial_sleep_time / 1000.0
        else:
            temp = min(max_sleep_time, sleep_interval * 2 ** retries)
            sleeptime = (temp / 2 + random.randint(0, temp / 2)) / 1000.0

        self._logger.debug(
            '%ssleeping %.2f seconds on async request',
            logmsg_prefix, sleeptime
        )

        gevent.sleep(sleeptime)

        total_sleep_time += sleeptime

        if total_sleep_time > AZURE_ASYNC_OP_TIMEOUT:
            raise AzureOperationTimeout(
                'Timeout exceeded waiting for async operation'
                ' completion')

    return async_request.result()
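For reference, a small standalone sketch (not part of the adapter) that reproduces the sleep-time formula above with its default arguments, showing the random component as a min/max range:

# Backoff schedule implied by __wait_for_async_request's defaults
# (millisecond parameters, printed as seconds).
max_sleep_time, sleep_interval, initial_sleep_time = 7000, 2000, 7000

for retries in range(4):
    if retries == 0:
        low = high = initial_sleep_time / 1000.0
    else:
        temp = min(max_sleep_time, sleep_interval * 2 ** retries)
        low, high = (temp / 2) / 1000.0, temp / 1000.0
    print(f"retry {retries}: sleeps between {low:.1f}s and {high:.1f}s")
# retry 0: 7.0s; retry 1: 2.0-4.0s; retry 2 and later: 3.5-7.0s (capped)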
Python
def __restart_vm_worker(self, q):
    """Coroutine for Azure async restart operation"""

    while True:
        try:
            azure_session, vm_name = q.get()

            self._logger.info('Rebooting VM [%s]', vm_name)

            response = \
                azure_session.compute_client.virtual_machines.restart(
                    azure_session.config['resource_group'], vm_name)

            while not response.done():
                gevent.sleep(5)

            self._logger.debug(
                'VM [%s] restart async operation complete', vm_name
            )
        except azure_exceptions.CloudError as exc:
            if exc.status_code == 404:
                # Quietly ignore "not found" error
                continue

            self._logger.error('Error restarting VM [%s]', vm_name)
        finally:
            q.task_done()
Python
def validate_start_arguments(self, addNodesRequest: Dict[str, Any],
                             dbHardwareProfile: HardwareProfile,
                             dbSoftwareProfile: SoftwareProfile) -> None: \
        # pylint: disable=unused-argument
    """Raise an exception if the dns component is not enabled

    :raises ConfigurationError:
    """
    installer = dbHardwareProfile.nics[0].node \
        if dbHardwareProfile.nics else \
        self._nodesDbHandler.get_installer_node(self.session)

    if not self._is_component_enabled(installer, 'dns'):
        msg = 'DNS component must be enabled for Azure-based compute nodes'

        self._logger.error(msg)

        raise ConfigurationError(msg)
Python
def _run_cmd(self, cmd: List[str]) -> str:
    """
    Runs a command line program and returns the results.

    :param cmd List[str]: a list of command and arguments

    :return str: the result
    """
    if self.verbose:
        print(' '.join(cmd))

    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    err = stderr.decode().strip()
    if err:
        raise Exception(err)

    result = stdout.decode().strip()
    if self.verbose:
        print(result)

    return result
Python
def _find_cli(self) -> str:
    """
    Looks for the Azure CLI.

    :return str: the path to the current CLI
    """
    cli_path = self._run_cmd(['which', 'az'])
    if not cli_path:
        raise Exception('Azure CLI not found')

    return cli_path
Python
def _get_current_compute_node(self) -> dict:
    """
    Gets the current compute node metadata.

    :return: the current compute node metadata if available, otherwise {}
    """
    print('Getting current compute node metadata...')

    cmd = [
        'curl',
        '--silent',
        '--connect-timeout', '5',
        '--header', 'Metadata:true',
        "http://169.254.169.254/metadata/instance?api-version=2017-08-01"
    ]

    try:
        result = json.loads(self._run_cmd(cmd))
    except Exception:
        result = {}

    return result
Python
def _get_account(self) -> dict:
    """
    Gets the account info for the current user.

    :return dict: the account info
    """
    print('Getting account information...')

    return self._run_az(['account', 'show'])
Python
def _get_applications(self) -> List[dict]:
    """
    Gets the list of applications from AD

    :return List[dict]: a list of application data
    """
    print('Getting application list...')

    #
    # This filter is a bit of a hack. I tried to pick a filter that would
    # return all applications. Without the filter, the command will
    # print a warning message stating that the result set will be
    # limited.
    #
    return self._run_az(['ad', 'app', 'list',
                         '--filter=signInAudience eq \'AzureADMyOrg\''])
Python
def _create_application(self):
    """
    Creates a new Active Directory application.

    :return dict: the created application
    """
    key = datetime.datetime.now().strftime('%Y%m%d%H%M%S')

    if not self.interactive:
        name = 'tortuga-{}'.format(key)
    else:
        name = ''
        while not name:
            name = input(self.format('Application name: '))
            name = name.strip()

    if not self.interactive:
        url = 'https://univa.com/tortuga/{}'.format(key)
    else:
        url = ''
        while not url_valid(url):
            url = input(self.format('Application URL (a unique URI): '))

    password = secrets.token_urlsafe()

    print('Creating application...')

    try:
        application = self._run_az([
            'ad', 'app', 'create',
            '--display-name', name,
            '--native-app', 'false',
            '--identifier-uris', url,
            '--key-type', 'Password',
            '--password', password
        ])

        #
        # Attach password to the application object so we can refer to
        # it later.
        #
        application['password'] = password

        self._az_applications.append(application)
    except APIError as e:
        print(self.format_error(str(e)))
        return self._create_application()

    #
    # Create the Service Principal
    #
    print('Creating service principal...')

    self._run_az([
        'ad', 'sp', 'create',
        '--id', application['appId']
    ])

    print(self.format('The following application API password was '
                      'generated: {}', password))

    return application
Python
def _get_resource_groups(self):
    """
    Gets the list of resource groups from AD

    :return List[dict]: a list of resource group data
    """
    print('Getting resource groups...')

    return self._run_az(['group', 'list'])
Python
def _create_resource_group(self):
    """
    Creates a new resource group.

    :return dict: the created resource group
    """
    name = ''
    while not name:
        name = input(self.format('Resource group name: '))
        name = name.strip()

    location = ''
    while not location:
        location = input(self.format('Location: '))
        location = location.strip().lower()

    print('Creating resource group...')

    try:
        resource_group = self._run_az([
            'group', 'create',
            '--name', name,
            '--location', location
        ])
        self._az_resource_groups.append(resource_group)
    except APIError as e:
        print(self.format_error(str(e)))
        return self._create_resource_group()

    return resource_group
Python
def _get_custom_roles(self):
    """
    Gets the current list of custom roles for the subscription.

    :return List[dict]: a list of custom roles
    """
    print('Getting custom roles...')

    return self._run_az([
        'role', 'definition', 'list',
        '--custom-role-only', 'true'
    ])
Python
def _check_custom_roles(self):
    """
    Checks custom roles, and creates new ones as required.
    """
    print('Checking custom roles...')

    found_rate_card = False
    for role in self._az_custom_roles:
        if role['roleName'] == self.DEFAULT_RATE_CARD_ROLE:
            found_rate_card = True

    if not found_rate_card:
        self._az_custom_roles.append(self._create_rate_card_role())
Python
def _create_rate_card_role(self):
    """
    Creates the Navops rate card role

    :return dict: the newly created role
    """
    print('Creating {} rate card role...'.format(
        self.DEFAULT_RATE_CARD_ROLE))

    role_definition = (
        '{\n'
        '    "Name": "NavopsRateCardViewer",\n'
        '    "IsCustom": true,\n'
        '    "Description": "Navops rate card viewer",\n'
        '    "Actions": [\n'
        '        "Microsoft.Compute/virtualMachines/vmSizes/read",\n'
        '        "Microsoft.Resources/subscriptions/locations/read",\n'
        '        "Microsoft.Resources/providers/read",\n'
        '        "Microsoft.ContainerService/containerServices/read",\n'
        '        "Microsoft.Commerce/RateCard/read"\n'
        '    ],\n'
        '    "AssignableScopes": [\n'
        '        "/subscriptions/' + self._az_account["id"] + '"\n'
        '    ]\n'
        '}\n'
    )

    if self.verbose:
        print(role_definition)

    with tempfile.NamedTemporaryFile(mode="w", delete=False,
                                     suffix="rate_card.json") as fp:
        fp.write(role_definition)

    try:
        role = self._run_az([
            "role", "definition", "create",
            "--role-definition", "@{}".format(fp.name)
        ])
    finally:
        os.remove(fp.name)

    return role
Python
def _get_sub_role_assignments(self):
    """
    Gets the current list of role assignments for the selected
    application in the subscription.

    :return List[dict]: a list of role assignments
    """
    print('Getting subscription role assignments...')

    return self._run_az([
        'role', 'assignment', 'list',
        '--assignee', self._selected_application['appId']
    ])
Python
def _check_sub_role_assignments(self):
    """
    Ensures that the application has the correct roles assigned in
    the subscription.
    """
    has_rate_card_role = False
    for role in self._az_sub_role_assignments:
        if role['roleDefinitionName'] == self.DEFAULT_RATE_CARD_ROLE:
            has_rate_card_role = True

    if not self.interactive:
        #
        # If this is a fully automated session, then just go ahead
        # and perform the role assignment without asking
        #
        if not has_rate_card_role:
            self._assign_rate_card_role()
        return

    print(self.format_white('----------'))

    if not has_rate_card_role:
        print(
            self.format(
                'The {} application needs to have the {} role assigned '
                'in the subscription.\n',
                self._selected_application['displayName'],
                self.DEFAULT_RATE_CARD_ROLE
            )
        )
        print(
            self.format_white(
                '[1] Assign the application the {} role in the '
                'resource group',
                self.DEFAULT_RATE_CARD_ROLE
            )
        )
        print(
            self.format_white(
                '[2] I will assign the application a role myself in the '
                'Azure portal\n')
        )

        options = ['1', '2']
        selected = ''
        while selected not in options:
            selected = input(self.format('Select an option: '))
            selected = selected.strip().lower()

        if selected == '1':
            self._assign_rate_card_role()

    else:
        print(
            self.format(
                'The {} application has the following roles assigned '
                'in the subscription:\n',
                self._selected_application['displayName']
            )
        )

        for assignment in self._az_sub_role_assignments:
            print(
                self.format_white(
                    ' - {}',
                    assignment['roleDefinitionName']
                )
            )

        print(
            self.format(
                '\nThese role(s) may or may-not have sufficient '
                'privileges to access the required Azure APIs. '
                'If you run into permissions problems, you may need to '
                'assign additional roles to the application in the Azure '
                'console.'
            )
        )

        input(self.format('\nPress return to continue...'))
Python
def _assign_rate_card_role(self) -> dict:
    """
    Assigns the selected application the rate card role in the
    subscription.

    :return dict: the role assignment
    """
    print('Assigning {} role...'.format(self.DEFAULT_RATE_CARD_ROLE))

    count = 5

    #
    # This operation can fail if the service principal is not finished
    # being created on the application
    #
    while True:
        try:
            return self._run_az([
                'role', 'assignment', 'create',
                '--assignee', self._selected_application['appId'],
                '--role', self.DEFAULT_RATE_CARD_ROLE,
                '--scope', '/subscriptions/{}'.format(
                    self._az_account['id'])
            ])
        except Exception as e:
            if count:
                print(self.format_error(
                    'Role assignment failed, trying again...'))
                time.sleep(5)
                count -= 1
            else:
                raise e
Python
def _get_rg_role_assignments(self): """ Gets the current list of role assignments for the selected application in the selected resource group. :return List[dict: a list of role assignments """ print('Getting resource group role assignments...') return self._run_az([ 'role', 'assignment', 'list', '--assignee', self._selected_application['appId'], '--resource-group', self._selected_resource_group['name'] ])
Python
def _check_rg_role_assignments(self):
    """
    Ensures that the application has the correct roles assigned in
    the resource group.

    """
    if not self.interactive:
        #
        # If this is a fully automated session, then just go ahead
        # and perform the role assignment without asking
        #
        if not len(self._az_rg_role_assignments):
            self._assign_owner_role()
        return

    print(self.format_white('----------'))
    if not len(self._az_rg_role_assignments):
        print(
            self.format(
                'The {} application has no roles assigned in '
                'the {} resource group.\n',
                self._selected_application['displayName'],
                self._selected_resource_group['name']
            )
        )
        print(
            self.format_white(
                '[1] Assign the application the Owner role in the '
                'resource group'
            )
        )
        print(
            self.format_white(
                '[2] I will assign the application a role myself in the '
                'Azure portal\n')
        )

        options = ['1', '2']
        selected = ''
        while selected not in options:
            selected = input(self.format('Select an option: '))
            selected = selected.strip().lower()

        if selected == '1':
            self._assign_owner_role()

    else:
        print(
            self.format(
                'The {} application has the following roles assigned '
                'in the {} resource group:\n',
                self._selected_application['displayName'],
                self._selected_resource_group['name']
            )
        )
        for assignment in self._az_rg_role_assignments:
            print(
                self.format_white(
                    '  - {}',
                    assignment['roleDefinitionName']
                )
            )
        print(
            self.format(
                '\nThese role(s) may or may not have sufficient '
                'privileges to create resources in the resource group. '
                'If you run into permissions problems, you may need to '
                'assign additional roles to the application in the Azure '
                'console.'
            )
        )
        input(self.format('\nPress return to continue...'))
Python
def _assign_owner_role(self) -> dict: """ Assigns the selected application the Owner role in the selected resource group. :return dict: the role assignment """ print('Assigning Owner role...') count = 5 # # This operation can fail if the service principal is not finished # being created on the application # while True: try: return self._run_az([ 'role', 'assignment', 'create', '--assignee', self._selected_application['appId'], '--role', 'Owner', '--resource-group', self._selected_resource_group['name'] ]) except Exception as e: if count: print(self.format_error( 'Role assignment failed, trying again...')) time.sleep(5) count -= 1 else: raise e
Python
def _get_virtual_networks(self) -> List[dict]: """ Gets the list of virtual networks for the selected resource group. :return List[dict]: a list of virtual networks """ print('Getting virtual networks...') return self._run_az([ 'network', 'vnet', 'list', '--resource-group', self._selected_resource_group['name'] ])
Python
def _create_virtual_network(self) -> dict: """ Creates a new virtual network. :return dict: the created virtual network """ name = '' while not name: name = input(self.format('Virtual network name: ')) name = name.strip() print('Creating virtual network...') try: virtual_network = self._run_az([ 'network', 'vnet', 'create', '--name', name, '--location', self._selected_resource_group['location'], '--resource-group', self._selected_resource_group['name'] ]) self._az_virtual_networks.append(virtual_network) except APIError as e: print(self.format_error(str(e))) return self._create_virtual_network() return virtual_network
Python
def _get_network_security_groups(self) -> List[dict]: """ Gets a list of network security groups for the selected resource group. :return List[dict]: a list of network security groups """ print('Getting network security groups...') return self._run_az([ 'network', 'nsg', 'list', '--resource-group', self._selected_resource_group['name'] ])
Python
def _create_network_security_group(self) -> dict:
    """
    Creates a new network security group.

    :return dict: the created network security group

    """
    name = ''
    while not name:
        name = input(self.format('Network security group name: '))
        name = name.strip()

    #
    # Create the security group
    #
    print('Creating network security group...')
    try:
        network_security_group = self._run_az([
            'network', 'nsg', 'create',
            '--name', name,
            '--location', self._selected_resource_group['location'],
            '--resource-group', self._selected_resource_group['name']
        ])
        self._az_network_security_groups.append(network_security_group)
    except APIError as e:
        print(self.format_error(str(e)))
        return self._create_network_security_group()

    #
    # Allow SSH on security group. The rule result is intentionally not
    # assigned, so that the created security group itself is returned.
    #
    print('Enabling inbound SSH (port 22) on network security group...')
    self._run_az([
        'network', 'nsg', 'rule', 'create',
        '--nsg-name', name,
        '--resource-group', self._selected_resource_group['name'],
        '--name', 'ssh',
        '--priority', '100',
        '--destination-address-prefix', '*',
        '--destination-port-range', '22',
        '--access', 'Allow',
        '--protocol', 'Tcp',
        '--description', 'Allow incoming ssh'
    ])

    return network_security_group
Python
def _get_subnets(self) -> List[dict]: """ Gets a list of subnets in selected resource group. :return List[dict]: a list of subnets """ print('Getting subnets...') return self._run_az([ 'network', 'vnet', 'subnet', 'list', '--resource-group', self._selected_resource_group['name'], '--vnet-name', self._selected_virtual_network['name'] ])
Python
def _get_storage_accounts(self) -> List[dict]: """ Gets a list of storage accounts in resource group. :return List[dict]: a list of subnets """ print('Getting storage accounts...') return self._run_az([ 'storage', 'account', 'list', '--resource-group', self._selected_resource_group['name'] ])
Python
def _create_storage_account(self) -> dict: """ Creates a new storage account. :return dict: the created storage account """ if not self.interactive: name = 'tortuga{}'.format( datetime.datetime.now().strftime('%Y%m%d%H%M%S'), ) else: name = '' while not storage_name_valid(name): name = input( self.format( 'Storage account name (3-24 characters, ' 'lower-case letters and numbers only): ' ) ) name = name.strip() print('Creating storage account...') try: storage_account = self._run_az([ 'storage', 'account', 'create', '--name', name, '--location', self._selected_resource_group['location'], '--resource-group', self._selected_resource_group['name'], '--sku', 'Premium_LRS', '--kind', 'Storage' ]) self._az_storage_accounts.append(storage_account) except APIError as e: print(self.format_error(str(e))) return self._create_storage_account() return storage_account
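The non-interactive branch above derives a default account name from a timestamp. A quick standalone sanity check (the 'tortuga' prefix and format string are taken from the code above; the printed value is only an example) that such names satisfy the 3-24 character, lowercase-alphanumeric rule:

import datetime
import re

name = 'tortuga{}'.format(datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
print(name)                                      # e.g. tortuga20240101120000 (21 characters)
print(bool(re.match('[a-z0-9]{3,24}$', name)))   # True: lowercase alphanumeric, within 24 chars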
Python
def _get_vm_sizes(self) -> List[dict]: """ Gets a list of vm sizes. :return List[dict]: a list of vm sizes """ print('Getting virtual machine sizes...') return self._run_az([ 'vm', 'list-sizes', '--location', self._selected_resource_group['location'] ])
Python
def _select_image(self) -> dict: """ Selects the image to use as the basis for compute nodes. :return: the image data """ # # If not interactive, then use the same URN as the installer # node # if not self.interactive and self._az_compute_node: if self.same_image and \ self._az_compute_node['compute']['publisher'] and \ self._az_compute_node['compute']['offer'] and \ self._az_compute_node['compute']['sku'] and \ self._az_compute_node['compute']['version']: urn = '{}:{}:{}:{}'.format( self._az_compute_node['compute']['publisher'], self._az_compute_node['compute']['offer'], self._az_compute_node['compute']['sku'], self._az_compute_node['compute']['version'] ) else: urn = self.DEFAULT_URN image: dict = self._get_image(urn) if not image: print( self.format_error('The default URN is not valid: {}', urn) ) urn = '' else: print('----------') urn = '' image: dict = None while not urn: urn = input(self.format('Enter the VM image URN: ')).strip() # # Attempt to get the image # try: image = self._get_image(urn) except Exception: pass # # If there is no image, then the URN is invalid # if not image: print(self.format_error('The URN is not valid: {}', urn)) urn = '' # # Store the URN on the image data for future reference # image['urn'] = urn return image
Python
def _select_object(self, name: str, name_attr: str, create: bool = True,
                   select_first: bool = False) -> dict:
    """
    Selects and returns an object instance.

    :name str:          the name of the object to select
    :name_attr str:     the attribute that has the display name
    :create bool:       whether or not creating a new item should be
                        available as an option
    :select_first bool: whether or not to automatically select the first
                        item in the list (i.e. auto)

    :return dict: the selected object data

    """
    obj_list_name: str = '_az_{}s'.format(name.replace(' ', '_'))
    obj_list = getattr(self, obj_list_name)

    #
    # If we are supposed to pick the first item in the list, and there
    # is at least one item, then return it, otherwise we have to ask
    # the user to go ahead and create one
    #
    if select_first:
        if obj_list:
            obj = obj_list[0]
            print(self.format('Selected {}: {{}}'.format(name),
                              obj[name_attr]))
            return obj_list[0]

    #
    # If not interactive, and nothing has been found, then automatically
    # create one
    #
    if not self.interactive:
        create_method = getattr(
            self, '_create_{}'.format(name.replace(' ', '_'))
        )
        return create_method()

    options: List[str] = []
    if create:
        options.append('c')

    print(self.format_white('----------'))
    print(self.format('The following is a list of {}:\n', name + 's'))

    for i in range(len(obj_list)):
        obj = obj_list[i]
        print(self.format_white('[{}] {}', i, obj[name_attr]))
        options.append(str(i))
    if len(obj_list):
        print('')

    if create:
        print(self.format_white('[c] Create a new {}\n', name))

    selected = ''
    while selected not in options:
        selected = input(self.format('Select {}: ', name))
        selected = str(selected).strip().lower()

    if create and selected == 'c':
        create_method = getattr(
            self, '_create_{}'.format(name.replace(' ', '_'))
        )
        obj = create_method()
    else:
        obj = obj_list[int(selected)]

    return obj
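A minimal, hypothetical illustration of the naming convention this helper relies on: a list attribute named '_az_<name>s' and a factory method named '_create_<name>', with spaces in the name replaced by underscores. The _Demo class below is invented purely for the sketch.

class _Demo:
    def __init__(self):
        self._az_virtual_networks = [{'name': 'default-vnet'}]

    def _create_virtual_network(self):
        return {'name': 'new-vnet'}


demo = _Demo()
name = 'virtual network'
obj_list = getattr(demo, '_az_{}s'.format(name.replace(' ', '_')))
create_method = getattr(demo, '_create_{}'.format(name.replace(' ', '_')))
print(obj_list[0]['name'], create_method()['name'])  # default-vnet new-vnet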
Python
def url_valid(url: str) -> bool: """ Determines whether or not a URL is in a valid format. :param str url: the URL to validate :return bool: True if valid, False otherwise """ result = urlparse(url) if all([result.scheme, result.netloc]): return True return False
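A few illustrative calls (assuming url_valid and its urlparse import are in scope); the check only requires a scheme and a network location, so any scheme passes:

print(url_valid('https://portal.azure.com/#home'))  # True
print(url_valid('ftp://example.com/file.iso'))      # True: any scheme with a netloc passes
print(url_valid('example.com/path'))                # False: no scheme, so netloc is empty too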
Python
def subnet_prefix_valid(subnet_prefix: str) -> bool:
    """
    Determines whether or not a subnet prefix is in the following
    format: x.x.x.x/xx

    :param str subnet_prefix: the subnet prefix to validate
    :return:                  True if valid, False otherwise

    """
    try:
        ipaddress.ip_network(subnet_prefix)
        return True
    except ValueError:
        pass

    return False
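A few illustrative calls; note that ipaddress.ip_network() is strict by default, so a prefix with host bits set is rejected here:

print(subnet_prefix_valid('10.0.0.0/24'))  # True
print(subnet_prefix_valid('10.0.0.1/24'))  # False: host bits set, ip_network() raises ValueError
print(subnet_prefix_valid('10.0.0.0/33'))  # False: /33 is not a valid prefix length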
Python
def storage_name_valid(name: str) -> bool: """ Determines whether or not a storage account name is valid. Valid names must be between 3 and 24 characters in length, and use numbers and lower-case letters only. :param str name: the storage account name :return bool: True if valid, False otherwise """ if re.match('[a-z0-9]{3,24}$', name): return True return False
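A few illustrative calls against the 3-24 character, lowercase-alphanumeric rule above:

print(storage_name_valid('tortugastorage01'))  # True
print(storage_name_valid('ab'))                # False: shorter than 3 characters
print(storage_name_valid('My-Storage'))        # False: uppercase letters and '-' are not allowed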
Python
def step(self, closure=None):
    """Override the step method of the PyTorch SGD optimizer to change the
    velocity and parameter update formula.

    Args:
        closure (callable, optional): Evaluates model and returns Loss.
            Defaults to None.

    Returns:
        loss: loss from closure. Defaults to None
    """
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()

    for group in self.param_groups:
        params_with_grad = []
        d_p_list = []
        momentum_buffer_list = []
        weight_decay = group["weight_decay"]
        momentum = group["momentum"]
        dampening = group["dampening"]
        nesterov = group["nesterov"]
        lr = group["lr"]
        try:
            maximize = group["maximize"]
        except KeyError:
            maximize = False  # set maximize to False if not found

        for p in group["params"]:
            if p.grad is not None:
                params_with_grad.append(p)
                d_p_list.append(p.grad)

                state = self.state[p]
                if "momentum_buffer" not in state:
                    momentum_buffer_list.append(None)
                else:
                    momentum_buffer_list.append(state["momentum_buffer"])

        sgd(
            params_with_grad,
            d_p_list,
            momentum_buffer_list,
            weight_decay=weight_decay,
            momentum=momentum,
            lr=lr,
            dampening=dampening,
            nesterov=nesterov,
            maximize=maximize,
        )

        # update momentum_buffers in state
        for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
            state = self.state[p]
            state["momentum_buffer"] = momentum_buffer

    return loss
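A minimal usage sketch. The stock torch.optim.SGD is used here as a stand-in; a subclass whose step() is overridden as above would be constructed and driven in exactly the same way.

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

x, y = torch.randn(8, 4), torch.randn(8, 1)
optimizer.zero_grad()
loss = torch.nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()  # the overridden step() runs here for the custom optimizer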
Python
def after_iter(self): """ `after_iter` contains two parts of logic: * log information * reset setting of resize """ # log needed information if (self.iter + 1) % self.exp.print_interval == 0: # TODO check ETA logic left_iters = self.max_iter * self.max_epoch - (self.progress_in_iter + 1) eta_seconds = self.meter["iter_time"].global_avg * left_iters eta_str = "ETA: {}".format(datetime.timedelta(seconds=int(eta_seconds))) progress_str = "epoch: {}/{}, iter: {}/{}".format( self.epoch + 1, self.max_epoch, self.iter + 1, self.max_iter ) loss_meter = self.meter.get_filtered_meter("loss") loss_str = ", ".join( ["{}: {:.1f}".format(k, v.latest) for k, v in loss_meter.items()] ) # record loss metrics self.loss_metrics.append( [(key, value.latest) for key, value in loss_meter.items()] ) time_meter = self.meter.get_filtered_meter("time") time_str = ", ".join( ["{}: {:.3f}s".format(k, v.avg) for k, v in time_meter.items()] ) logger.info( "{}, mem: {:.0f}Mb, {}, {}, lr: {:.3e}".format( progress_str, gpu_mem_usage(), time_str, loss_str, self.meter["lr"].latest, ) + (", size: {:d}, {}".format(self.input_size[0], eta_str)) ) if self.rank == 0: # log learning rate self.tblogger.add_scalar( "opt/lr", self.meter["lr"].latest, self.epoch + 1 ) self.wandb_logger.log({"lr": self.meter["lr"].latest}) self.meter.clear_meters() # random resizing if (self.progress_in_iter + 1) % 10 == 0: self.input_size = self.exp.random_resize( self.train_loader, self.epoch, self.rank, self.is_distributed )
Python
def parse_rec(filename):
    """Parse a PASCAL VOC xml file"""
    tree = ET.parse(filename)
    objects = []
    for obj in tree.findall("object"):
        obj_struct = {}
        obj_struct["name"] = obj.find("name").text
        # not parsing pose since redundant.
        try:
            obj_struct["truncated"] = int(obj.find("truncated").text)
        except (AttributeError, TypeError, ValueError):
            obj_struct["truncated"] = 0  # default if the tag is missing or malformed
        try:
            obj_struct["difficult"] = int(obj.find("difficult").text)
        except (AttributeError, TypeError, ValueError):
            obj_struct["difficult"] = 0  # default if the tag is missing or malformed
        bbox = obj.find("bndbox")
        obj_struct["bbox"] = [
            int(bbox.find("xmin").text),
            int(bbox.find("ymin").text),
            int(bbox.find("xmax").text),
            int(bbox.find("ymax").text),
        ]
        objects.append(obj_struct)

    return objects
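A minimal sketch of the annotation structure parse_rec expects, using a made-up file written to a temporary location (assumes parse_rec and its `import xml.etree.ElementTree as ET` are in scope):

import tempfile

voc_xml = """<annotation>
  <object>
    <name>dog</name>
    <truncated>0</truncated>
    <difficult>0</difficult>
    <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>110</xmax><ymax>220</ymax></bndbox>
  </object>
</annotation>"""

with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as f:
    f.write(voc_xml)

print(parse_rec(f.name))
# [{'name': 'dog', 'truncated': 0, 'difficult': 0, 'bbox': [10, 20, 110, 220]}]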
Python
def det_image_visualization(detpath, imagepath, classname):
    """
    Logging helper function to visualize detections made during evaluation.

    Returns:
        figure: matplotlib figure containing grid of annotated images.
    """
    # read dets
    detfile = detpath.format(classname)
    with open(detfile, "r") as f:
        lines = f.readlines()
    if len(lines) < 8:
        # not enough detections to create a meaningful plot. Return a blank figure instead of None
        figure = plt.figure(figsize=(20, 10))
        return figure

    splitlines = [x.strip().split(" ") for x in lines]
    splitlines = [
        [" ".join(x[:-5])] + x[-5:] for x in splitlines
    ]  # -5 index protects against unexpected spaces in image_id.
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
    BB = BB.astype(np.int32)

    # sort by confidence
    sorted_ind = np.argsort(-confidence)
    confidence = confidence[sorted_ind]
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]

    # collect all instances of boxes with matching img ids:
    image_id_to_indexes = {}
    for index in range(len(image_ids)):
        if image_ids[index] not in image_id_to_indexes:
            image_id_to_indexes[image_ids[index]] = []
        image_id_to_indexes[image_ids[index]].append(index)

    def draw_boxes_on_image(index, color):
        img_id = image_ids[index]
        img = cv2.imread(imagepath.format(img_id), cv2.IMREAD_COLOR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        for det_index in image_id_to_indexes[img_id]:
            box = BB[det_index]
            x_min, y_min, x_max, y_max = box
            img = cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color, 2)
            img = cv2.putText(
                img,
                str(confidence[det_index]),
                ((x_min + x_max) // 2, (y_min + y_max) // 2),
                fontFace=cv2.FONT_HERSHEY_PLAIN,
                fontScale=1,
                color=color,
                thickness=2,
            )
        return img

    # plot some random high confidence images:
    images = []
    for index in random.sample(range(0, min(10, len(image_ids))), 4):
        images.append(draw_boxes_on_image(index, (0, 255, 0)))

    # Also add some other random images, guarding against short detection lists
    # so random.sample never asks for more indexes than are available:
    start = min(30, max(4, len(image_ids) - 4))
    for index in random.sample(range(start, len(image_ids)),
                               min(4, len(image_ids) - start)):
        images.append(draw_boxes_on_image(index, (0, 255, 0)))

    # aggregate into single image grid:
    figure = plt.figure(figsize=(20, 10))
    grid = ImageGrid(figure, rect=111, nrows_ncols=(2, 4), axes_pad=0.05)
    for ax, img in zip(grid, images):
        ax.imshow(img)
    return figure
Python
def run(self, fps, fps_list): ''' Log current, min and max of fps value ''' print("fps = {}".format(fps)) self.fps_list = fps_list
Python
def show_histogram(self, tub_paths, record_name, out):
    '''
    Produce a histogram of record type frequency in the given tub
    '''
    from matplotlib import pyplot as plt
    from donkeycar.parts.tub_v2 import Tub
    import pandas as pd

    output = out or os.path.basename(tub_paths)
    base_path = Path(os.path.expanduser(tub_paths)).absolute().as_posix()
    tub = Tub(base_path)
    records = list(tub)
    user_angles = []
    user_throttles = []
    for record in records:
        user_angle = float(record["user/angle"])
        user_throttle = float(record["user/throttle"])
        user_angles.append(user_angle)
        user_throttles.append(user_throttle)
    df = pd.DataFrame({'user_angle': user_angles,
                       'user_throttle': user_throttles})
    if record_name is not None:
        # column names use '_' in place of '/' (e.g. 'user/angle' -> 'user_angle')
        df[record_name.replace('/', '_')].hist(bins=50)
    else:
        df.hist(bins=50)

    try:
        if out is not None:
            filename = output
        else:
            if record_name is not None:
                filename = output + '_hist_%s.png' % record_name.replace('/', '_')
            else:
                filename = output + '_hist.png'
        plt.savefig(filename)
        print('saving image to:', filename)
    except Exception as e:
        print(e)
    # plt.show()
Python
def execute_from_command_line(): """ This is the function linked to the "donkey" terminal command. """ commands = { 'createcar': CreateCar, 'findcar': FindCar, 'calibrate': CalibrateCar, 'tubclean': TubManager, 'tubhist': ShowHistogram, 'tubplot': ShowPredictionPlots, 'makemovie': MakeMovieShell, 'createjs': CreateJoystick, 'cnnactivations': ShowCnnActivations, 'update': UpdateCar, 'train': Train, 'trainremote': TrainRemote, 'ui': Gui, } args = sys.argv[:] if len(args) > 1 and args[1] in commands.keys(): command = commands[args[1]] c = command() c.run(args[2:]) else: dk.utils.eprint('Usage: The available commands are:') dk.utils.eprint(list(commands.keys()))
Python
def register_uuids(uuids_to_descriptions: Dict[str, str]) -> None: """Add or modify the mapping of 128-bit UUIDs for services and characteristics to descriptions. Args: uuids_to_descriptions: A dictionary of new mappings """ uuid128_dict.update(uuids_to_descriptions)
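A minimal usage sketch of the helper above; the UUID and description below are made up for illustration only:

register_uuids({
    "12345678-1234-5678-1234-56789abcdef0": "Example Vendor Telemetry Service",
})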
Python
def console_input(default, validation=None, allow_empty=False): """ Get user input value from stdin Parameters ---------- default : string A default value. It will be used when user input nothing. validation : callable A validation function. The validation function must raise an error when validation has failed. Returns ------- string or any A user input string or validated value """ value = raw_input("> ") or default if value == "" and not allow_empty: print "Invalid: Empty value is not permitted." return console_input(default, validation) if validation: try: return validation(value) except ValidationError, e: print "Invalid: ", e return console_input(default, validation) return value
Python
def call(args): """ Call terminal command and return exit_code and stdout Parameters ---------- args : list A command and arguments list Returns ------- list : [exit_code, stdout] exit_code indicate the exit code of the command and stdout indicate the output of the command """ b = StringIO() p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) encoding = getattr(sys.stdout, 'encoding', None) or 'utf-8' # old python has bug in p.stdout, so the following little # hack is required. for stdout in iter(p.stdout.readline, ''): if len(stdout) == 0: break # translate non unicode to unicode stdout = force_unicode(stdout, encoding) # StringIO store unicode b.write(stdout) # stdout require non unicode sys.stdout.write(from_unicode(stdout, encoding)) sys.stdout.flush() buf = b.getvalue() p.stdout.close() return p.returncode or 0, buf
Python
def split_arguments(args): """ Split specified arguments to two list. This is used to distinguish the options of the program and execution command/arguments. Parameters ---------- args : list Command line arguments Returns ------- list : options, arguments options indicate the optional arguments for the program and arguments indicate the execution command/arguments """ prev = False for i, value in enumerate(args[1:]): if value.startswith('-'): prev = True elif prev: prev = False else: return args[:i+1], args[i+1:] return args, []
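An illustrative call (assuming the function above): tokens that start with '-' and the value immediately following them stay on the option side, and the split happens at the first token that is neither.

options, command = split_arguments(
    ['notify', '-t', 'me@example.com', 'make', 'test'])
print(options)  # ['notify', '-t', 'me@example.com']
print(command)  # ['make', 'test']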
Python
def parse_arguments(args, config): """ Parse specified arguments via config Parameters ---------- args : list Command line arguments config : object ConfigParser instance which values are used as default values of options Returns ------- list : arguments, options options indicate the return value of ArgumentParser and arguments indicate the execution command/arguments """ import notify from conf import config_to_options opts = config_to_options(config) usage = ("%(prog)s " "[-h] [-t TO_ADDR] [-f FROM_ADDR] [-e ENCODING] [-s SUBJECT]\n" " " "[-o HOST] [-p PORT] [--username USERNAME] [--password PASSWORD]\n" " " "[--setup] [--check] COMMAND ARGUMENTS") % {'prog': "notify"} description = """ Call COMMAND with ARGUMENTS and send notification email to TO_ADDR """ parser = optparse.OptionParser( usage=usage, description=description, version=notify.__version__) parser.add_option('-t', '--to-addr', default=opts.to_addr, help=('Destination of the email.')) parser.add_option('-f', '--from-addr', default=opts.from_addr, help=('Source of the email.')) parser.add_option('-s', '--subject', default=opts.subject, help=('Subject of the email')) parser.add_option('-e', '--encoding', default=opts.encoding, help=('Encoding of the email')) parser.add_option('-o', '--host', default=opts.host, help=('Host address of MUA')) parser.add_option('-p', '--port', type='int', default=opts.port, help=('Port number of MUA')) parser.add_option('--username', default=opts.username, help=('Username for authentication')) parser.add_option('--password', help=('Password for authentication')) parser.add_option('--setup', default=False, action='store_true', help=('Setup %(prog)s configuration')) parser.add_option('--check', default=False, action='store_true', help=('Send %(prog)s configuration via email for ' 'checking. Only for Unix system.')) # display help and exit if len(args) == 1: parser.print_help() sys.exit(0) else: # translate all specified arguments to unicode if sys.version_info < (3,): encoding = sys.stdout.encoding args = map(lambda x: unicode(x, encoding), args) # split argv to two array lhs, rhs = split_arguments(args) # parse options opts = parser.parse_args(args=lhs[1:])[0] return rhs, opts
Python
def call_and_notificate(args, opts): """ Execute specified arguments and send notification email Parameters ---------- args : list A execution command/arguments list opts : object A option instance """ # store starttime stctime = time.clock() stttime = time.time() stdtime = datetime.datetime.now() # call subprocess exit_code, output = call(args) # calculate delta cdelta = time.clock() - stctime tdelta = time.time() - stttime endtime = datetime.datetime.now() if exit_code == 0: status = u"Success" else: status = u"Fail (%d)" % exit_code # create email body body = EMAIL_BODY % { 'prog': get_command_str(args), 'status': status, 'stdtime': stdtime, 'endtime': endtime, 'tdelta': tdelta, 'cdelta': cdelta, 'output': output, 'cwd': os.getcwd(), } # create email subject subject = opts.subject % { 'prog': get_command_str(args), 'status': status.lower(), } # create email message msg = create_message(opts.from_addr, opts.to_addr, subject, body, opts.encoding) # obtain password from keyring password = keyring.get_password('notify', opts.username) # send email send_email(msg, opts.host, opts.port, opts.username, password)
Python
def config_to_options(config): """ Convert ConfigParser instance to argparse.Namespace Parameters ---------- config : object A ConfigParser instance Returns ------- object An argparse.Namespace instance """ class Options: host=config.get('smtp', 'host', raw=True) port=config.getint('smtp', 'port') to_addr=config.get('mail', 'to_addr', raw=True) from_addr=config.get('mail', 'from_addr', raw=True) subject=config.get('mail', 'subject', raw=True) encoding=config.get('mail', 'encoding', raw=True) username=config.get('auth', 'username') opts = Options() # format opts.from_addr % {'host': opts.host, 'prog': 'notify'} opts.to_addr % {'host': opts.host, 'prog': 'notify'} return opts
Python
def decision_function(self, X): """ Reports the distance from the decision boundary for classifiers that support it. We need to override the `decision_function` from scikit-learn because we need a likelihood estimate for classifiers that only report `predict_proba`. After pull request [#10612](https://github.com/scikit-learn/scikit-learn/pull/10612) is merged, we can use the normal `decision_function` from sklearn. """ check_is_fitted(self, 'estimators_') if len(X) == 0: return pd.DataFrame(data=[], index=X.index, columns=self.classes_) try: T = np.array([est.decision_function(X).ravel() for est in self.estimators_]).T except AttributeError: T = np.array([e.predict_proba(X)[:, 1] * 2 - 1 for e in self.estimators_]).T if len(self.estimators_) == 1: T = T.ravel() return pd.DataFrame(data=T, columns=self.classes_, index=X.index)
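A small standalone sketch of the fallback used above for estimators that only expose predict_proba: class probabilities in [0, 1] are rescaled to a pseudo-margin in [-1, 1] via p * 2 - 1, so 0.5 maps to the decision boundary at 0.

import numpy as np

proba = np.array([0.0, 0.25, 0.5, 0.9, 1.0])
pseudo_margin = proba * 2 - 1
print(pseudo_margin)  # [-1.  -0.5  0.   0.8  1. ]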
Python
def transformer2(ih, iw, nb_conv, size_conv, lr):
    """
    The simple cnn model for image transformation with 2 times of
    downsampling. It is a choice for fast running. However, it will lose
    resolution during the transformation.

    Parameters
    ----------
    ih, iw : int
        The input image dimension

    nb_conv : int
        Number of convolution kernels for each layer

    size_conv : int
        The size of convolution kernel

    lr : float
        Learning rate for the Adam optimizer

    Returns
    -------
    mdl
        The compiled Keras model.

    """
    inputs = Input((ih, iw, 1))

    conv1 = Conv2D(nb_conv, (size_conv, size_conv), activation='relu',
                   padding='same')(inputs)
    conv1 = Conv2D(nb_conv, (size_conv, size_conv), activation='relu',
                   padding='same')(conv1)

    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(nb_conv * 2, (size_conv, size_conv), activation='relu',
                   padding='same')(pool1)
    conv2 = Conv2D(nb_conv * 2, (size_conv, size_conv), activation='relu',
                   padding='same')(conv2)

    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(nb_conv * 4, (size_conv, size_conv), activation='relu',
                   padding='same')(pool2)
    #
    fc1 = Flatten()(conv3)
    fc1 = Dense(iw * ih // 16)(fc1)
    fc1 = Reshape((ih // 4, iw // 4, 1))(fc1)

    conv4 = Conv2DTranspose(nb_conv * 4, (size_conv, size_conv),
                            activation='relu', padding='same')(fc1)

    up1 = concatenate([UpSampling2D(size=(2, 2))(conv4), conv2], axis=3)

    conv6 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv),
                            activation='relu', padding='same')(up1)
    conv6 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv),
                            activation='relu', padding='same')(conv6)

    up2 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv1], axis=3)

    conv7 = Conv2DTranspose(nb_conv, (size_conv, size_conv),
                            activation='relu', padding='same')(up2)
    conv7 = Conv2DTranspose(nb_conv, (size_conv, size_conv),
                            activation='relu', padding='same')(conv7)

    conv8 = Conv2DTranspose(1, (size_conv, size_conv),
                            activation='relu', padding='same')(conv7)

    mdl = Model(inputs=inputs, outputs=conv8)
    # if nb_gpu > 1:
    #     mdl = multi_gpu_model(mdl, nb_gpu)
    opt = keras.optimizers.Adam(learning_rate=lr)
    mdl.compile(loss='mse', optimizer=opt)

    return mdl
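A minimal build-and-inspect sketch (hyperparameter values are arbitrary; ih and iw should be divisible by 4 because of the two 2x2 poolings, and the Keras imports used by transformer2 are assumed to be in scope):

mdl = transformer2(ih=64, iw=64, nb_conv=32, size_conv=3, lr=1e-3)
mdl.summary()  # prints the encoder/decoder layer stack and parameter counts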
Python
def transformer3_pooling(ih, iw, nb_conv, size_conv, lr):
    """
    The cnn image transformation model with 3 times of downsampling. The
    downsampling uses maxpooling.

    Parameters
    ----------
    ih, iw : int
        The input image dimension

    nb_conv : int
        Number of convolution kernels for each layer

    size_conv : int
        The size of convolution kernel

    lr : float
        Learning rate for the Adam optimizer

    Returns
    -------
    mdl
        The compiled Keras model.

    """
    inputs = Input((ih, iw, 1))

    conv1 = Conv2D(nb_conv, (size_conv, size_conv), activation='relu',
                   padding='same')(inputs)
    conv1 = Conv2D(nb_conv, (size_conv, size_conv), activation='relu',
                   padding='same')(conv1)

    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(nb_conv * 2, (size_conv, size_conv), activation='relu',
                   padding='same')(pool1)
    conv2 = Conv2D(nb_conv * 2, (size_conv, size_conv), activation='relu',
                   padding='same')(conv2)

    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(nb_conv * 2, (size_conv, size_conv), activation='relu',
                   padding='same')(pool2)
    conv3 = Conv2D(nb_conv * 2, (size_conv, size_conv), activation='relu',
                   padding='same')(conv3)

    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(nb_conv * 4, (size_conv, size_conv), activation='relu',
                   padding='same')(pool3)
    conv4 = Conv2D(nb_conv * 4, (size_conv, size_conv), activation='relu',
                   padding='same')(conv4)
    conv4 = Conv2D(1, (size_conv, size_conv), activation='relu',
                   padding='same')(conv4)
    #
    fc1 = Flatten()(conv4)
    fc1 = Dense(iw * ih // 128, activation='relu')(fc1)
    fc1 = Dropout(0.2)(fc1)
    fc1 = Dense(iw * ih // 128, activation='relu')(fc1)
    fc1 = Dropout(0.25)(fc1)
    fc1 = Dense(iw * ih // 64, activation='relu')(fc1)
    fc1 = Dropout(0.25)(fc1)
    fc1 = Reshape((int(ih // 8), int(iw // 8), 1))(fc1)

    fc2 = Conv2DTranspose(nb_conv * 4, (size_conv, size_conv),
                          activation='relu', padding='same')(fc1)
    fc2 = Conv2DTranspose(nb_conv * 8, (size_conv, size_conv),
                          activation='relu', padding='same')(fc2)

    up1 = concatenate([UpSampling2D(size=(2, 2))(fc2), conv3], axis=3)

    conv6 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv),
                            activation='relu', padding='same')(up1)
    conv6 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv),
                            activation='relu', padding='same')(conv6)

    up2 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv2], axis=3)

    conv7 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv),
                            activation='relu', padding='same')(up2)
    conv7 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv),
                            activation='relu', padding='same')(conv7)

    up3 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv1], axis=3)

    conv8 = Conv2DTranspose(nb_conv, (size_conv, size_conv),
                            activation='relu', padding='same')(up3)
    conv8 = Conv2DTranspose(nb_conv, (size_conv, size_conv),
                            activation='relu', padding='same')(conv8)
    conv8 = Conv2DTranspose(1, (3, 3), activation='relu',
                            padding='same')(conv8)

    mdl = Model(inputs=inputs, outputs=conv8)
    # if nb_gpu > 1:
    #     mdl = multi_gpu_model(mdl, nb_gpu)
    opt = keras.optimizers.Adam(learning_rate=lr)
    mdl.compile(loss='mse', optimizer=opt)

    return mdl
Python
def transformer3_super(ih, iw, nb_conv, size_conv):
    """
    The CNN model for image transformation with 3 times downsampling. The
    downsampling uses strides. The model also merges the convolution layers
    from the encoding and decoding parts to keep the resolution of the image.
    It works well for super-resolution and image enhancement.

    Parameters
    ----------
    ih, iw : int
        The input image dimension

    nb_conv : int
        Number of convolution kernels for each layer

    size_conv : int
        The size of convolution kernel

    Returns
    -------
    mdl
        The compiled Keras model.
    """
    inputs = Input((ih, iw, 1))

    conv1 = Conv2D(nb_conv, (size_conv, size_conv), activation='relu', padding='same')(inputs)
    conv1a = Conv2D(nb_conv, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv1)

    conv2 = Conv2D(nb_conv * 2, (size_conv, size_conv), activation='relu', padding='same')(conv1a)
    conv2a = Conv2D(nb_conv * 2, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv2)

    conv3 = Conv2D(nb_conv * 2, (size_conv, size_conv), activation='relu', padding='same')(conv2a)
    conv3a = Conv2D(nb_conv * 2, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv3)

    conv4 = Conv2D(nb_conv * 4, (size_conv, size_conv), activation='relu', padding='same')(conv3a)
    conv4 = Conv2D(nb_conv * 4, (size_conv, size_conv), activation='relu', padding='same')(conv4)
    conv4 = Conv2D(1, (size_conv, size_conv), activation='relu', padding='same')(conv4)

    # Flatten before the dense bottleneck; Dense needs integer unit counts.
    fc1 = Flatten()(conv4)
    fc1 = Dense(iw * ih // 128, activation='relu')(fc1)
    fc1 = Dropout(0.2)(fc1)
    fc1 = Dense(iw * ih // 128, activation='relu')(fc1)
    fc1 = Dropout(0.25)(fc1)
    fc1 = Dense(iw * ih // 64, activation='relu')(fc1)
    fc1 = Dropout(0.25)(fc1)
    fc1 = Reshape((ih // 8, iw // 8, 1))(fc1)

    fc2 = Conv2DTranspose(nb_conv * 4, (size_conv, size_conv), activation='relu', padding='same')(fc1)
    fc2 = Conv2DTranspose(nb_conv * 8, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(fc2)

    up1 = concatenate([fc2, conv3], axis=3)
    conv6 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv), activation='relu', padding='same')(up1)
    conv6 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv6)

    up2 = concatenate([conv6, conv2], axis=3)
    conv7 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv), activation='relu', padding='same')(up2)
    conv7 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv7)

    up3 = concatenate([conv7, conv1], axis=3)
    conv8 = Conv2DTranspose(nb_conv, (size_conv, size_conv), activation='relu', padding='same')(up3)
    conv8 = Conv2DTranspose(nb_conv, (size_conv, size_conv), activation='relu', padding='same')(conv8)
    conv8 = Conv2DTranspose(1, (3, 3), activation='relu', padding='same')(conv8)

    mdl = Model(inputs=inputs, outputs=conv8)
    mdl.compile(loss=psnr, optimizer='Adam', metrics=['mse'])
    return mdl
Python
def transformer3_direct(ih, iw, nb_conv, size_conv):
    """
    The CNN model for image transformation with 3 times downsampling. The
    downsampling uses strides. It does not merge all of the encoding layers,
    so it will lose resolution but makes it possible to generate more varied
    images.

    Parameters
    ----------
    ih, iw : int
        The input image dimension

    nb_conv : int
        Number of convolution kernels for each layer

    size_conv : int
        The size of convolution kernel

    Returns
    -------
    mdl
        The compiled Keras model.
    """
    inputs = Input((ih, iw, 1))

    conv1 = Conv2D(nb_conv, (size_conv, size_conv), activation='relu', padding='same')(inputs)
    conv1a = Conv2D(nb_conv, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv1)

    conv2 = Conv2D(nb_conv * 2, (size_conv, size_conv), activation='relu', padding='same')(conv1a)
    conv2a = Conv2D(nb_conv * 2, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv2)

    conv3 = Conv2D(nb_conv * 4, (size_conv, size_conv), activation='relu', padding='same')(conv2a)
    conv3 = Conv2D(nb_conv * 4, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv3)

    # Dense needs integer unit counts.
    fc1 = Flatten()(conv3)
    fc1 = Dense(iw * ih // 64, activation='relu')(fc1)
    fc1 = Dropout(0.2)(fc1)
    fc1 = Dense(iw * ih // 16, activation='relu')(fc1)
    fc1 = Dropout(0.25)(fc1)
    fc1 = Reshape((ih // 4, iw // 4, 1))(fc1)

    fc2 = Conv2DTranspose(nb_conv * 4, (size_conv, size_conv), activation='relu', padding='same')(fc1)
    fc2 = Conv2DTranspose(nb_conv * 4, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(fc2)

    up1 = concatenate([fc2, conv2], axis=3)
    conv6 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv), activation='relu', padding='same')(up1)
    conv6 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv6)

    up2 = concatenate([conv6, conv1], axis=3)
    conv7 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv), activation='relu', padding='same')(up2)
    conv7 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv), activation='relu', padding='same')(conv7)

    conv8 = Conv2DTranspose(1, (3, 3), activation='relu', padding='same')(conv7)

    mdl = Model(inputs=inputs, outputs=conv8)
    mdl.compile(loss=psnr, optimizer='Adam', metrics=['mse'])
    return mdl
Python
def transformer3_filter(ih, iw, nb_conv, size_conv):
    """
    The CNN model for image transformation with 3 times downsampling. This
    model does not include fully connected layers.

    Parameters
    ----------
    ih, iw : int
        The input image dimension

    nb_conv : int
        Number of convolution kernels for each layer

    size_conv : int
        The size of convolution kernel

    Returns
    -------
    mdl
        The compiled Keras model.
    """
    inputs = Input((ih, iw, 1))

    conv1 = Conv2D(nb_conv, (size_conv, size_conv), activation='relu', padding='same')(inputs)
    conv1a = Conv2D(nb_conv, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv1)

    conv2 = Conv2D(nb_conv * 2, (size_conv, size_conv), activation='relu', padding='same')(conv1a)
    conv2a = Conv2D(nb_conv * 2, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv2)

    conv3 = Conv2D(nb_conv * 2, (size_conv, size_conv), activation='relu', padding='same')(conv2a)
    conv3a = Conv2D(nb_conv * 2, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv3)

    conv4 = Conv2D(nb_conv * 4, (size_conv, size_conv), activation='relu', padding='same')(conv3a)
    conv4 = Conv2D(nb_conv * 4, (size_conv, size_conv), activation='relu', padding='same')(conv4)
    conv4 = Conv2D(nb_conv * 4, (size_conv, size_conv), activation='relu', padding='same')(conv4)

    # conv5 must be created from conv4 before the strided transpose layer consumes it.
    conv5 = Conv2DTranspose(nb_conv * 4, (size_conv, size_conv), activation='relu', padding='same')(conv4)
    conv5 = Conv2DTranspose(nb_conv * 8, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv5)

    up1 = concatenate([conv5, conv3], axis=3)
    conv6 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv), activation='relu', padding='same')(up1)
    conv6 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv6)

    up2 = concatenate([conv6, conv2], axis=3)
    conv7 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv), activation='relu', padding='same')(up2)
    conv7 = Conv2DTranspose(nb_conv * 2, (size_conv, size_conv), strides=(2, 2), activation='relu', padding='same')(conv7)

    up3 = concatenate([conv7, conv1], axis=3)
    conv8 = Conv2DTranspose(nb_conv, (size_conv, size_conv), activation='relu', padding='same')(up3)
    conv8 = Conv2DTranspose(nb_conv, (size_conv, size_conv), activation='relu', padding='same')(conv8)
    conv8 = Conv2DTranspose(1, (3, 3), activation='relu', padding='same')(conv8)

    mdl = Model(inputs=inputs, outputs=conv8)
    mdl.compile(loss='mse', optimizer='Adam', metrics=['accuracy'])
    return mdl
Python
def psnr(y_true, y_pred):
    """
    The cost function based on the PSNR. It returns the reciprocal of the
    PSNR (assuming pixel values in [0, 1]), so minimizing this loss
    maximizes the PSNR.
    """
    mse = tf.reduce_mean(tf.square(y_pred - y_true))
    return 1.0 / (10.0 * tf.math.log(1.0 / mse) / tf.math.log(10.0))
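A quick sanity check of the psnr loss on made-up tensors, assuming TensorFlow 2.x is imported as tf; with a constant error of 0.1 the PSNR is 20 dB, so the loss should be about 1/20.

import numpy as np
import tensorflow as tf

y_true = tf.constant(np.zeros((1, 4, 4, 1), dtype='float32'))
y_pred = tf.constant(np.full((1, 4, 4, 1), 0.1, dtype='float32'))

loss = psnr(y_true, y_pred)   # 1 / PSNR
print(float(loss))            # ~0.05, since PSNR = 10 * log10(1 / 0.01) = 20 dB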
Python
def helper1(X):
    """Create new columns in the dataset for each time metric,
    i.e. year, month, week, day, hour, minute, second."""
    # Prevent SettingWithCopyWarning
    X = X.copy()

    # Convert the Date column to datetime
    X['Date'] = pd.to_datetime(X['Date'])

    # Extract components from Date, then drop the original column
    X['year'] = X['Date'].dt.year
    X['month'] = X['Date'].dt.month
    # Series.dt.week was removed in pandas 2.0; use isocalendar().week instead
    X['week'] = X['Date'].dt.isocalendar().week
    X['day'] = X['Date'].dt.day
    X['hour'] = X['Date'].dt.hour
    X['minute'] = X['Date'].dt.minute
    X['second'] = X['Date'].dt.second
    X = X.drop(columns='Date')

    # Return the wrangled dataframe
    return X
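A small illustrative call to helper1 on a made-up DataFrame with a 'Date' column, assuming pandas is imported as pd.

import pandas as pd

df = pd.DataFrame({
    'Date': ['2021-03-01 12:30:45', '2021-12-25 08:00:00'],
    'value': [1.0, 2.0],
})

expanded = helper1(df)
print(expanded.columns.tolist())
# ['value', 'year', 'month', 'week', 'day', 'hour', 'minute', 'second']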
Python
def overlapTrainGenerator(batch_size, train_path, image_folder, mask_folder, aug_dict,
                          image_color_mode="grayscale", mask_color_mode="grayscale",
                          image_save_prefix="image", mask_save_prefix="mask",
                          flag_multi_class=False, num_class=2, save_to_dir=None,
                          target_size=(512, 512), seed=1):
    '''
    Generate image and mask at the same time.
    Use the same seed for image_datagen and mask_datagen to ensure that the
    transformations applied to image and mask are the same.
    If you want to visualize the results of the generator, set
    save_to_dir = "your path".
    '''
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)

    image_generator = image_datagen.flow_from_directory(
        train_path,
        classes=[image_folder],
        class_mode=None,
        color_mode=image_color_mode,
        target_size=target_size,
        batch_size=batch_size,
        save_to_dir=save_to_dir,
        save_prefix=image_save_prefix,
        seed=seed)

    mask_generator = mask_datagen.flow_from_directory(
        train_path,
        classes=[mask_folder],
        class_mode=None,
        color_mode=mask_color_mode,
        target_size=target_size,
        batch_size=batch_size,
        save_to_dir=save_to_dir,
        save_prefix=mask_save_prefix,
        seed=seed)

    train_generator = zip(image_generator, mask_generator)
    for (img, mask) in train_generator:
        # adjustData and overlapData are expected to be defined elsewhere in the module.
        img, mask = adjustData(img, mask, flag_multi_class, num_class)
        img = overlapData(img)
        yield img, mask
        # saveTrainResult("data/membrane/train/overlapTrain", img, mask)
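A hypothetical way to feed overlapTrainGenerator into training; the directory layout, augmentation settings, and the unet() constructor are assumptions and not part of the original code.

# Hypothetical augmentation settings and directory layout.
aug = dict(rotation_range=0.2, width_shift_range=0.05, height_shift_range=0.05,
           zoom_range=0.05, horizontal_flip=True, fill_mode='nearest')

gen = overlapTrainGenerator(batch_size=2,
                            train_path='data/membrane/train',
                            image_folder='image',
                            mask_folder='label',
                            aug_dict=aug,
                            target_size=(512, 512))

# model = unet()   # any Keras segmentation model with a 512x512x1 input
# model.fit(gen, steps_per_epoch=300, epochs=1)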
Python
def find_all_local_file_names(source_folder_name):
    """
    Returns a list of all files that exist in the current working directory,
    filtered by source_folder_name if provided.
    """
    cwd = os.getcwd()
    cwd_extension = os.path.normpath(f'{cwd}/{source_folder_name}/**')
    file_names = glob.glob(cwd_extension, recursive=True)
    return [file_name for file_name in file_names if os.path.isfile(file_name)]
Python
def compress_files(file_paths, destination_full_path, compression):
    """
    Compress all of the matched files using the specified compression method.
    """
    if destination_full_path.endswith(f'.{compression}'):
        compressed_file_name = destination_full_path
    else:
        compressed_file_name = f'{destination_full_path}.{compression}'

    if compression == 'zip':
        with ZipFile(compressed_file_name, 'w') as zip_file:
            for file in file_paths:
                # Store paths relative to the current working directory.
                file = file.replace(os.getcwd(), '')
                zip_file.write(file)
                print(f'Successfully compressed {file}')
    elif compression == 'tar.bz2':
        with tarfile.open(compressed_file_name, 'w:bz2') as tar:
            for file in file_paths:
                file = file.replace(os.getcwd(), '')
                tar.add(file)
                print(f'Successfully compressed {file}')
    elif compression == 'tar':
        with tarfile.open(compressed_file_name, 'w') as tar:
            for file in file_paths:
                file = file.replace(os.getcwd(), '')
                tar.add(file)
                print(f'Successfully compressed {file}')
    elif compression == 'tar.gz':
        with tarfile.open(compressed_file_name, 'w:gz') as tar:
            for file in file_paths:
                file = file.replace(os.getcwd(), '')
                tar.add(file)
                print(f'Successfully compressed {file}')
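A hypothetical end-to-end call that collects files with find_all_local_file_names and archives them with compress_files; the folder name and archive name are placeholders.

# Collect every file under ./reports (a placeholder folder name) ...
matched_files = find_all_local_file_names('reports')

# ... and bundle them into reports_backup.tar.gz in the working directory.
compress_files(matched_files, destination_full_path='reports_backup', compression='tar.gz')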
Python
def create_fallback_destination_file_name(
        source_file_name, destination_file_format):
    """
    If a destination_file_name is not provided, derive one from the
    source_file_name by replacing its extension with the extension of the
    destination file format.
    """
    format_extensions = {
        "tsv": ".tsv",
        "psv": ".psv",
        "xlsx": ".xlsx",
        "parquet": ".parquet",
        "stata": ".dta",
        "hdf5": ".h5"
    }
    file_name = os.path.basename(source_file_name)
    file_name = f'{os.path.splitext(file_name)[0]}{format_extensions[destination_file_format]}'
    return file_name
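A quick illustrative call, with a made-up source path:

name = create_fallback_destination_file_name('downloads/data_2021.csv', 'parquet')
print(name)   # data_2021.parquet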
Python
def determine_destination_file_name(
        source_full_path,
        destination_file_name,
        destination_file_format,
        file_number=None,
):
    """
    Determine if the destination_file_name was provided, or should be
    extracted from the source_file_name, or should be enumerated for
    multiple file uploads.
    """
    if destination_file_name:
        if file_number:
            # enumerate_destination_file_name is defined elsewhere in this module.
            destination_file_name = enumerate_destination_file_name(
                destination_file_name, file_number)
    else:
        destination_file_name = create_fallback_destination_file_name(
            source_full_path, destination_file_format)
    return destination_file_name
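Two illustrative calls with made-up paths; the enumeration branch is omitted because enumerate_destination_file_name is defined elsewhere in the module.

# Explicit name provided, single file: used as-is.
print(determine_destination_file_name('downloads/data.csv', 'output.parquet', 'parquet'))
# output.parquet

# No name provided: fall back to the source name with the new extension.
print(determine_destination_file_name('downloads/data.csv', None, 'parquet'))
# data.parquet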