Python
def export_proto(self):
    """Get a protobuf representation of this object."""
    msg = AlgoProto.TruncationSelectionExchange()
    for key, value in self.metric_strategies.items():
        msg.metric_name_strategy_map[key] = value
    msg.truncation_k = self.truncation_k
    return msg
Python
def export_proto(self):
    """Get a protobuf representation of this object."""
    msg = AlgoProto.RegularizedEvolution()
    msg.metric_name = self.metric_name
    msg.metric_strategy = self.metric_strategy
    msg.mutation_strategy.CopyFrom(self.mutation_strategy.export_proto())
    msg.sample_size = self.sample_size
    return msg
Python
def do_export_proto(self):
    """Get a protobuf representation of this object."""
    params = AlgoProto.KFAC()
    first_order_optimizer_proto = self.first_order_optimizer.export_proto()
    first_order_optimizer_proto.parameters.Unpack(params.sgd)
    for key, value in self.kfac_args.items():
        setattr(params, key, value)
    return params
Python
def construct_data_reader():
    """Construct Protobuf message for Python data reader.

    The Python data reader will import this Python file to access the
    sample access functions.

    """
    import os.path
    import lbann
    module_file = os.path.abspath(__file__)
    module_name = os.path.splitext(os.path.basename(module_file))[0]
    module_dir = os.path.dirname(module_file)

    # Base data reader message
    message = lbann.reader_pb2.DataReader()

    # Training set data reader
    data_reader = message.reader.add()
    data_reader.name = 'python'
    data_reader.role = 'train'
    data_reader.shuffle = True
    data_reader.percent_of_data_to_use = 1.0
    data_reader.validation_percent = 0.1
    data_reader.python.module = 'dataset'
    data_reader.python.module_dir = module_dir
    data_reader.python.sample_function = 'get_sample'
    data_reader.python.num_samples_function = 'num_samples'
    data_reader.python.sample_dims_function = 'sample_dims'

    return message
Python
def message(self, node_features, neighbor_features, edge_features):
    """Update node features and edge features. The Message stage of the
    convolution.

    Args:
        node_features (Layer): A 2D layer of node features of
                               shape (num_nodes, input_channels)
        neighbor_features (Layer): A 3D layer of node features of
                                   shape (num_edges, 1, input_channels)
        edge_features (Layer): A 2D layer of edge features of
                               shape (num_edges, edge_features)
    Returns:
        (Layer, Layer): Returns the updated node features and the messages
                        for each node.
    """

    ## These reshapes do not change the nn output but enable channelwise
    ## partitioning for distconv channelwiseFC natively
    node_features = lbann.Reshape(node_features,
                                  dims=str_list([self.num_nodes, 1, self.input_channels]))
    edge_features = lbann.Reshape(edge_features,
                                  dims=str_list([self.num_edges, 1, self.edge_input_channels]))

    updated_node_features = self.node_nn(node_features)

    edge_update = None
    for layer in self.edge_nn:
        if edge_update:
            edge_update = layer(edge_update)
        else:
            edge_update = layer(edge_features)

    edge_values = \
        lbann.Reshape(edge_update,
                      dims=str_list([self.num_edges,
                                     self.input_channels,
                                     self.output_channels]),
                      name=self.name + "_edge_mat_reshape")
    edge_values = \
        lbann.MatMul(neighbor_features, edge_values)
    return updated_node_features, edge_values
Python
def aggregate(self, edge_values, edge_indices):
    """Aggregate the messages from the neighbors of the nodes.

    Args:
        edge_values (Layer): A layer of edge features of
                             shape (num_edges, edge_features)
        edge_indices (Layer): A 1D layer of node features of
                              shape (num_edges).
                              The indices used for reduction
    Returns:
        (Layer): A 2D layer of updated node features
    """
    node_feature_dims = [self.num_nodes, self.output_channels]
    edge_feature_dims = [self.num_edges, self.output_channels]

    edge_values = lbann.Reshape(edge_values,
                                dims=str_list(edge_feature_dims),
                                name=self.name + "_neighbor_features")
    edge_reduce = lbann.Scatter(edge_values,
                                edge_indices,
                                dims=str_list(node_feature_dims),
                                axis=0,
                                name=self.name + "_aggregate")

    return edge_reduce
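For reference, a minimal NumPy sketch of what this aggregation computes, assuming lbann.Scatter here performs a sum-scatter of edge rows into node rows along axis 0 (the sizes and values below are purely illustrative):

import numpy as np

# Illustrative sizes, not taken from the model above
num_edges, num_nodes, output_channels = 6, 4, 3

edge_values = np.random.rand(num_edges, output_channels)   # one message per edge
edge_indices = np.array([0, 0, 1, 2, 3, 3])                # destination node per edge

# Sum-scatter: accumulate each edge row into the row of its destination node
node_features = np.zeros((num_nodes, output_channels))
np.add.at(node_features, edge_indices, edge_values)

# Row i now holds the sum of all messages whose destination index is i
assert np.allclose(node_features[0], edge_values[0] + edge_values[1])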
Python
def construct_data_reader(lbann):
    """Construct Protobuf message for Python data reader.

    The Python data reader will import the current Python file to
    access the sample access functions.

    Args:
        lbann (module): Module for LBANN Python frontend

    """
    message = lbann.reader_pb2.DataReader()
    message.reader.extend([
        tools.create_python_data_reader(
            lbann,
            current_file,
            'get_train_sample',
            'num_train_samples',
            'sample_dims',
            'train',
        ),
        tools.create_python_data_reader(
            lbann,
            current_file,
            'get_val_sample',
            'num_val_samples',
            'sample_dims',
            'validate',
        ),
        tools.create_python_data_reader(
            lbann,
            current_file,
            'get_val_sample',
            'num_val_samples',
            'sample_dims',
            'tournament',
        ),
        tools.create_python_data_reader(
            lbann,
            current_file,
            'get_test_sample',
            'num_test_samples',
            'sample_dims',
            'test',
        ),
    ])
    return message
Python
def construct_model(run_args):
    """Construct LBANN model.

    Initial model for ATOM molecular VAE

    """
    import lbann

    pad_index = run_args.pad_index
    assert pad_index is not None

    sequence_length = run_args.sequence_length
    assert sequence_length is not None

    print("sequence length is {}".format(sequence_length))
    data_layout = "data_parallel"

    # Layer graph
    input_ = lbann.Identity(lbann.Input(name='inp', data_field='samples'), name='inp1')
    vae_loss = []
    input_feature_dims = sequence_length

    embedding_size = run_args.embedding_dim
    dictionary_size = run_args.num_embeddings
    assert embedding_size is not None
    assert dictionary_size is not None

    kl, recon = molvae.MolVAE(input_feature_dims,
                              dictionary_size,
                              embedding_size,
                              pad_index)(input_)

    vae_loss.append(kl)
    vae_loss.append(recon)
    print("LEN vae loss ", len(vae_loss))

    layers = list(lbann.traverse_layer_graph(input_))

    # Setup objective function
    weights = set()
    for l in layers:
        weights.update(l.weights)
    l2_weights = [w for w in weights if not isinstance(w.optimizer, lbann.NoOptimizer)]
    l2_reg = lbann.L2WeightRegularization(weights=l2_weights, scale=5e-4)

    obj = lbann.ObjectiveFunction(vae_loss)

    # Initialize check metric callback
    metrics = [lbann.Metric(kl, name='kl_loss'),
               lbann.Metric(recon, name='recon')]

    callbacks = [lbann.CallbackPrint(),
                 lbann.CallbackTimer()]

    if run_args.dump_weights_interval > 0:
        callbacks.append(lbann.CallbackDumpWeights(
            directory=run_args.dump_weights_dir,
            epoch_interval=run_args.dump_weights_interval))

    if run_args.ltfb:
        send_name = ('' if run_args.weights_to_send == 'All'
                     else run_args.weights_to_send)  # hack for Merlin empty string
        weights_to_ex = [w.name for w in weights if send_name in w.name]
        print("LTFB Weights to exchange ", weights_to_ex)
        callbacks.append(lbann.CallbackLTFB(
            batch_interval=run_args.ltfb_batch_interval,
            metric='recon',
            weights=list2str(weights_to_ex),
            low_score_wins=True,
            exchange_hyperparameters=True))

    if run_args.warmup:
        callbacks.append(
            lbann.CallbackLinearGrowthLearningRate(
                target=run_args.lr / 512 * run_args.batch_size,
                num_epochs=5))

    # Construct model
    return lbann.Model(run_args.num_epochs,
                       weights=weights,
                       layers=layers,
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
Python
def make_data_reader(lbann):
    """Make Protobuf message for HRRL data reader."""
    import lbann.contrib.lc.paths

    # Load data readers from prototext
    protobuf_file = os.path.join(app_path, 'data', 'probies_v2.prototext')

    message = lbann.lbann_pb2.LbannPB()
    with open(protobuf_file, 'r') as f:
        google.protobuf.text_format.Merge(f.read(), message)
    message = message.data_reader

    # Set paths
    return message
Python
def numpy_softmax(x):
    """NumPy implementation of softmax.

    The computation is performed with 64-bit floats. There is also an
    implementation of softmax in SciPy 1.2.0 (scipy.special.softmax).

    """
    # Note: compare dtypes with != rather than `is not`, which never matches
    if x.dtype != np.float64:
        x = x.astype(np.float64)
    y = np.exp(x - np.max(x))
    return y / np.sum(y)
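A quick usage sketch for the helper above; the cross-check against scipy.special.softmax assumes SciPy 1.2.0 or newer is installed:

import numpy as np
from scipy.special import softmax  # available in SciPy >= 1.2.0

x = np.array([1.0, 2.0, 3.0], dtype=np.float32)

# numpy_softmax (defined above) upcasts to float64 and normalizes to a
# probability vector; it should agree with SciPy's implementation.
y = numpy_softmax(x)
assert np.isclose(np.sum(y), 1.0)
assert np.allclose(y, softmax(x.astype(np.float64)))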
Python
def construct_model(run_args):
    """Construct LBANN model.

    Initial model for ATOM molecular VAE

    """
    import lbann

    print("Dump model dir ", run_args.dump_model_dir)
    assert run_args.dump_model_dir, "evaluate script assumes a pretrained WAE model"

    pad_index = run_args.pad_index
    assert pad_index is not None

    sequence_length = run_args.sequence_length
    assert sequence_length is not None

    print("sequence length is {}".format(sequence_length))
    data_layout = "data_parallel"

    # Layer graph
    input_ = lbann.Identity(lbann.Input(name='inp', target_mode="N/A"), name='inp1')
    wae_loss = []
    input_feature_dims = sequence_length

    embedding_size = run_args.embedding_dim
    dictionary_size = run_args.num_embeddings
    assert embedding_size is not None
    assert dictionary_size is not None

    save_output = False

    print("save output? ", save_output, "out dir ", run_args.dump_outputs_dir)

    z = lbann.Gaussian(mean=0.0, stdev=1.0, neuron_dims=str(run_args.z_dim))

    x = lbann.Slice(input_, slice_points=str_list([0, input_feature_dims]))
    x = lbann.Identity(x)

    waemodel = molwae.MolWAE(input_feature_dims,
                             dictionary_size,
                             embedding_size,
                             pad_index,
                             run_args.z_dim,
                             save_output)

    x_emb = lbann.Embedding(
        x,
        num_embeddings=waemodel.dictionary_size,
        embedding_dim=waemodel.embedding_size,
        name='emb',
        weights=waemodel.emb_weights
    )

    latentz = waemodel.forward_encoder(x_emb)

    fake_loss = lbann.MeanAbsoluteError(latentz, z)

    layers = list(lbann.traverse_layer_graph(input_))

    # Setup objective function
    weights = set()
    for l in layers:
        weights.update(l.weights)

    obj = lbann.ObjectiveFunction(fake_loss)

    callbacks = [lbann.CallbackPrint(),
                 lbann.CallbackTimer()]

    # Dump output (activation) for post processing
    conc_out = lbann.Concatenation([input_, latentz], name='conc_out')
    callbacks.append(lbann.CallbackDumpOutputs(
        batch_interval=run_args.dump_outputs_interval,
        execution_modes='test',
        format='npy',
        directory=run_args.dump_outputs_dir,
        layers=f'{conc_out.name}'))

    # Construct model
    return lbann.Model(run_args.num_epochs,
                       weights=weights,
                       layers=layers,
                       objective_function=obj,
                       callbacks=callbacks)
Python
def construct_data_reader(run_args):
    """Construct Protobuf message for Python data reader.

    The Python data reader will import this Python file to access the
    sample access functions.

    """
    module_file = os.path.abspath(run_args.data_module_file)
    os.environ["DATA_CONFIG"] = os.path.abspath(run_args.data_config)

    module_name = os.path.splitext(os.path.basename(module_file))[0]
    module_dir = os.path.dirname(module_file)

    print("module_name: {}\tmodule_dir: {}".format(module_name, module_dir))

    # Base data reader message
    message = lbann.reader_pb2.DataReader()

    # Training set data reader
    data_reader = message.reader.add()
    data_reader.name = "python"
    data_reader.role = "train"
    data_reader.shuffle = True
    data_reader.percent_of_data_to_use = 1.0
    data_reader.validation_percent = 0.1
    data_reader.python.module = module_name
    data_reader.python.module_dir = module_dir
    data_reader.python.sample_function = "get_sample"
    data_reader.python.num_samples_function = "num_samples"
    data_reader.python.sample_dims_function = "sample_dims"

    return message
Python
def gpus_per_node(system=system()):
    """Number of GPUs per node."""
    if not is_lc_system(system):
        raise RuntimeError('unknown system (' + system + ')')
    return _system_params[system].gpus_per_node
Python
def cores_per_node(system=system()):
    """Number of CPU cores per node."""
    if not is_lc_system(system):
        raise RuntimeError('unknown system (' + system + ')')
    return _system_params[system].cores_per_node
Python
def scheduler(system=system()):
    """Job scheduler for LC system."""
    if not is_lc_system(system):
        raise RuntimeError('unknown system (' + system + ')')
    return _system_params[system].scheduler
Python
def procs_per_node(system=system()):
    """Default number of processes per node."""
    if has_gpu(system):
        return gpus_per_node(system)
    else:
        # Catalyst and Quartz have 2 sockets per node
        ### @todo Think of a smarter heuristic
        return 2
Python
def make_data_reader(lbann):
    """Make Protobuf message for HRRL data reader."""
    import lbann.contrib.lc.paths

    # Load data readers from prototext
    message = lbann.lbann_pb2.LbannPB()
    with open(data_reader_prototext, 'r') as f:
        google.protobuf.text_format.Merge(f.read(), message)
    message = message.data_reader

    # Use less training data for the integration test
    message.reader[0].percent_of_data_to_use = 0.01

    # Set paths
    return message
Python
def construct_model(run_args):
    """Construct LBANN model.

    Initial model for ATOM molecular VAE

    """
    import lbann

    pad_index = run_args.pad_index
    assert pad_index is not None

    sequence_length = run_args.sequence_length
    assert sequence_length is not None

    print("sequence length is {}".format(sequence_length))
    data_layout = "data_parallel"

    # Layer graph
    input_ = lbann.Identity(lbann.Input(name='inp', data_field='samples'), name='inp1')
    input_feature_dims = sequence_length

    embedding_size = run_args.embedding_dim
    dictionary_size = run_args.num_embeddings
    assert embedding_size is not None
    assert dictionary_size is not None

    save_output = True if run_args.dump_outputs_dir else False

    print("save output? ", save_output, "out dir ", run_args.dump_outputs_dir)

    z = lbann.Gaussian(mean=run_args.g_mean,
                       stdev=run_args.g_std,
                       neuron_dims=str(run_args.z_dim))

    recon, d1_real, d1_fake, d_adv, arg_max = molwae.MolWAE(
        input_feature_dims,
        dictionary_size,
        embedding_size,
        pad_index,
        run_args.z_dim,
        run_args.g_mean,
        run_args.g_std,
        save_output=save_output)(input_, z)

    zero = lbann.Constant(value=0.0, num_neurons='1', name='zero')
    one = lbann.Constant(value=1.0, num_neurons='1', name='one')

    d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real, one], name='d1_real_bce')
    d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake, zero], name='d1_fake_bce')
    d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv, one], name='d_adv_bce')

    #vae_loss.append(recon)

    layers = list(lbann.traverse_layer_graph(input_))

    # Setup objective function
    weights = set()
    src_layers = []
    dst_layers = []
    for l in layers:
        if l.weights and "disc0" in l.name and "instance1" in l.name:
            src_layers.append(l.name)
        # freeze weights in disc2
        if l.weights and "disc1" in l.name:
            dst_layers.append(l.name)
            for idx in range(len(l.weights)):
                l.weights[idx].optimizer = lbann.NoOptimizer()
        weights.update(l.weights)

    l2_weights = [w for w in weights if not isinstance(w.optimizer, lbann.NoOptimizer)]
    l2_reg = lbann.L2WeightRegularization(weights=l2_weights, scale=1e-4)

    d_adv_bce = lbann.LayerTerm(d_adv_bce, scale=run_args.lamda)

    obj = lbann.ObjectiveFunction([d1_real_bce, d1_fake_bce, d_adv_bce, recon, l2_reg])

    # Initialize check metric callback
    metrics = [lbann.Metric(recon, name='recon')]

    callbacks = [lbann.CallbackPrint(),
                 lbann.CallbackTimer()]

    if run_args.dump_weights_interval > 0:
        callbacks.append(lbann.CallbackDumpWeights(
            directory=run_args.dump_weights_dir,
            epoch_interval=run_args.dump_weights_interval))

    if run_args.ltfb:
        send_name = ('' if run_args.weights_to_send == 'All'
                     else run_args.weights_to_send)  # hack for Merlin empty string
        weights_to_ex = [w.name for w in weights if send_name in w.name]
        print("LTFB Weights to exchange ", weights_to_ex)
        callbacks.append(lbann.CallbackLTFB(
            batch_interval=run_args.ltfb_batch_interval,
            metric='recon',
            weights=list2str(weights_to_ex),
            low_score_wins=True,
            exchange_hyperparameters=True))

    callbacks.append(lbann.CallbackReplaceWeights(
        source_layers=list2str(src_layers),
        destination_layers=list2str(dst_layers),
        batch_interval=2))

    # Dump final weight for inference
    if run_args.dump_model_dir:
        callbacks.append(lbann.CallbackSaveModel(dir=run_args.dump_model_dir))

    # Dump output (activation) for post processing
    if run_args.dump_outputs_dir:
        pred_tensor = lbann.Concatenation(arg_max, name='pred_tensor')
        callbacks.append(lbann.CallbackDumpOutputs(
            batch_interval=run_args.dump_outputs_interval,
            execution_modes='test',
            directory=run_args.dump_outputs_dir,
            layers='inp pred_tensor'))

    if run_args.warmup:
        callbacks.append(
            lbann.CallbackLinearGrowthLearningRate(
                target=run_args.lr / 512 * run_args.batch_size,
                num_epochs=5))

    # Construct model
    return lbann.Model(run_args.num_epochs,
                       weights=weights,
                       layers=layers,
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
Python
def export_proto(self):
    """Construct and return a protobuf message."""
    proto = weights_pb2.Weights()
    proto.name = self.name

    # Set initializer if needed
    if self.initializer:
        proto.initializer.CopyFrom(self.initializer.export_proto())
        proto.initializer.SetInParent()

    # Set optimizer if needed
    if self.optimizer:
        proto.optimizer.CopyFrom(self.optimizer.export_proto())
        proto.optimizer.SetInParent()

    # Set datatype if needed
    if self.datatype:
        proto.datatype = self.datatype

    return proto
Python
def graph_data_splitter(_input,
                        NUM_NODES,
                        NUM_EDGES,
                        NUM_NODE_FEATURES,
                        NUM_EDGE_FEATURES,
                        EMBEDDING_DIM,
                        EDGE_EMBEDDING_DIM):
    """Helper function to split the input data into node features,
    neighbor features, edge features, edge source indices, and the label.

    Args:
        NUM_NODES (int): The number of nodes in the largest graph in the dataset (51 for LSC-PPQM4M)
        NUM_EDGES (int): The number of edges in the largest graph in the dataset (118 for LSC-PPQM4M)
        NUM_NODE_FEATURES (int): The dimensionality of the input node feature vectors (9 for LSC-PPQM4M)
        NUM_EDGE_FEATURES (int): The dimensionality of the input edge feature vectors (3 for LSC-PPQM4M)
        EMBEDDING_DIM (int): The embedding dimensionality of the node feature vector
        EDGE_EMBEDDING_DIM (int): The embedding dimensionality of the edge feature vector
    Returns:
        (Layer, Layer, Layer, Layer, Layer): Returns 5 Layers: the embedded node feature matrix,
            the neighbor node feature tensor, the embedded edge feature matrix,
            the source node index vector, and the label
    """
    split_indices = []

    start_index = 0
    split_indices.append(start_index)

    node_feature = [NUM_NODES for i in range(1, NUM_NODE_FEATURES + 1)]
    split_indices.extend(node_feature)

    edge_features = [NUM_EDGES for i in range(1, NUM_EDGE_FEATURES + 1)]
    split_indices.extend(edge_features)

    edge_indices_sources = NUM_EDGES
    split_indices.append(edge_indices_sources)

    edge_indices_targets = NUM_EDGES
    split_indices.append(edge_indices_targets)

    target = 1
    split_indices.append(target)

    # Convert per-field sizes into cumulative slice points
    for i in range(1, len(split_indices)):
        split_indices[i] = split_indices[i] + split_indices[i - 1]

    graph_input = lbann.Slice(_input, axis=0, slice_points=str_list(split_indices))

    neighbor_feature_dims = str_list([NUM_EDGES, 1, EMBEDDING_DIM])

    node_feature_columns = [lbann.Reshape(lbann.Identity(graph_input),
                                          dims=str_list([NUM_NODES]),
                                          name="node_ft_{}_col".format(x))
                            for x in range(NUM_NODE_FEATURES)]

    edge_feature_columns = [lbann.Reshape(lbann.Identity(graph_input),
                                          dims=str_list([NUM_EDGES]),
                                          name="edge_ft_{}_col".format(x))
                            for x in range(NUM_EDGE_FEATURES)]

    source_nodes = lbann.Reshape(lbann.Identity(graph_input),
                                 dims=str_list([NUM_EDGES]),
                                 name="source_nodes")
    target_nodes = lbann.Reshape(lbann.Identity(graph_input),
                                 dims=str_list([NUM_EDGES]),
                                 name="target_nodes")
    label = lbann.Reshape(lbann.Identity(graph_input),
                          dims=str_list([1]),
                          name="Graph_Label")

    embedded_node_features = AtomEncoder(node_feature_columns, EMBEDDING_DIM)
    embedded_edge_features = BondEncoder(edge_feature_columns, EDGE_EMBEDDING_DIM)

    neighbor_features = lbann.Gather(embedded_node_features, target_nodes, axis=0)
    neighbor_feature_mat = lbann.Reshape(neighbor_features, dims=neighbor_feature_dims)

    return \
        embedded_node_features, neighbor_feature_mat, embedded_edge_features, source_nodes, label
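As a standalone sanity check of the slice-point arithmetic, the prefix-sum loop above can be reproduced with the LSC-PPQM4M sizes quoted in the docstring (the numbers come from the docstring, not from running the model):

# Reproduce the prefix-sum loop for the sizes quoted in the docstring
NUM_NODES, NUM_EDGES = 51, 118
NUM_NODE_FEATURES, NUM_EDGE_FEATURES = 9, 3

split_indices = [0]
split_indices += [NUM_NODES] * NUM_NODE_FEATURES    # one column per node feature
split_indices += [NUM_EDGES] * NUM_EDGE_FEATURES    # one column per edge feature
split_indices += [NUM_EDGES, NUM_EDGES, 1]          # source indices, target indices, label

for i in range(1, len(split_indices)):
    split_indices[i] += split_indices[i - 1]

print(split_indices)
# [0, 51, 102, 153, 204, 255, 306, 357, 408, 459, 577, 695, 813, 931, 1049, 1050]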
Python
def create_parallel_strategy(num_channel_groups):
    """Helper function to create channelwise fully connected layer
    distconv parallel strategy.
    """
    if num_channel_groups > 0:
        return {"channel_groups": num_channel_groups,
                "filter_groups": num_channel_groups}
    else:
        return {}
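A brief usage sketch: the printed dictionaries follow directly from the helper, while the commented layer call only illustrates the assumed LBANN convention of passing the result as a layer's parallel_strategy keyword (the layer and group count here are illustrative):

# With a positive group count the helper returns a distconv parallel strategy;
# with zero it returns an empty dict, leaving the layer's default behavior.
print(create_parallel_strategy(4))   # {'channel_groups': 4, 'filter_groups': 4}
print(create_parallel_strategy(0))   # {}

# Typical (assumed) use with the LBANN Python frontend:
#   lbann.ChannelwiseFullyConnected(x, output_channel_dims=[64],
#                                   parallel_strategy=create_parallel_strategy(4))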
Python
def make_model(NUM_NODES,
               NUM_EDGES,
               NUM_NODES_FEATURES,
               NUM_EDGE_FEATURES,
               EMBEDDING_DIM,
               EDGE_EMBEDDING_DIM,
               NUM_OUT_FEATURES,
               NUM_EPOCHS,
               NUM_GROUPS=0):
    """Creates an LBANN model for the OGB-LSC PPQM4M Dataset.

    Args:
        NUM_NODES (int): The number of nodes in the largest graph in the dataset (51 for LSC-PPQM4M)
        NUM_EDGES (int): The number of edges in the largest graph in the dataset (118 for LSC-PPQM4M)
        NUM_NODES_FEATURES (int): The dimensionality of the input node feature vectors (9 for LSC-PPQM4M)
        NUM_EDGE_FEATURES (int): The dimensionality of the input edge feature vectors (3 for LSC-PPQM4M)
        EMBEDDING_DIM (int): The embedding dimensionality of the node feature vector
        EDGE_EMBEDDING_DIM (int): The embedding dimensionality of the edge feature vector
        NUM_OUT_FEATURES (int): The dimensionality of the node feature vectors after graph convolutions
        NUM_EPOCHS (int): The number of epochs to train the network
        NUM_GROUPS (int): The number of channel groups for distconv channelwise
                          fully connected layer (default: 0)
    Returns:
        (Model): lbann model object
    """
    in_channel = EMBEDDING_DIM
    out_channel = NUM_OUT_FEATURES
    output_dimension = 1

    _input = lbann.Input(data_field='samples')
    node_feature_mat, neighbor_feature_mat, edge_feature_mat, edge_indices, target = \
        graph_data_splitter(_input,
                            NUM_NODES,
                            NUM_EDGES,
                            NUM_NODES_FEATURES,
                            NUM_EDGE_FEATURES,
                            EMBEDDING_DIM,
                            EDGE_EMBEDDING_DIM)

    x = NNConvLayer(node_feature_mat,
                    neighbor_feature_mat,
                    edge_feature_mat,
                    edge_indices,
                    in_channel,
                    out_channel,
                    EDGE_EMBEDDING_DIM,
                    NUM_NODES,
                    NUM_EDGES,
                    NUM_GROUPS)

    for i, num_neurons in enumerate([256, 128, 32, 8], 1):
        x = lbann.FullyConnected(x,
                                 num_neurons=num_neurons,
                                 name="hidden_layer_{}".format(i))
        x = lbann.Relu(x, name='hidden_layer_{}_activation'.format(i))

    x = lbann.FullyConnected(x,
                             num_neurons=output_dimension,
                             name="output")

    loss = lbann.MeanAbsoluteError(x, target)

    layers = lbann.traverse_layer_graph(_input)
    training_output = lbann.CallbackPrint(interval=1,
                                          print_global_stat_only=False)
    gpu_usage = lbann.CallbackGPUMemoryUsage()
    timer = lbann.CallbackTimer()

    callbacks = [training_output, gpu_usage, timer]

    model = lbann.Model(NUM_EPOCHS,
                        layers=layers,
                        objective_function=loss,
                        callbacks=callbacks)
    return model
Python
def export_proto(self):
    """Construct and return a protobuf message."""
    proto = layers_pb2.Layer()
    proto.parents = ' '.join([l.name for l in self.parents])
    proto.children = ' '.join([l.name for l in self.children])
    proto.weights = ' '.join([w.name for w in self.weights])
    proto.name = self.name
    if self.device:
        proto.device_allocation = self.device
    if self.data_layout:
        proto.data_layout = self.data_layout
    if self.datatype:
        proto.datatype = self.datatype
    if self.hint_layer:
        proto.hint_layer = self.hint_layer.name
    for k, v in self.parallel_strategy.items():
        setattr(proto.parallel_strategy, k, v)
    return proto
Python
def add_parent(self, parent):
    """This layer will receive an input tensor from `parent`."""
    for p in make_iterable(parent):
        self.parents.append(p)
        p.children.append(self)
Python
def add_child(self, child):
    """This layer will send an output tensor to `child`."""
    for c in make_iterable(child):
        self.children.append(c)
        c.parents.append(self)
Python
def traverse_layer_graph(layers):
    """Topologically ordered traversal of layer graph.

    All layers that are connected to `layers` will be traversed. The
    layer graph is assumed to be acyclic. No checks are made for
    cycles and strange things may happen if one exists.

    Args:
        layers (Layer or Iterator of Layer): Node(s) in layer graph.

    Yields:
        Layer: Node in layer graph, in a topological order.

    """

    # DFS to find root nodes in layer graph
    roots = []
    visited = set()
    stack = list(make_iterable(layers))
    while stack:
        l = stack.pop()
        if l not in visited:
            visited.add(l)
            stack.extend(l.parents)
            stack.extend(l.children)
            if not l.parents:
                roots.append(l)

    # DFS to traverse layer graph in topological order
    visited = set()
    stack = roots
    while stack:
        l = stack.pop()
        if (l not in visited
                and all([(p in visited) for p in l.parents])):
            visited.add(l)
            stack.extend(l.children)
            yield l
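A small standalone illustration of the traversal order, using a stand-in node class and a minimal make_iterable so the generator above can be exercised without LBANN (this assumes the demo is pasted into the same module as traverse_layer_graph; all names here are only for the demo):

# Stand-in node with the attributes the traversal relies on: parents, children, name
class Node:
    def __init__(self, name):
        self.name, self.parents, self.children = name, [], []

def make_iterable(obj):
    # Minimal substitute for the utility used by traverse_layer_graph
    return obj if isinstance(obj, (list, tuple, set)) else (obj,)

# Diamond graph: a -> b, a -> c, b -> d, c -> d
a, b, c, d = (Node(n) for n in "abcd")
for parent, child in [(a, b), (a, c), (b, d), (c, d)]:
    parent.children.append(child)
    child.parents.append(parent)

order = [n.name for n in traverse_layer_graph(d)]
print(order)  # 'a' is first, 'd' is last; the b/c order depends on stack order
assert order[0] == 'a' and order[-1] == 'd'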
Python
def Graph_Data_Parser(_lbann_input_,
                      num_nodes,
                      node_feature_size,
                      max_edges,
                      num_classes=1):
    """A parser for graph structured data with node features, source and
    target node indices (COO format), and a target.

    Args:
        _lbann_input_ (Layer): The input layer of the LBANN model
        num_nodes (int): The maximum number of nodes in the dataset
        node_feature_size (int): The dimensionality of the node features matrix
        max_edges (int): The maximum number of edges in the dataset
        num_classes (int): The number of classes in the target, or 1 for
                           regression (default: 1)
    Returns:
        (dictionary): Returns a dictionary with the keys: node_features,
                      source_indices, target_indices, and target
    """
    slice_points = [0, num_nodes * node_feature_size, max_edges, max_edges, num_classes]
    shifted_slice_points = list(accumulate(slice_points))
    sliced_input = lbann.Slice(_lbann_input_,
                               slice_points=str_list(shifted_slice_points),
                               name="Sliced_Graph_Input")
    node_features = lbann.Reshape(lbann.Identity(sliced_input),
                                  dims=str_list([num_nodes, node_feature_size]),
                                  name="Node_Feature_Matrix")
    source_indices = lbann.Identity(sliced_input)
    target_indices = lbann.Identity(sliced_input)
    targets = lbann.Identity(sliced_input)
    graph_data = {"node_features": node_features,
                  "source_indices": source_indices,
                  "target_indices": target_indices,
                  "target": targets}
    return graph_data
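As a concrete check of the slice points, itertools.accumulate turns the per-field sizes into cumulative offsets; the sizes below are the ones hard-coded in make_model later in this collection (num_vertices=100, node_feature_size=3, max_edges=415, num_classes=2):

from itertools import accumulate

num_nodes, node_feature_size, max_edges, num_classes = 100, 3, 415, 2

slice_points = [0, num_nodes * node_feature_size, max_edges, max_edges, num_classes]
shifted_slice_points = list(accumulate(slice_points))
print(shifted_slice_points)  # [0, 300, 715, 1130, 1132] -> flat sample is 1132 values long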
Python
def GINConvLayer(node_features,
                 source_indices,
                 target_indices,
                 num_nodes,
                 num_edges,
                 input_channels,
                 output_channels):
    """An example GIN kernel with a 4-layer deep sequential nn.

    Args:
        node_features (Layer): Node feature matrix with the shape of (num_nodes, input_channels)
        source_indices (Layer): Source node indices of the edges with shape (num_edges)
        target_indices (Layer): Target node indices of the edges with shape (num_edges)
        num_nodes (int): Number of vertices in the graph
        num_edges (int): Number of edges in the graph
        input_channels (int): The size of the input node features
        output_channels (int): The number of output channels of the node features
    Returns:
        (GraphVertexData): Returns the new embedding of the node features
    """
    FC = lbann.modules.ChannelwiseFullyConnectedModule
    sequential_nn = \
        [FC(128),
         lbann.Relu,
         FC(64),
         lbann.Relu,
         FC(32),
         lbann.Relu,
         FC(output_channels),
         lbann.Relu]

    gin = GINConv(sequential_nn,
                  input_channels=input_channels,
                  output_channels=output_channels,
                  num_nodes=num_nodes,
                  num_edges=num_edges)
    return gin(node_features, source_indices, target_indices)
Python
def GCNConvLayer(node_features,
                 source_indices,
                 target_indices,
                 num_nodes,
                 num_edges,
                 input_channels,
                 output_channels):
    """An example 2-layer GCN kernel.

    Args:
        node_features (Layer): Node feature matrix with the shape of (num_nodes, input_channels)
        source_indices (Layer): Source node indices of the edges with shape (num_edges)
        target_indices (Layer): Target node indices of the edges with shape (num_edges)
        num_nodes (int): Number of vertices in the graph
        num_edges (int): Number of edges in the graph
        input_channels (int): The size of the input node features
        output_channels (int): The number of output channels of the node features
    Returns:
        (Layer): The resultant node features after message passing kernel ops
    """
    input_channels_1 = input_channels
    out_channels_1 = 8
    input_channels_2 = out_channels_1
    out_channels_2 = output_channels

    gcn_1 = GCNConv(input_channels_1,
                    out_channels_1,
                    num_nodes,
                    bias=True,
                    activation=lbann.Relu,
                    name='GCN_1')
    gcn_2 = GCNConv(input_channels_2,
                    out_channels_2,
                    num_nodes,
                    bias=True,
                    activation=lbann.Relu,
                    name='GCN_2')

    X = gcn_1(node_features, source_indices, target_indices)
    return gcn_2(X, source_indices, target_indices)
Python
def GraphConvLayer(node_features,
                   source_indices,
                   target_indices,
                   num_nodes,
                   num_edges,
                   input_channels,
                   output_channels):
    """An example 2-layer Graph kernel.

    Args:
        node_features (Layer): Node feature matrix with the shape of (num_nodes, input_channels)
        source_indices (Layer): Source node indices of the edges with shape (num_edges)
        target_indices (Layer): Target node indices of the edges with shape (num_edges)
        num_nodes (int): Number of vertices in the graph
        num_edges (int): Number of edges in the graph
        input_channels (int): The size of the input node features
        output_channels (int): The number of output channels of the node features
    Returns:
        (Layer): The resultant node features after message passing kernel ops
    """
    input_channels_1 = input_channels
    out_channels_1 = 8
    input_channels_2 = out_channels_1
    out_channels_2 = output_channels

    graph_1 = GraphConv(input_channels_1,
                        out_channels_1,
                        num_nodes,
                        bias=True,
                        activation=lbann.Relu,
                        name='Graph_kernel_1')
    graph_2 = GraphConv(input_channels_2,
                        out_channels_2,
                        num_nodes,
                        bias=True,
                        activation=lbann.Relu,
                        name='Graph_Kernel_2')

    X = graph_1(node_features, source_indices, target_indices)
    return graph_2(X, source_indices, target_indices)
Python
def GATConvLayer(node_features,
                 source_indices,
                 target_indices,
                 num_nodes,
                 num_edges,
                 input_channels,
                 output_channels):
    """An example single-layer GatedGraph kernel.

    Args:
        node_features (Layer): Node feature matrix with the shape of (num_nodes, input_channels)
        source_indices (Layer): Source node indices of the edges with shape (num_edges)
        target_indices (Layer): Target node indices of the edges with shape (num_edges)
        num_nodes (int): Number of vertices in the graph
        num_edges (int): Number of edges in the graph
        input_channels (int): The size of the input node features
        output_channels (int): The number of output channels of the node features
    Returns:
        (Layer): The resultant node features after message passing kernel ops
    """
    num_layers = 3
    name = 'GatedGraph'
    data_layout = 'data_parallel'

    graph_kernel = GatedGraphConv(input_channels,
                                  output_channels,
                                  num_nodes,
                                  num_layers=num_layers,
                                  name=name)
    return graph_kernel(node_features, source_indices, target_indices)
Python
def make_model(num_vertices=None,
               node_features=None,
               num_classes=None,
               kernel_type='GCN',
               callbacks=None,
               num_epochs=1):
    '''Construct a model DAG using one of the Graph Kernels

    Args:
        num_vertices (int): Number of vertices of each graph (default: None)
        node_features (int): Number of features per node (default: None)
        num_classes (int): Number of classes as targets (default: None)
        kernel_type (str): Graph kernel to use in the model. Expected one of
                           GCN, GIN, Graph, or GatedGraph (default: GCN)
        callbacks (list): Callbacks for the model. If set to None, the model
                          description, GPU usage, training output, and timer
                          are reported. (default: None)
        num_epochs (int): Number of epochs to run (default: 1)
    Returns:
        (lbann.Model): A model object with the supplied callbacks, dataset
                       presets, and graph kernels.
    '''

    num_vertices = 100
    num_classes = 2
    node_feature_size = 3
    max_edges = 415

    #----------------------------------
    # Reshape and Slice Input Tensor
    #----------------------------------

    input_ = lbann.Input(data_field='samples')

    # Input dimensions should be (num_vertices * node_features + num_vertices^2 + num_classes)
    data = Graph_Data_Parser(input_,
                             num_vertices,
                             node_feature_size,
                             max_edges,
                             num_classes)

    feature_matrix = data['node_features']
    source_indices = data['source_indices']
    target_indices = data['target_indices']
    target = data['target']

    #----------------------------------
    # Select Graph Convolution
    #----------------------------------

    output_channels = 16
    graph_kernel_op = None
    if kernel_type == 'GIN':
        graph_kernel_op = GINConvLayer
    elif kernel_type == 'GCN':
        graph_kernel_op = GCNConvLayer
    elif kernel_type == 'Graph':
        graph_kernel_op = GraphConvLayer
    elif kernel_type == 'GatedGraph':
        graph_kernel_op = GATConvLayer
    else:
        raise ValueError('Invalid graph kernel specifier "{}" received. '
                         'Expected one of: GIN, GCN, Graph, or GatedGraph'.format(kernel_type))

    #----------------------------------
    # Perform Graph Convolution
    #----------------------------------

    x = graph_kernel_op(feature_matrix,
                        source_indices,
                        target_indices,
                        num_vertices,
                        max_edges,
                        node_feature_size,
                        output_channels)

    #----------------------------------
    # Apply Reduction on Node Features
    #----------------------------------

    average_vector = lbann.Constant(value=1 / num_vertices,
                                    num_neurons=str_list([1, num_vertices]),
                                    name="Average_Vector")
    x = lbann.MatMul(average_vector, x, name="Node_Feature_Reduction")
    # x is now a vector with output_channels dimensions
    x = lbann.Reshape(x, dims=str_list([output_channels]), name="Squeeze")
    x = lbann.FullyConnected(x, num_neurons=64, name="hidden_layer_1")
    x = lbann.Relu(x, name="hidden_layer_1_activation")
    x = lbann.FullyConnected(x, num_neurons=num_classes, name="Output_Fully_Connected")

    #----------------------------------
    # Loss Function and Accuracy
    #----------------------------------

    probs = lbann.Softmax(x, name="Softmax")
    loss = lbann.CrossEntropy(probs, target, name="Cross_Entropy_Loss")
    accuracy = lbann.CategoricalAccuracy(probs, target, name="Accuracy")

    layers = lbann.traverse_layer_graph(input_)

    if callbacks is None:
        print_model = lbann.CallbackPrintModelDescription()  # Prints initial model after setup
        training_output = lbann.CallbackPrint(interval=1,
                                              print_global_stat_only=False)  # Prints training progress
        gpu_usage = lbann.CallbackGPUMemoryUsage()
        timer = lbann.CallbackTimer()
        callbacks = [print_model, training_output, gpu_usage, timer]
    else:
        if isinstance(callbacks, list):
            callbacks = callbacks

    metrics = [lbann.Metric(accuracy, name='accuracy', unit="%")]

    model = lbann.Model(num_epochs,
                        layers=layers,
                        objective_function=loss,
                        metrics=metrics,
                        callbacks=callbacks)
    return model
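A hedged sketch of how this constructor might be driven with the LBANN Python front end. The optimizer settings and job name are illustrative, and get_data_reader() is a placeholder for whatever builds the reader_pb2.DataReader message in the surrounding application.

# Usage sketch, assuming the LBANN Python front-end is available.
import lbann
import lbann.contrib.launcher

model = make_model(kernel_type='GCN', num_epochs=10)
data_reader = get_data_reader()          # placeholder for a reader_pb2.DataReader message
optimizer = lbann.SGD(learn_rate=0.01, momentum=0.9)
trainer = lbann.Trainer(mini_batch_size=32)

lbann.contrib.launcher.run(trainer, model, data_reader, optimizer,
                           job_name='gnn_example')   # job name is illustrative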
Python
def addState(self, state, stateName): """ Adds a state to the state machine object """ self.States[stateName] = state
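addState only registers a state object under a name; a minimal sketch of a surrounding container is shown below. The StateMachine class and its States dict are assumptions for this sketch, not part of the original.

# Hypothetical container showing how addState is typically used.
class StateMachine:
    def __init__(self):
        self.States = {}

    def addState(self, state, stateName):
        """ Adds a state to the state machine object """
        self.States[stateName] = state


machine = StateMachine()
machine.addState(state=lambda: print("idle state entered"), stateName="idle")
machine.States["idle"]()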
Python
def num_node(self, node_type: EntityType) -> int:
    """ Count the number of nodes of a given type

    Args:
        node_type: The type of the node

    Returns:
        The count
    """
    return sum(
        map(lambda node: 1 if node.type == node_type else 0, self._nodes))
Python
def check_node_exist(self, url: Url) -> bool: """ Check whether a node is already there Args: url: The url of the node Returns: True or false """ return url in self._url_to_node
Python
def add_node(self, node: NodeBase, set_id: bool = True) -> bool: """ Add a node to the graph. True if successful, false otherwise. """ url = node.url if self.check_node_exist(url): return False if set_id: node.node_id = len(self._nodes) self._nodes.append(node) self._url_to_node[url] = node return True
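A toy sketch of exercising check_node_exist and add_node together. The SimpleNode stand-in and the WikiGraph container name are hypothetical; only the url/node_id attributes and the _nodes/_url_to_node bookkeeping shown above are assumed.

# Hypothetical node type; only the attributes add_node relies on are modelled.
class SimpleNode:
    def __init__(self, url):
        self.url = url
        self.node_id = None

graph = WikiGraph()                      # hypothetical container exposing add_node()
node = SimpleNode('/wiki/Example_Film')
assert graph.add_node(node)              # first insertion succeeds and assigns node_id
assert not graph.add_node(node)          # repeated URL is rejected
assert graph.check_node_exist('/wiki/Example_Film')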
Python
def parse_staring(self, infobox: Dict[str, Tag]) -> List[Url]:
    """ Parse the list of starring actors

    Args:
        infobox: The infobox

    Returns:
        A list of actors
    """
    urls = []
    if 'Starring' in infobox:
        for link in infobox['Starring'].find_all(
                'a', href=re.compile('/wiki/')):
            urls.append(link.attrs['href'])
    else:
        logger.log(logging.WARN, 'Starring not found for %s' % self.url)
    return urls
Python
def parse_cast(self, html: Tag) -> List[Url]: """ Parse the list of casting actors Args: html: The page Returns: A list of actors """ urls: List[Url] = [] cast_h2 = html.find_all( lambda tag: tag.name == 'h2' and tag.find_all('span', id='Cast')) if len(cast_h2) != 1: logger.log(logging.WARN, '%d cast section found for %s' % ( len(cast_h2), self.url)) return urls cast_list = None for sibling in cast_h2[0].next_siblings: if hasattr(sibling, 'name') and sibling.name == 'ul': cast_list = sibling break elif (hasattr(sibling, 'name') and sibling.name == 'div' and sibling.ul is not None): cast_list = sibling.ul break if cast_list: for li in cast_list.find_all('li'): link = li.find('a', href=re.compile('/wiki/')) if link: urls.append(link.attrs['href']) else: logger.log(logging.WARN, 'No cast list for %s' % self.url) return urls
Python
def grossing(self) -> float: """ Get the total grossing for the actor Returns: the grossing """ total = 0.0 for edge in self.get_edges(): for node in edge.ends: if node != self and isinstance(node, Movie): total += node.total_grossing * edge.weight return total
Python
def parse_page_type_get_infobox( html: Tag) -> Tuple[PageType, Optional[Dict[str, Tag]]]: """ Find the type of page (MOVIE vs ACTOR vs OTHER) Args: html: The page Returns: The type of the page """ infoboxes = html.find_all('table', class_='infobox') if len(infoboxes) == 1: infobox_dict = parse_infobox(infoboxes[0]) # Check if movie image_caption = infobox_dict.get('_image_caption', '') if 'theatrical release poster' in image_caption.lower(): return PageType.MOVIE, infobox_dict # Check if actor if 'Occupation' in infobox_dict: occupation = infobox_dict['Occupation'] if occupation(text=re.compile('(Actor|actor|Actress|actress)')): return PageType.ACTOR, infobox_dict return PageType.OTHER, None
Python
def prerelease_local_scheme(version): """Return local scheme version unless building on master in CircleCI. This function returns the local scheme version number (e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a pre-release in which case it ignores the hash and produces a PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>). """ from setuptools_scm.version import get_local_node_and_date if 'CIRCLE_BRANCH' in os.environ and \ os.environ['CIRCLE_BRANCH'] == 'master': return '' else: return get_local_node_and_date(version)
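This helper is normally handed to setuptools_scm through the use_scm_version argument; a hedged sketch of that wiring in a standard setup.py (the package name is illustrative) is:

# Sketch of wiring the local_scheme hook into setuptools_scm.
from setuptools import setup

setup(
    name='example-package',
    use_scm_version={'local_scheme': prerelease_local_scheme},
    setup_requires=['setuptools-scm'],
)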
Python
def validTransitions(event): """Allow our custom job transitions.""" states = None if event.info['job']['handler'] == 'worker_handler': states = CustomJobStatus.validTransitionsWorker(event.info['status']) elif event.info['job']['handler'] == 'celery_handler': states = CustomJobStatus.validTransitionsCelery(event.info['status']) if states is not None: event.preventDefault().addResponse(states)
Python
def schedule(event): """ This is bound to the "jobs.schedule" event, and will be triggered any time a job is scheduled. This handler will process any job that has the handler field set to "worker_handler". """ job = event.info if job['handler'] == 'worker_handler': task = job.get('celeryTaskName', 'girder_worker.run') # Set the job status to queued Job().updateJob(job, status=JobStatus.QUEUED) # Send the task to celery asyncResult = getCeleryApp().send_task( task, job['args'], job['kwargs'], queue=job.get('celeryQueue'), headers={ 'jobInfoSpec': jobInfoSpec(job, job.get('token', None)), 'apiUrl': getWorkerApiUrl() }) # Record the task ID from celery. Job().updateJob(job, otherFields={ 'celeryTaskId': asyncResult.task_id }) # Stop event propagation since we have taken care of scheduling. event.stopPropagation()
Python
def cancel(event): """ This is bound to the "jobs.cancel" event, and will be triggered any time a job is canceled. This handler will process any job that has the handler field set to "worker_handler". """ job = event.info if job['handler'] in ['worker_handler', 'celery_handler']: # Stop event propagation and prevent default, we are using a custom state event.stopPropagation().preventDefault() celeryTaskId = job.get('celeryTaskId') if celeryTaskId is None: msg = ("Unable to cancel Celery task. Job '%s' doesn't have a Celery task id." % job['_id']) logger.warn(msg) return should_revoke = False if job['status'] == JobStatus.INACTIVE: # Move inactive jobs directly to canceled state Job().updateJob(job, status=JobStatus.CANCELED) should_revoke = True elif job['status'] not in [CustomJobStatus.CANCELING, JobStatus.CANCELED, JobStatus.SUCCESS, JobStatus.ERROR]: # Give active jobs a chance to be canceled by their runner Job().updateJob(job, status=CustomJobStatus.CANCELING) should_revoke = True if should_revoke: # Send the revoke request. asyncResult = AsyncResult(celeryTaskId, app=getCeleryApp()) asyncResult.revoke()
Python
def attachParentJob(event): """Attach parentJob before a model is saved.""" job = event.info if job.get('celeryParentTaskId'): celeryParentTaskId = job['celeryParentTaskId'] parentJob = Job().findOne({'celeryTaskId': celeryParentTaskId}) event.info['parentId'] = parentJob['_id']
Python
def attachJobInfoSpec(event): """Attach jobInfoSpec after a model is saved.""" job = event.info # Local jobs have a module key if not job.get('module'): Job().updateJob(job, otherFields={'jobInfoSpec': jobInfoSpec(job)})
Python
def canceled(self):
    """
    A property to indicate if a task has been canceled.

    :return: True if this task has been canceled, False otherwise.
    :rtype: bool
    """
    return is_revoked(self)
Python
def deepset(mapping, path, value): """Define deep entry in dict.""" if ':' not in path: mapping[path] = value else: key, sub = path.split(':', 1) submapping = mapping.setdefault(key, {}) deepset(submapping, sub, value)
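For example, a colon-separated path expands into nested dictionaries:

# Worked example: the path is split on ':' and nested dicts are created as needed.
config = {}
deepset(config, 'database:credentials:user', 'admin')
deepset(config, 'database:port', 5432)
# config == {'database': {'credentials': {'user': 'admin'}, 'port': 5432}}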
Python
def fields(self): """Gather all reference fields in all formats.""" return [ field for spec in self for field in spec.fields ]
Python
def poverty_scale_get_income_limit(household_size:int=1, multiplier:float=1.0, state=None)->Union[int, None]: """ Return the income limit matching the given household size. """ ps_data = get_poverty_scale_data() if not ps_data: return None if state and state.lower() == 'hi': poverty_base = int(ps_data.get("poverty_base_hi")) poverty_increment = int(ps_data.get("poverty_increment_hi")) elif state and state.lower() == 'ak': poverty_base = int(ps_data.get("poverty_base_ak")) poverty_increment = int(ps_data.get("poverty_increment_ak")) else: poverty_base = int(ps_data.get("poverty_base")) poverty_increment = int(ps_data.get("poverty_increment")) additional_income_allowed = household_size * poverty_increment household_income_limit = (poverty_base + additional_income_allowed) * multiplier return int(household_income_limit)
Python
def poverty_scale_income_qualifies(total_monthly_income:float, household_size:int=1, multiplier:float=1.0, state=None)->Union[bool,None]: """ Given monthly income, household size, and an optional multiplier, return whether an individual is at or below the federal poverty level. Returns None if the poverty level data JSON could not be loaded. """ # Globals: poverty_increment and poverty_base household_income_limit = poverty_scale_get_income_limit(household_size=household_size, multiplier=multiplier, state=state) if not household_income_limit: return None return int((household_income_limit)/12) >= int(total_monthly_income)
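A usage sketch, assuming the poverty-scale JSON loads successfully; the income figure and multiplier are illustrative and the result depends on the data returned by get_poverty_scale_data().

# Usage sketch with illustrative numbers.
qualifies = poverty_scale_income_qualifies(
    total_monthly_income=2500,
    household_size=4,
    multiplier=1.25,
)
if qualifies is None:
    print("Poverty scale data unavailable")
elif qualifies:
    print("Household is at or below the adjusted poverty guideline")
else:
    print("Household is above the adjusted poverty guideline")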
Python
def input_fn_builder(examples, window_size, stride, tokenizer): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] d = tf.data.Dataset.from_generator( functools.partial(convert_examples_to_features, examples=examples, window_size=window_size, stride=stride, tokenizer=tokenizer), dict(unique_ids=tf.int32, input_ids=tf.int32, input_mask=tf.int32, input_type_ids=tf.int32, extract_indices=tf.int32), dict(unique_ids=tf.TensorShape([]), input_ids=tf.TensorShape([window_size]), input_mask=tf.TensorShape([window_size]), input_type_ids=tf.TensorShape([window_size]), extract_indices=tf.TensorShape([window_size]))) d = d.batch(batch_size=batch_size, drop_remainder=False) return d return input_fn
Python
def count_rating(self): """ Returns number of likes minus dislikes """ likes = 0 dislikes = 0 try: likes = self.comment_like.users.count() except Comment.comment_like.RelatedObjectDoesNotExist as identifier: CommentLike.objects.create(comment=self) try: dislikes = self.comment_dislike.users.count() except Comment.comment_dislike.RelatedObjectDoesNotExist as identifier: CommentDislike.objects.create(comment=self) return likes - dislikes
Python
def HandleCoaPacket(self, pkt):
    """CoA packet handler.

    Function that is called when a valid CoA packet has been received.

    :param pkt: packet to process
    :type pkt: Packet class instance
    """
    print("Received a coa request %d" % pkt.code)
    print(" Attributes: ")
    for attr in pkt.keys():
        print(" %s: %s" % (attr, pkt[attr]))

    reply = self.CreateReplyPacket(pkt)
    # try ACK or NACK
    # reply.code = packet.CoANAK
    if "Framed-IP-Netmask" in pkt.keys():
        reply.code = packet.CoANAK
    else:
        reply.code = packet.CoAACK
    self.SendReplyPacket(pkt.fd, reply)
Python
def skewt(p, T, Td, u, v): """ Adapted from the Metpy advanced sounding example (https://unidata.github.io/MetPy/latest/examples/Advanced_Sounding.html#sphx-glr-examples-advanced-sounding-py) """ fig = plt.figure(figsize=(9, 9)) skew = SkewT(fig) # Plot the data using normal plotting functions, in this case using # log scaling in Y, as dictated by the typical meteorological plot. skew.plot(p, T, "r") skew.plot(p, Td, "g") # Calculate LCL height and plot as black dot. Because `p`'s first value is # ~1000 mb and its last value is ~250 mb, the `0` index is selected for # `p`, `T`, and `Td` to lift the parcel from the surface. If `p` was inverted, # i.e. start from low value, 250 mb, to a high value, 1000 mb, the `-1` index # should be selected. # lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0]) # skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black') # Calculate full parcel profile and add to plot as black line # prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC') # skew.plot(p, prof, 'k', linewidth=2) # Shade areas of CAPE and CIN # skew.shade_cin(p, T, prof, Td) # skew.shade_cape(p, T, prof) # An example of a slanted line at constant T -- in this case the 0 # isotherm # skew.ax.axvline(0, color='c', linestyle='--', linewidth=2) # Add the relevant special lines skew.plot_dry_adiabats() skew.plot_moist_adiabats() skew.plot_mixing_lines() skew.ax.set_ylim(1050, p.min()) skew.ax.set_xlim(-10, 30) return fig
Python
def load_csv(fn, n_header_lines=3): """ Load CCN datafile with filename `fn` and return as xarray.Dataset """ meta = _load_meta(fn, nlines=n_header_lines) df = pd.read_csv(fn, skiprows=n_header_lines) # cleanup column names df.columns = [s.strip().lower().replace(" ", "_") for s in df.columns] # make times into datetimes df["time"] = meta["date"] + "T" + df["time"] + "Z" df["time"] = pd.to_datetime(df["time"], format="%m/%d/%yT%H:%M:%SZ", utc=True) ds = xr.Dataset.from_dataframe(df) ds = ds.swap_dims(dict(index="time")) ds.attrs.update(meta) return ds
Python
def load_all(data_path): """ Load all CCN files in `data_path` and combine into single xarray.Dataset """ all_ds = [load_csv(fn=fn) for fn in Path(data_path).glob("*.csv")] if len(all_ds) == 0: raise Exception("No CCN data found in `{}`".format(data_path)) else: return xr.concat(all_ds, dim="time")
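A short sketch of loading the CCN data with the helpers above; the directory path is illustrative.

# Usage sketch; "data/ccn" stands in for a directory of CCN .csv files.
ds = load_all("data/ccn")
print(ds.time.values[0], ds.attrs.get("date"))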
Python
def load_nc(path, time): """Load the netCDF dataset corresponding to the given time This function finds files matching the AERIS :data:`nc_filename` formatted for the given time Args: path (str): The directory containing GOES netCDF files time (datetime.datetime): The time of the file to load Returns: xarray.dataset: The loaded netCDF file with values filtered where the coordinates are NaN Raises: FileNotFoundError: If the netCDF is not present FileExistsError: If multiple matching netCDF files are found with the same name """ filename = nc_filename.format( year=time.year, day=time.timetuple().tm_yday, hour=time.hour, minute=time.minute, something="*", ) # Find matching filenames in the GOES folder file_path = list(pathlib.Path(path).rglob(filename)) if len(file_path) == 0: raise FileNotFoundError("No GOES data found in {} for {}".format(path, time)) elif len(file_path) > 1: raise FileExistsError( "More than one file found in {} for {}".format(path, time) ) dataset = xr.load_dataset(str(file_path[0])) # Remove values where the coordinates are NaNs dataset = dataset.where(~dataset.longitude.isnull(), drop=True) return dataset
Python
def colored_line_plot(ax, x, y, color, vmin=None, vmax=None, cmap="gray",
                      cmap_steps=0, **kwargs):
    """Add a multicolored line to an existing plot

    Args:
        x (np.array): The x points of the plot
        y (np.array): The y points of the plot
        color (np.array): The color of the line at the xy points
        vmin (scalar, optional): The minimum of the colorscale. Defaults to
            the minimum of the color array.
        vmax (scalar, optional): The maximum of the colorscale. Defaults to
            the maximum of the color array.
        cmap (str, optional): Colormap to plot. Default is grey.
        cmap_steps (int, optional): Number of discrete steps in the
            colorscale. Defaults to zero for a continuous colorscale.
        kwargs: Other keyword arguments to pass to LineCollection

    Returns:
        matplotlib.collections.LineCollection: The plotted LineCollection.
            Required as argument to :py:func:`matplotlib.pyplot.colorbar`
    """
    # Set the color scalings
    if vmin is None:
        vmin = color.min()
    if vmax is None:
        vmax = color.max()

    # Break the xy points up in to line segments
    segments = np.array([(x[:-1].values, x[1:].values),
                         (y[:-1].values, y[1:].values)])
    segments = np.transpose(segments, axes=(2, 1, 0))

    # Create discretised colourmap
    cmap = plt.get_cmap(cmap)
    if cmap_steps != 0:
        cmap = mpl.colors.ListedColormap(
            [cmap(n / (cmap_steps - 1)) for n in range(cmap_steps)])

    # Collect the line segments
    lc = LineCollection(segments, cmap=cmap, norm=plt.Normalize(vmin, vmax),
                        **kwargs)

    # Set the line color to the specified array
    lc.set_array(color)

    # Add the colored line to the existing plot
    ax.add_collection(lc)

    # autoscale if limits haven't already been set so that the linecollection
    # is visible
    if ax.get_xlim() == (0, 1) and ax.get_ylim() == (0, 1):
        ax.autoscale()

    return lc
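A minimal sketch of calling colored_line_plot. The function accesses .values on slices of x and y, so the inputs are assumed to be pandas/xarray-like series rather than bare numpy arrays; the data below is synthetic.

# Usage sketch with synthetic data.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

t = np.linspace(0, 2 * np.pi, 200)
x = pd.Series(np.cos(t))
y = pd.Series(np.sin(t))

fig, ax = plt.subplots()
lc = colored_line_plot(ax, x, y, color=t, cmap="viridis")
fig.colorbar(lc, ax=ax, label="parameter")
plt.show()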
Python
def load_segments(filename): """Read a segments yaml file created with twinotter.plots.interactive_flight_track Args: filename (str): Returns: dict: """ with open(filename, "r") as data: segments = yaml.load(data, yaml.CLoader) return segments
Python
def count_segments(segments, segment_type): """Return the number of flight segments of the requested segment_type Args: segments (dict): Flight segments description from load_segments segment_type (str): The label of a segment type Returns: int: """ return len(_matching_segments(segments, segment_type))
Python
def extract_segments(ds, segments, segment_type, segment_idx=None): """Extract a subset of the given dataset with the segments requested Args: ds (xarray.DataSet): Flight dataset segments (dict): Flight segments description from load_segments segment_type (str): The label of a segment type segment_idx (int): The index of the segment within the flight (starts at zero) If the default of None is given then the returned dataset will contain all matching segments concatenated. Returns: xarray.DataSet: """ # All segments of the requested type matching_segments = _matching_segments(segments, segment_type) # If a single index is requested return that index of legs with the requested type if segment_idx is not None: segment = matching_segments[segment_idx] return ds.sel(Time=slice(segment["start"], segment["end"])) # Otherwise merge all legs with the requested type else: ds_matching = [] for segment in matching_segments: ds_matching.append(ds.sel(Time=slice(segment["start"], segment["end"]))) return xr.concat(ds_matching, dim="Time")
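The segment helpers compose naturally; a hedged sketch follows, where the file name and the "Level" segment label are illustrative and ds is a flight dataset with a Time coordinate.

# Usage sketch; names are illustrative.
segments = load_segments("flight330_segments.yaml")
n_legs = count_segments(segments, "Level")
print("Found {} level legs".format(n_legs))

# Second level leg only
ds_leg = extract_segments(ds, segments, "Level", segment_idx=1)

# All level legs concatenated along the Time dimension
ds_all_legs = extract_segments(ds, segments, "Level")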
Python
def geocolor(ax, ds, projection): """ Follows https://unidata.github.io/python-gallery/examples/mapping_GOES16_TrueColor.html Use origin="lower" for imshow because we are using an interpolated grid of data not the native layout for data used in the link. """ maxval = 120 gamma = 2.2 red = (ds.refl_0_65um_nom / maxval) ** (1 / gamma) green = (ds.refl_0_86um_nom / maxval) ** (1 / gamma) blue = (ds.refl_0_47um_nom / maxval) ** (1 / gamma) true_green = 0.45 * red + 0.1 * green + 0.45 * blue color_array = np.dstack([red, true_green, blue]) x = ds.longitude y = ds.latitude return ax.imshow( color_array, origin="lower", extent=[x.min(), x.max(), y.min(), y.max()], transform=projection, )
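geocolor expects an axes whose projection matches the lat/lon grid; a hedged cartopy sketch is below, with the data path and timestamp illustrative and load_nc taken from the helper above.

# Usage sketch with cartopy; the path and datetime are illustrative.
import datetime
import cartopy.crs as ccrs
import matplotlib.pyplot as plt

ds = load_nc("data/goes", datetime.datetime(2020, 2, 2, 14, 0))

projection = ccrs.PlateCarree()
ax = plt.axes(projection=projection)
ax.coastlines()
geocolor(ax, ds, projection)
plt.show()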
Python
def new_child(self, pid, pipe):
    """Invoked when a new child is spawned.

    Associates an event manager with this child and maintains the map.
    Manages the process. If this process is killed, the event manager is
    assigned to a new process.

    :param pid: PID of the new child process.
    :param pipe: Pipe to communicate with this child.
    """
    ev_manager = NfpEventManager(
        self._conf, self._controller,
        self._event_sequencer,
        pipe=pipe, pid=pid)
    self._resource_map.update(dict({pid: ev_manager}))
    super(NfpResourceManager, self).new_child(pid, pipe)
Python
def manager_run(self):
    """Invoked periodically to check on resources.

    a) Checks whether children are active or any were killed.
    b) Checks if there are messages from any of the workers.
    c) Dispatches the events ready to be handled to workers.
    """
    self._child_watcher()
    self._event_watcher()
Python
def _event_acked(self, event): """Post handling after event is dispatched to worker. """ if event.lifetime: message = "(event - %s) - dispatched, polling for expiry" % ( event.identify()) LOG.debug(message) self._controller.poll_add( event, event.lifetime, self._event_life_timedout)
Python
def _dispatch_event(self, event): """Dispatch event to a worker. """ load_info = self._load_init() event_manager, load_info = self._get_min_loaded_em(load_info) event_manager.dispatch_event(event)
Python
def process_events(self, events): """Process the consumed event. Based on the event type, new event will be added to cache, completed event is removed from cache, poll event is added to pollq. """ for event in events: message = "%s - processing event" % (event.identify()) LOG.debug(message) if IS_EVENT_GRAPH(event): self._execute_event_graph(event) elif IS_SCHEDULED_EVENT_GRAPHEVENT(event): self._scheduled_event_graph(event) elif IS_SCHEDULED_EVENT_ACK(event): self._scheduled_event_ack(event) elif IS_SCHEDULED_NEW_EVENT(event): self._scheduled_new_event(event) elif IS_EVENT_COMPLETE(event): self._scheduled_event_complete(event) else: self._non_schedule_event(event)
Python
def _event_watcher(self): """Watches for events for each event manager. Invokes each event manager to get events from workers. Also checks parent process event manager. """ events = [] # Get events from sequencer events = self._event_sequencer.run() for pid, event_manager in self._resource_map.iteritems(): events += event_manager.event_watcher(timeout=0.01) # Process the type of events received, dispatch only the # required ones. self.process_events(events)
Python
def _load_init(self):
    """Initializes load with current information. """
    load_info = []
    for pid, event_manager in self._resource_map.iteritems():
        load = event_manager.get_load()
        load_info.append([event_manager, load, pid])
    return load_info
Python
def _get_min_loaded_em(self, load_info): """Returns the min loaded event_manager. """ minloaded = min(load_info, key=lambda x: x[1]) load = minloaded[1] + 1 load_info[load_info.index(minloaded)][1] = load return minloaded[0], load_info
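The load bookkeeping is plain Python, so the selection policy can be illustrated with a toy load_info list; the strings below stand in for the NfpEventManager instances that _load_init() would normally return.

# Toy illustration of the min-load selection used by _get_min_loaded_em.
load_info = [['em_a', 3, 101], ['em_b', 1, 102], ['em_c', 2, 103]]
minloaded = min(load_info, key=lambda x: x[1])
print(minloaded[0])   # 'em_b': new events are dispatched to the least-loaded worker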
Python
def _get_event_manager(self, pid): """Returns event manager of a process. """ if pid == self._distributor_process_id: return self else: return self._resource_map.get(pid)
Python
def _event_life_timedout(self, event): """Callback for poller when event expires. """ message = "(event - %s) - expired" % (event.identify()) LOG.debug(message) self._scheduled_event_complete(event, expired=True)
Python
def _event_timedout(self, event):
    """Callback for poller when an event times out. """
    message = "(event - %s) - timedout" % (event.identify())
    LOG.debug(message)
    try:
        ref_event = self._event_cache[event.desc.poll_desc.ref]
        evmanager = self._get_event_manager(ref_event.desc.worker)
        assert evmanager
        evmanager.dispatch_event(
            event, event_type=nfp_event.POLL_EVENT,
            inc_load=False, cache=False)
    except KeyError as err:
        err = err
        message = "(event - %s) - timedout, not in cache" % (
            event.identify())
        LOG.error(message)
    except AssertionError as aerr:
        aerr = aerr
        # Process associated with event could be killed.
        # Ignore.
        pass
Python
def _stitch_proxy_ptg_to_l3p(self, context, ptg, l3p, subnet_ids): """Attach the Proxy PTG properly. When a proxy PTG is set, the proxied PTG needs to be detached from the current L3P. The proxied PTG will be attached instead on the proper subnets. This will completely isolate the proxied PTG, therefore the expectation is for a third entity (eg. service chain driver) to create a bridging service across the proxy and the proxied PTG. This will guarantee that all the traffic goes through the proxy PTG before reaching the destination. """ proxied = context._plugin.get_policy_target_group( context._plugin_context, ptg['proxied_group_id']) try: # If the detached PTG is a proxy itself and has a proxy # gateway, then the routes should be removed from the L3P and # added to the current proxy subnet instead. gateway_pt = None if proxied.get('proxied_group_id'): # Verify if a gateway PT exists gateway_pt = context._plugin.get_policy_targets( context._plugin_context.elevated(), {'policy_target_group_id': [proxied['id']], 'proxy_gateway': [True]}) if gateway_pt: self._unset_proxy_gateway_routes(context, gateway_pt[0]) # Detach Proxied PTG for subnet_id in proxied['subnets']: self._remove_router_interface( context._plugin_context, l3p['routers'][0], {'subnet_id': subnet_id}) # Attach Proxy PTG for subnet_id in subnet_ids: self._plug_router_to_subnet( context._plugin_context, subnet_id, l3p['routers'][0]) # Reset the proxy gateway PT routes if gateway_pt: self._set_proxy_gateway_routes(context, gateway_pt[0]) except n_exc.InvalidInput: # This exception is not expected. # TODO(ivar): find a better way to rollback LOG.exception(_LE("adding subnet to router failed")) for subnet_id in subnet_ids: self._delete_subnet(context._plugin_context, subnet_id) raise exc.GroupPolicyInternalError()
Python
def create_policy_rule_set_postcommit(self, context): """Each Policy Rule Set is mapped to a contract, with a single clause Each included Policy Rule will be mapped to one subject, which includes one rule. The clause has no matcher, but refers to all the subjects """ subjects = [] subject_names = [] for rule_id in context.current['policy_rules']: subject = self._make_odl_subject(context, rule_id) subjects.append(subject) subject_names.append(subject['name']) clauses = [ { "name": context.current['name'], "subject-refs": subject_names } ] contract_id = context.current['id'] contract_desc = context.current['name'] contract = { "id": contract_id, "description": contract_desc, "clause": clauses, "subject": subjects } tenant_id = uuid.UUID(context.current['tenant_id']).urn[9:] self.odl_manager.create_update_contract(tenant_id, contract)
Python
def _get_v2_keystone_admin_client(self): """ Returns keystone v2 client with admin credentials Using this client one can perform CRUD operations over keystone resources. """ keystone_conf = self.config.keystone_authtoken v2client = identity_client.Client( username=keystone_conf.admin_user, password=keystone_conf.admin_password, tenant_name=keystone_conf.admin_tenant_name, tenant_id=None, auth_url=self.identity_service) return v2client
Python
def _get_v3_keystone_admin_client(self): """ Returns keystone v3 client with admin credentials Using this client one can perform CRUD operations over keystone resources. """ keystone_conf = self.config.keystone_authtoken v3_auth_url = ('%s://%s:%s/%s/' % ( keystone_conf.auth_protocol, keystone_conf.auth_host, keystone_conf.auth_port, self.config.heat_driver.keystone_version)) v3client = keyclientv3.Client( username=keystone_conf.admin_user, password=keystone_conf.admin_password, domain_name="default", auth_url=v3_auth_url) return v3client
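As a hedged usage note (assuming these helpers return standard python-keystoneclient v2.0 / v3 client objects), the admin clients above are typically used as shown below; credentials and endpoints are placeholders, not values from this codebase.

from keystoneclient.v2_0 import client as identity_client
from keystoneclient.v3 import client as keyclientv3

# Placeholder admin credentials / endpoints, for illustration only.
v2client = identity_client.Client(
    username='admin', password='secret', tenant_name='admin',
    auth_url='http://controller:5000/v2.0')
print([tenant.name for tenant in v2client.tenants.list()])

v3client = keyclientv3.Client(
    username='admin', password='secret', domain_name='default',
    auth_url='http://controller:5000/v3')
print([project.name for project in v3client.projects.list()])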
Python
def attach_interface(self, token, tenant_id, instance_id, port_id):
    """ Attaches a port to an already created instance

    :param token: A scoped token
    :param tenant_id: Tenant UUID
    :param instance_id: UUID of the instance
    :param port_id: Port UUID
    """
    try:
        nova = nova_client.Client(self.nova_version, auth_token=token,
                                  tenant_id=tenant_id,
                                  auth_url=self.identity_service)
        instance = nova.servers.interface_attach(instance_id, port_id,
                                                 None, None)
        return instance
    except Exception as ex:
        err = ("Failed to attach interface %s to instance"
               " %s: %s" % (port_id, instance_id, ex))
        LOG.error(err)
        raise Exception(err)
Python
def detach_interface(self, token, tenant_id, instance_id, port_id):
    """ Detaches a port from an already created instance

    :param token: A scoped token
    :param tenant_id: Tenant UUID
    :param instance_id: UUID of the instance
    :param port_id: Port UUID
    """
    try:
        nova = nova_client.Client(self.nova_version, auth_token=token,
                                  tenant_id=tenant_id,
                                  auth_url=self.identity_service)
        instance = nova.servers.interface_detach(instance_id, port_id)
        return instance
    except Exception as ex:
        err = ("Failed to detach interface %s from instance"
               " %s: %s" % (port_id, instance_id, ex))
        LOG.error(err)
        raise Exception(err)
Python
def create_instance(self, token, tenant_id, image_id, flavor,
                    nw_port_id_list, name, secgroup_name=None,
                    metadata=None, files=None, config_drive=False,
                    userdata=None, key_name='', different_hosts=None,
                    volume_support=False, volume_size="2"):
    """ Launch a VM with given details

    :param token: A scoped token
    :param tenant_id: Tenant UUID
    :param image_id: Image UUID
    :param flavor: Flavor name
    :param nw_port_id_list: Network UUID and port UUID list
    :param name: Service instance name
    :param secgroup_name: Nova security group name
    :param metadata: metadata key-value pairs
    :param files: List of files to be copied.
    :example files: [{"dst": <destination_path_string>,
                      "src": <file_contents>}]
    :param userdata: user data file name
    :param key_name: Nova keypair name
    :param different_hosts: Different host filter (List)
    :param volume_support: volume support to launch instance
    :param volume_size: cinder volume size in GB

    :return: VM instance UUID
    """
    kwargs = dict()
    if volume_support:
        block_device_mapping_v2 = [
            {
                "boot_index": "1",
                "uuid": image_id,
                "source_type": "image",
                "volume_size": volume_size,
                "destination_type": "volume",
                "delete_on_termination": True
            }
        ]
        kwargs.update(block_device_mapping_v2=block_device_mapping_v2)

    if different_hosts:
        kwargs.update(scheduler_hints={"different_host": different_hosts})
    if key_name != '':
        kwargs.update(key_name=key_name)
    if config_drive is True:
        kwargs.update(config_drive=True)
    if userdata is not None and type(userdata) is str:
        kwargs.update(userdata=userdata)
    if metadata is not None and type(metadata) is dict and metadata != {}:
        kwargs.update(meta=metadata)
    if files is not None and type(files) is list and files != []:
        kwargs.update(files=files)
    if nw_port_id_list:
        nics = [{"port-id": entry.get("port"),
                 "net-id": entry.get("uuid"),
                 "v4-fixed-ip": entry.get("fixed_ip")}
                for entry in nw_port_id_list]
        kwargs.update(nics=nics)
    if secgroup_name:
        kwargs.update(security_groups=[secgroup_name])

    try:
        nova = nova_client.Client(self.nova_version, auth_token=token,
                                  tenant_id=tenant_id,
                                  auth_url=self.identity_service)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name, nova.images.get(image_id),
                                       flavor, **kwargs)
        data = instance.to_dict()
        return data['id']
    except Exception as ex:
        err = ("Failed to create instance under tenant"
               " %s: %s" % (tenant_id, ex))
        LOG.error(err)
        raise Exception(err)
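The expected shape of `nw_port_id_list` (and of the `files` argument documented above) is only implied by the code. The following runnable snippet illustrates those shapes and the `nics` list that gets derived from them; all identifiers are placeholders.

# Placeholders only; mirrors the keys create_instance() reads.
nw_port_id_list = [
    {'uuid': 'net-1111', 'port': 'port-aaaa', 'fixed_ip': '10.0.0.10'},
    {'uuid': 'net-2222', 'port': 'port-bbbb', 'fixed_ip': None},
]
nics = [{'port-id': entry.get('port'),
         'net-id': entry.get('uuid'),
         'v4-fixed-ip': entry.get('fixed_ip')}
        for entry in nw_port_id_list]
print(nics)

# 'files' entries carry a destination path and the file contents,
# as documented in the docstring above.
files = [{'dst': '/etc/service.conf', 'src': 'key = value\n'}]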
Python
def update_policy_target_group(self, token, ptg_id, policy_target_group_info): """ Updates a GBP Policy Target Group :param token: A scoped token :param ptg_id: PTG UUID :param policy_target_group_info: PTG info dict :return: PTG dict """ try: gbp = gbp_client.Client(token=token, endpoint_url=self.network_service) return gbp.update_policy_target_group( ptg_id, body=policy_target_group_info)['policy_target_group'] except Exception as ex: err = ("Failed to update policy target group. Error :: %s" % (ex)) LOG.error(err) raise Exception(err)
Python
def create_policy_target_group(self, token, tenant_id, name,
                               l2_policy_id=None):
    """ Creates a GBP Policy Target Group

    :param token: A scoped token
    :param tenant_id: Tenant UUID
    :param name: PTG name
    :param l2_policy_id: L2 policy UUID (optional)

    :return: PTG dict
    """
    policy_target_group_info = {
        "policy_target_group": {
            "tenant_id": tenant_id,
            "name": name,
        }
    }
    if l2_policy_id:
        policy_target_group_info["policy_target_group"].update(
            {"l2_policy_id": l2_policy_id})

    try:
        gbp = gbp_client.Client(token=token,
                                endpoint_url=self.network_service)
        return gbp.create_policy_target_group(
            body=policy_target_group_info)['policy_target_group']
    except Exception as ex:
        err = ("Failed to create policy target group. %s"
               " Error :: %s" % (policy_target_group_info, ex))
        LOG.error(err)
        raise Exception(err)
Python
def create_network_function_device(self, event):
    """ Returns a device instance for a new service.

    This method either returns an existing device that can be reused
    for a new service or creates a new device instance.
    """
    device = None
    nfp_context = event.data

    nfd_request = self._prepare_failure_case_device_data(nfp_context)
    service_details = nfp_context['service_details']

    LOG.info(_LI("Device Orchestrator received create network service "
                 "device request with data %(data)s"),
             {'data': nfd_request})

    orchestration_driver = self._get_orchestration_driver(
        service_details['service_vendor'])

    device_data = self._prepare_device_data_from_nfp_context(nfp_context)

    LOG.info(_LI("Creating new device, "
                 "device request: %(device)s"), {'device': nfd_request})

    driver_device_info = (
        orchestration_driver.create_network_function_device(
            device_data))
    if not driver_device_info:
        LOG.info(_LI("Device creation failed"))
        self._create_event(event_id='DEVICE_ERROR',
                           event_data=nfd_request,
                           is_internal_event=True)
        return None

    management = nfp_context['management']
    management['port'] = driver_device_info[
        'mgmt_neutron_port_info']['neutron_port']
    management['port']['ip_address'] = management[
        'port']['fixed_ips'][0]['ip_address']
    management['subnet'] = driver_device_info[
        'mgmt_neutron_port_info']['neutron_subnet']

    # Update the newly created device with the required params
    device = self._update_device_data(driver_device_info, device_data)
    device['network_function_device_id'] = device['id']

    # Create a DB entry with status as DEVICE_SPAWNING
    network_function_device = (
        self._create_network_function_device_db(device,
                                                'DEVICE_SPAWNING'))

    # REVISIT(mak) Wrong, but the nfp_db method needs it in this format
    network_function_device['mgmt_port_id'] = device['mgmt_port_id']
    nfp_context['network_function_device'] = network_function_device

    # Create an event for NSO, to convey the device_id
    device_created_data = {
        'network_function_instance_id': (
            nfp_context['network_function_instance']['id']),
        'network_function_device_id': device['id']
    }

    self._create_event(event_id='DEVICE_SPAWNING',
                       event_data=nfp_context,
                       is_poll_event=True,
                       original_event=event)

    self._create_event(event_id='DEVICE_CREATED',
                       event_data=device_created_data)
Python
def relevant_specs(self): """Get specs on the SCI containing this particular Node.""" if not self._relevant_specs: self._relevant_specs = [x for x in self._service_chain_specs if self.current_node['id'] in x['nodes']] return self._relevant_specs
Python
def check_in_use(f): """Check if instance of task executor is already fired and executing jobs. """ def wrapped(self, *args, **kwargs): if self.fired: raise InUse("Executor in use") return f(self, *args, **kwargs) return wrapped
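A minimal, self-contained sketch of how this decorator is applied; the `TaskExecutor` class body below is an assumption made purely for illustration, based only on the attributes the decorator touches.

# Minimal sketch, assuming an executor with a 'fired' attribute and the
# InUse exception, as implied by check_in_use() above.
class InUse(Exception):
    pass


def check_in_use(f):
    def wrapped(self, *args, **kwargs):
        if self.fired:
            raise InUse("Executor in use")
        return f(self, *args, **kwargs)
    return wrapped


class TaskExecutor(object):
    def __init__(self):
        self.fired = False

    @check_in_use
    def fire(self, jobs):
        self.fired = True
        return list(jobs)


executor = TaskExecutor()
executor.fire(['job-1'])      # works: executor not yet fired
try:
    executor.fire(['job-2'])  # raises InUse: already fired
except InUse as exc:
    print(exc)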
Python
def register(self, event_id, event_handler): """Registers a handler for event_id. Also fetches the decorated poll handlers if any for the event and caches it. """ if not isinstance(event_handler, nfp_api.NfpEventHandler): message = "%s - Handler is not instance of NfpEventHandler" % ( self._log_meta(event_id, event_handler)) LOG.error(message) return try: poll_desc_table = event_handler.get_poll_desc_table() poll_handler = poll_desc_table[event_id] spacing = poll_handler._spacing except KeyError: # Default the poll handler and spacing values poll_handler = event_handler.handle_poll_event spacing = 0 try: self._event_desc_table[event_id].append( (event_handler, poll_handler, spacing)) except KeyError: self._event_desc_table[event_id] = [ (event_handler, poll_handler, spacing)] message = "%s - Registered handler" % ( self._log_meta(event_id, event_handler)) LOG.debug(message)
Python
def _wait_for_events(self, pipe, timeout=0.01):
    """Wait & pull events from the pipe.

    Wait till timeout for the first event and then pull as many as
    are already available.

    Returns: Events[] pulled from the pipe.
    """
    events = []
    try:
        while pipe.poll(timeout):
            timeout = 0
            event = self._controller.pipe_recv(pipe)
            events.append(event)
    except multiprocessing.TimeoutError as err:
        message = "%s" % (err)
        LOG.exception(message)
    return events
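The poll-then-drain pattern above can be reproduced with a standard multiprocessing pipe directly; this self-contained sketch assumes the controller's `pipe_recv()` reduces to `Connection.recv()`, and the event payloads are placeholders.

import multiprocessing


def wait_for_events(pipe, timeout=0.01):
    # Block up to 'timeout' for the first event, then drain whatever
    # else is already queued without waiting again.
    events = []
    while pipe.poll(timeout):
        timeout = 0
        events.append(pipe.recv())
    return events


if __name__ == '__main__':
    parent_end, child_end = multiprocessing.Pipe(duplex=True)
    child_end.send({'id': 'DEVICE_CREATED'})
    child_end.send({'id': 'DEVICE_SPAWNING'})
    print(wait_for_events(parent_end))   # both events drained in one call
    print(wait_for_events(parent_end))   # [] after the ~10ms timeout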
Python
def init_from_event_manager(self, em):
    """Initialize from an existing event manager.

    Invoked when an event manager has to take over an existing event
    manager. The whole cache is replaced and events are replayed.
    This is used when a worker dies: the dead worker's event manager
    is assigned to a new worker.
    """
    # Replay all the events from cache.
    self._cache = em._cache
Python
def pop_event(self, event):
    """Pop the passed event from the cache.

    Called when an event is completed or cancelled. If the event was
    sequenced, then the sequencer is released to schedule the next
    event. Removes the event from the cache.
    """
    message = "%s - pop event" % (self._log_meta(event))
    LOG.debug(message)
    try:
        self._cache.remove(event.desc.uuid)
        self._load -= 1
    except ValueError:
        message = "%s - event not in cache" % (
            self._log_meta(event))
        LOG.warning(message)
Python
def dispatch_event(self, event, event_type=None, inc_load=True, cache=True): """Dispatch event to the worker. Sends the event to worker through pipe. Increments load if event_type is SCHEDULED event, poll_event does not contribute to load. """ message = "%s - Dispatching to worker %d" % ( self._log_meta(event), self._pid) LOG.debug(message) # Update the worker information in the event. event.desc.worker = self._pid # Update the event with passed type if event_type: event.desc.type = event_type # Send to the worker self._controller.pipe_send(self._pipe, event) self._load = (self._load + 1) if inc_load else self._load # Add to the cache if cache: self._cache.append(event.desc.uuid)
Python
def register_events(self, event_descs): """Register event handlers with core. """ # REVISIT (mak): change name to register_event_handlers() ? for event_desc in event_descs: self._event_handlers.register(event_desc.id, event_desc.handler)
Python
def create_event(self, **kwargs): """To create a new event. """ event = None try: event = nfp_event.Event(**kwargs) # Get the logging context stored in thread logging_context = nfp_logging.get_logging_context() # Log metadata for event handling code event.context = logging_context except AssertionError as aerr: message = "%s" % (aerr) LOG.exception(message) return event
Python
def post_event_graph(self, event):
    """Post an event graph.

    As the base class, set only the required attributes of the event.
    """
    event.desc.type = nfp_event.EVENT_GRAPH
    event.desc.flag = ''
    event.desc.pid = os.getpid()
    return event
Python
def post_event(self, event): """Post an event. As a base class, it only does the descriptor preparation. NfpController class implements the required functionality. """ handler = self._event_handlers.get_event_handler(event.id) assert handler, "No handler registered for event %s" % (event.id) event.desc.type = nfp_event.SCHEDULE_EVENT event.desc.flag = nfp_event.EVENT_NEW event.desc.pid = os.getpid() return event
Python
def poll_event(self, event, spacing=2, max_times=sys.maxint): """To poll for an event. As a base class, it only does the polling descriptor preparation. NfpController class implements the required functionality. """ ev_spacing = self._event_handlers.get_poll_spacing(event.id) assert spacing or ev_spacing, "No spacing specified for polling" if ev_spacing: spacing = ev_spacing handler = self._event_handlers.get_poll_handler(event.id) assert handler, "No poll handler found for event %s" % (event.id) refuuid = event.desc.uuid event = self._make_new_event(event) event.lifetime = 0 event.desc.type = nfp_event.POLL_EVENT kwargs = {'spacing': spacing, 'max_times': max_times, 'ref': refuuid} poll_desc = nfp_event.PollDesc(**kwargs) setattr(event.desc, 'poll_desc', poll_desc) return event
Python
def event_complete(self, event, result=None):
    """To declare an event complete. """
    try:
        pickle.dumps(result)
        event.sequence = False
        event.desc.flag = nfp_event.EVENT_COMPLETE
        event.result = result
        return event
    except Exception as e:
        raise e
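The `pickle.dumps(result)` call is a guard: completion results travel between processes over a pipe, so they must be picklable. A small stdlib illustration of what passes and what gets rejected (the payloads are made up):

import pickle

# Results travel between processes over a pipe, so they must be picklable.
pickle.dumps({'status': 'ACTIVE', 'port_id': 'port-aaaa'})   # fine

try:
    pickle.dumps(lambda: None)   # lambdas cannot be pickled
except (pickle.PicklingError, TypeError, AttributeError) as exc:
    print('unpicklable result rejected: %s' % exc)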
Python
def fork_child(self, wrap):
    """Forks a child.

    Creates a full duplex pipe for the child & parent to communicate.

    Returns: Multiprocess object.
    """
    parent_pipe, child_pipe = PIPE(duplex=True)

    # Registered event handlers of the nfp module.
    # Workers need a copy of this data to dispatch an
    # event to a module.
    proc = self._fork(args=(wrap.service, parent_pipe, child_pipe, self))

    message = ("Forked a new child: %d. "
               "Parent pipe: %s, child pipe: %s") % (
        proc.pid, str(parent_pipe), str(child_pipe))
    LOG.info(message)

    try:
        wrap.child_pipe_map[proc.pid] = parent_pipe
    except AttributeError:
        setattr(wrap, 'child_pipe_map', {})
        wrap.child_pipe_map[proc.pid] = parent_pipe

    self._worker_process[proc.pid] = proc
    return proc.pid
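A minimal, self-contained sketch of the parent/child full-duplex pipe arrangement that `fork_child` sets up, using multiprocessing directly; the worker body and payloads are illustrative only, not the controller's actual worker loop.

import multiprocessing


def worker(child_pipe):
    # Child side: receive a request from the parent and reply on the same pipe.
    request = child_pipe.recv()
    child_pipe.send({'handled': request['id']})


if __name__ == '__main__':
    parent_pipe, child_pipe = multiprocessing.Pipe(duplex=True)
    proc = multiprocessing.Process(target=worker, args=(child_pipe,))
    proc.start()
    parent_pipe.send({'id': 'DEVICE_CREATED'})   # parent -> child
    print(parent_pipe.recv())                    # child -> parent
    proc.join()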
Python
def post_launch(self):
    """Post processing after workers launch.

    Tasks which need to run only on the distributor process, and any
    other resources which are not expected to be forked, are
    initialized here.
    """
    self._update_manager()

    # Launch rpc_agents
    for index, rpc_agent in enumerate(self._rpc_agents):
        # Use threads for launching the service
        launcher = oslo_service.launch(
            self._conf, rpc_agent[0], workers=None)
        self._rpc_agents[index] = rpc_agent + (launcher,)

    # One task to manage the resources - workers & events.
    eventlet.spawn_n(self._manager_task)
    # Oslo periodic task to poll for timer events
    nfp_poll.PollingTask(self._conf, self)
    # Oslo periodic task for state reporting
    nfp_rpc.ReportStateTask(self._conf, self)
Python
def report_state(self): """Invoked by report_task to report states of all agents. """ for agent in self._rpc_agents: rpc_agent = operator.itemgetter(0)(agent) rpc_agent.report_state()