body: string, lengths 26 to 98.2k
body_hash: int64, -9,222,864,604,528,158,000 to 9,221,803,474B
docstring: string, lengths 1 to 16.8k
path: string, lengths 5 to 230
name: string, lengths 1 to 96
repository_name: string, lengths 7 to 89
lang: string, 1 distinct value
body_without_docstring: string, lengths 20 to 98.2k
@abstractmethod def evaluate(self, sentences: Union[(List[Sentence], Dataset)], gold_label_type: str, out_path: Union[(str, Path)]=None, embedding_storage_mode: str='none', mini_batch_size: int=32, num_workers: int=8, main_evaluation_metric: Tuple[(str, str)]=('micro avg', 'f1-score'), exclude_labels: List[str]=[], gold_label_dictionary: Optional[Dictionary]=None) -> Result: "Evaluates the model. Returns a Result object containing evaluation\n results and a loss value. Implement this to enable evaluation.\n :param data_loader: DataLoader that iterates over dataset to be evaluated\n :param out_path: Optional output path to store predictions\n :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and\n freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU\n :return: Returns a Tuple consisting of a Result object and a loss float value\n " raise NotImplementedError
-3,746,999,487,125,572,600
Evaluates the model. Returns a Result object containing evaluation results and a loss value. Implement this to enable evaluation. :param data_loader: DataLoader that iterates over dataset to be evaluated :param out_path: Optional output path to store predictions :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU :return: Returns a Tuple consisting of a Result object and a loss float value
flair/nn/model.py
evaluate
MaxDall/flair
python
@abstractmethod def evaluate(self, sentences: Union[(List[Sentence], Dataset)], gold_label_type: str, out_path: Union[(str, Path)]=None, embedding_storage_mode: str='none', mini_batch_size: int=32, num_workers: int=8, main_evaluation_metric: Tuple[(str, str)]=('micro avg', 'f1-score'), exclude_labels: List[str]=[], gold_label_dictionary: Optional[Dictionary]=None) -> Result: "Evaluates the model. Returns a Result object containing evaluation\n results and a loss value. Implement this to enable evaluation.\n :param data_loader: DataLoader that iterates over dataset to be evaluated\n :param out_path: Optional output path to store predictions\n :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and\n freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU\n :return: Returns a Tuple consisting of a Result object and a loss float value\n " raise NotImplementedError
@abstractmethod def _get_state_dict(self): 'Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()\n functionality.' raise NotImplementedError
4,904,642,327,725,068,000
Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint() functionality.
flair/nn/model.py
_get_state_dict
MaxDall/flair
python
@abstractmethod def _get_state_dict(self): 'Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()\n functionality.' raise NotImplementedError
@staticmethod @abstractmethod def _init_model_with_state_dict(state): 'Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()\n functionality.' raise NotImplementedError
1,439,372,108,658,756,600
Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint() functionality.
flair/nn/model.py
_init_model_with_state_dict
MaxDall/flair
python
@staticmethod @abstractmethod def _init_model_with_state_dict(state): 'Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()\n functionality.' raise NotImplementedError
def save(self, model_file: Union[(str, Path)], checkpoint: bool=False): '\n Saves the current model to the provided file.\n :param model_file: the model file\n ' model_state = self._get_state_dict() optimizer = scheduler = None if hasattr(self, 'model_card'): if ('training_parameters' in self.model_card): training_parameters = self.model_card['training_parameters'] if ('optimizer' in training_parameters): optimizer = training_parameters['optimizer'] if checkpoint: training_parameters['optimizer_state_dict'] = optimizer.state_dict() training_parameters['optimizer'] = optimizer.__class__ if ('scheduler' in training_parameters): scheduler = training_parameters['scheduler'] if checkpoint: with warnings.catch_warnings(): warnings.simplefilter('ignore') training_parameters['scheduler_state_dict'] = scheduler.state_dict() training_parameters['scheduler'] = scheduler.__class__ model_state['model_card'] = self.model_card torch.save(model_state, str(model_file), pickle_protocol=4) if optimizer: self.model_card['training_parameters']['optimizer'] = optimizer if scheduler: self.model_card['training_parameters']['scheduler'] = scheduler
2,092,918,784,861,721,300
Saves the current model to the provided file. :param model_file: the model file
flair/nn/model.py
save
MaxDall/flair
python
def save(self, model_file: Union[(str, Path)], checkpoint: bool=False): '\n Saves the current model to the provided file.\n :param model_file: the model file\n ' model_state = self._get_state_dict() optimizer = scheduler = None if hasattr(self, 'model_card'): if ('training_parameters' in self.model_card): training_parameters = self.model_card['training_parameters'] if ('optimizer' in training_parameters): optimizer = training_parameters['optimizer'] if checkpoint: training_parameters['optimizer_state_dict'] = optimizer.state_dict() training_parameters['optimizer'] = optimizer.__class__ if ('scheduler' in training_parameters): scheduler = training_parameters['scheduler'] if checkpoint: with warnings.catch_warnings(): warnings.simplefilter('ignore') training_parameters['scheduler_state_dict'] = scheduler.state_dict() training_parameters['scheduler'] = scheduler.__class__ model_state['model_card'] = self.model_card torch.save(model_state, str(model_file), pickle_protocol=4) if optimizer: self.model_card['training_parameters']['optimizer'] = optimizer if scheduler: self.model_card['training_parameters']['scheduler'] = scheduler
@classmethod def load(cls, model: Union[(str, Path)]): '\n Loads the model from the given file.\n :param model: the model file\n :return: the loaded text classifier model\n ' model_file = cls._fetch_model(str(model)) with warnings.catch_warnings(): warnings.filterwarnings('ignore') f = file_utils.load_big_file(str(model_file)) state = torch.load(f, map_location='cpu') model = cls._init_model_with_state_dict(state) if ('model_card' in state): model.model_card = state['model_card'] model.eval() model.to(flair.device) return model
2,603,128,188,631,163,400
Loads the model from the given file. :param model: the model file :return: the loaded text classifier model
flair/nn/model.py
load
MaxDall/flair
python
@classmethod def load(cls, model: Union[(str, Path)]): '\n Loads the model from the given file.\n :param model: the model file\n :return: the loaded text classifier model\n ' model_file = cls._fetch_model(str(model)) with warnings.catch_warnings(): warnings.filterwarnings('ignore') f = file_utils.load_big_file(str(model_file)) state = torch.load(f, map_location='cpu') model = cls._init_model_with_state_dict(state) if ('model_card' in state): model.model_card = state['model_card'] model.eval() model.to(flair.device) return model
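The save() and load() records above describe a round trip through _get_state_dict() and _init_model_with_state_dict(). A minimal usage sketch, assuming flair is installed; the 'ner' model name and the output path are illustrative, not part of the dataset:

```python
# Hedged sketch of the save()/load() round trip documented in the records above.
from flair.models import SequenceTagger

tagger = SequenceTagger.load('ner')            # _fetch_model resolves the name, then _init_model_with_state_dict runs
tagger.save('my-tagger.pt')                    # serializes _get_state_dict() plus the model card
reloaded = SequenceTagger.load('my-tagger.pt') # loads the local file and moves the model to flair.device
```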
def forward_pass(self, sentences: Union[(List[DataPoint], DataPoint)], return_label_candidates: bool=False): 'This method does a forward pass through the model given a list of data points as input.\n Returns the tuple (scores, labels) if return_label_candidates = False, where scores are a tensor of logits\n produced by the decoder and labels are the string labels for each data point.\n Returns the tuple (scores, labels, data_points, candidate_labels) if return_label_candidates = True,\n where data_points are the data points to which labels are added (commonly either Sentence or Token objects)\n and candidate_labels are empty Label objects for each prediction (depending on the task Label,\n SpanLabel or RelationLabel).' raise NotImplementedError
-4,106,641,530,345,396,000
This method does a forward pass through the model given a list of data points as input. Returns the tuple (scores, labels) if return_label_candidates = False, where scores are a tensor of logits produced by the decoder and labels are the string labels for each data point. Returns the tuple (scores, labels, data_points, candidate_labels) if return_label_candidates = True, where data_points are the data points to which labels are added (commonly either Sentence or Token objects) and candidate_labels are empty Label objects for each prediction (depending on the task Label, SpanLabel or RelationLabel).
flair/nn/model.py
forward_pass
MaxDall/flair
python
def forward_pass(self, sentences: Union[(List[DataPoint], DataPoint)], return_label_candidates: bool=False): 'This method does a forward pass through the model given a list of data points as input.\n Returns the tuple (scores, labels) if return_label_candidates = False, where scores are a tensor of logits\n produced by the decoder and labels are the string labels for each data point.\n Returns the tuple (scores, labels, data_points, candidate_labels) if return_label_candidates = True,\n where data_points are the data points to which labels are added (commonly either Sentence or Token objects)\n and candidate_labels are empty Label objects for each prediction (depending on the task Label,\n SpanLabel or RelationLabel).' raise NotImplementedError
def predict(self, sentences: Union[(List[Sentence], Sentence)], mini_batch_size: int=32, return_probabilities_for_all_classes: bool=False, verbose: bool=False, label_name: Optional[str]=None, return_loss=False, embedding_storage_mode='none'): "\n Predicts the class labels for the given sentences. The labels are directly added to the sentences.\n :param sentences: list of sentences\n :param mini_batch_size: mini batch size to use\n :param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted\n :param verbose: set to True to display a progress bar\n :param return_loss: set to True to return loss\n :param label_name: set this to change the name of the label type that is predicted\n :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if\n you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.\n 'gpu' to store embeddings in GPU memory.\n " if (label_name is None): label_name = (self.label_type if (self.label_type is not None) else 'label') with torch.no_grad(): if (not sentences): return sentences if isinstance(sentences, DataPoint): sentences = [sentences] if isinstance(sentences[0], DataPoint): sentences = [sentence for sentence in sentences if (len(sentence) > 0)] if (len(sentences) == 0): return sentences rev_order_len_index = sorted(range(len(sentences)), key=(lambda k: len(sentences[k])), reverse=True) reordered_sentences: List[Union[(DataPoint, str)]] = [sentences[index] for index in rev_order_len_index] dataloader = DataLoader(dataset=SentenceDataset(reordered_sentences), batch_size=mini_batch_size) if verbose: dataloader = tqdm(dataloader) overall_loss = 0 batch_no = 0 label_count = 0 for batch in dataloader: batch_no += 1 if verbose: dataloader.set_description(f'Inferencing on batch {batch_no}') if (not batch): continue (scores, gold_labels, data_points, label_candidates) = self.forward_pass(batch, return_label_candidates=True) for sentence in data_points: sentence.remove_labels(label_name) if return_loss: overall_loss += self._calculate_loss(scores, gold_labels)[0] label_count += len(label_candidates) if (len(label_candidates) > 0): if self.multi_label: sigmoided = torch.sigmoid(scores) n_labels = sigmoided.size(1) for (s_idx, (data_point, label_candidate)) in enumerate(zip(data_points, label_candidates)): for l_idx in range(n_labels): label_value = self.label_dictionary.get_item_for_index(l_idx) if (label_value == 'O'): continue label_threshold = self._get_label_threshold(label_value) label_score = sigmoided[(s_idx, l_idx)].item() if ((label_score > label_threshold) or return_probabilities_for_all_classes): label = label_candidate.spawn(value=label_value, score=label_score) data_point.add_complex_label(label_name, label) else: softmax = torch.nn.functional.softmax(scores, dim=(- 1)) if return_probabilities_for_all_classes: n_labels = softmax.size(1) for (s_idx, (data_point, label_candidate)) in enumerate(zip(data_points, label_candidates)): for l_idx in range(n_labels): label_value = self.label_dictionary.get_item_for_index(l_idx) if (label_value == 'O'): continue label_score = softmax[(s_idx, l_idx)].item() label = label_candidate.spawn(value=label_value, score=label_score) data_point.add_complex_label(label_name, label) else: (conf, idx) = torch.max(softmax, dim=(- 1)) for (data_point, label_candidate, c, i) in zip(data_points, label_candidates, conf, idx): label_value = self.label_dictionary.get_item_for_index(i.item()) if (label_value == 'O'): continue label = label_candidate.spawn(value=label_value, score=c.item()) data_point.add_complex_label(label_name, label) store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return (overall_loss, label_count)
-3,447,273,615,040,302,600
Predicts the class labels for the given sentences. The labels are directly added to the sentences. :param sentences: list of sentences :param mini_batch_size: mini batch size to use :param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory.
flair/nn/model.py
predict
MaxDall/flair
python
def predict(self, sentences: Union[(List[Sentence], Sentence)], mini_batch_size: int=32, return_probabilities_for_all_classes: bool=False, verbose: bool=False, label_name: Optional[str]=None, return_loss=False, embedding_storage_mode='none'): "\n Predicts the class labels for the given sentences. The labels are directly added to the sentences.\n :param sentences: list of sentences\n :param mini_batch_size: mini batch size to use\n :param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted\n :param verbose: set to True to display a progress bar\n :param return_loss: set to True to return loss\n :param label_name: set this to change the name of the label type that is predicted\n :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if\n you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.\n 'gpu' to store embeddings in GPU memory.\n " if (label_name is None): label_name = (self.label_type if (self.label_type is not None) else 'label') with torch.no_grad(): if (not sentences): return sentences if isinstance(sentences, DataPoint): sentences = [sentences] if isinstance(sentences[0], DataPoint): sentences = [sentence for sentence in sentences if (len(sentence) > 0)] if (len(sentences) == 0): return sentences rev_order_len_index = sorted(range(len(sentences)), key=(lambda k: len(sentences[k])), reverse=True) reordered_sentences: List[Union[(DataPoint, str)]] = [sentences[index] for index in rev_order_len_index] dataloader = DataLoader(dataset=SentenceDataset(reordered_sentences), batch_size=mini_batch_size) if verbose: dataloader = tqdm(dataloader) overall_loss = 0 batch_no = 0 label_count = 0 for batch in dataloader: batch_no += 1 if verbose: dataloader.set_description(f'Inferencing on batch {batch_no}') if (not batch): continue (scores, gold_labels, data_points, label_candidates) = self.forward_pass(batch, return_label_candidates=True) for sentence in data_points: sentence.remove_labels(label_name) if return_loss: overall_loss += self._calculate_loss(scores, gold_labels)[0] label_count += len(label_candidates) if (len(label_candidates) > 0): if self.multi_label: sigmoided = torch.sigmoid(scores) n_labels = sigmoided.size(1) for (s_idx, (data_point, label_candidate)) in enumerate(zip(data_points, label_candidates)): for l_idx in range(n_labels): label_value = self.label_dictionary.get_item_for_index(l_idx) if (label_value == 'O'): continue label_threshold = self._get_label_threshold(label_value) label_score = sigmoided[(s_idx, l_idx)].item() if ((label_score > label_threshold) or return_probabilities_for_all_classes): label = label_candidate.spawn(value=label_value, score=label_score) data_point.add_complex_label(label_name, label) else: softmax = torch.nn.functional.softmax(scores, dim=(- 1)) if return_probabilities_for_all_classes: n_labels = softmax.size(1) for (s_idx, (data_point, label_candidate)) in enumerate(zip(data_points, label_candidates)): for l_idx in range(n_labels): label_value = self.label_dictionary.get_item_for_index(l_idx) if (label_value == 'O'): continue label_score = softmax[(s_idx, l_idx)].item() label = label_candidate.spawn(value=label_value, score=label_score) data_point.add_complex_label(label_name, label) else: (conf, idx) = torch.max(softmax, dim=(- 1)) for (data_point, label_candidate, c, i) in zip(data_points, label_candidates, conf, idx): label_value = self.label_dictionary.get_item_for_index(i.item()) if (label_value == 'O'): continue label = label_candidate.spawn(value=label_value, score=c.item()) data_point.add_complex_label(label_name, label) store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return (overall_loss, label_count)
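As a rough illustration of the predict() contract described above (labels are attached in place to the passed sentences), assuming flair is installed and the pretrained 'ner' tagger downloads successfully; the example sentence is made up:

```python
# Minimal sketch, not from the dataset: predictions are written onto the Sentence object.
from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load('ner')
sentence = Sentence('George Washington went to Washington.')
tagger.predict(sentence, mini_batch_size=32)
print(sentence.to_tagged_string())
```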
def __init__(__self__, resource_name, opts=None, charset=None, collation=None, instance=None, name=None, project=None, __props__=None, __name__=None, __opts__=None): '\n Create a Database resource with the given unique name, props, and options.\n \n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] project: The ID of the project in which the resource belongs.\n If it is not provided, the provider project is used.\n\n > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_database.html.markdown.\n ' if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['charset'] = charset __props__['collation'] = collation if (instance is None): raise TypeError("Missing required property 'instance'") __props__['instance'] = instance __props__['name'] = name __props__['project'] = project __props__['self_link'] = None super(Database, __self__).__init__('gcp:sql/database:Database', resource_name, __props__, opts)
-8,634,422,539,388,717,000
Create a Database resource with the given unique name, props, and options. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_database.html.markdown.
sdk/python/pulumi_gcp/sql/database.py
__init__
23doors/pulumi-gcp
python
def __init__(__self__, resource_name, opts=None, charset=None, collation=None, instance=None, name=None, project=None, __props__=None, __name__=None, __opts__=None): '\n Create a Database resource with the given unique name, props, and options.\n \n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] project: The ID of the project in which the resource belongs.\n If it is not provided, the provider project is used.\n\n > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_database.html.markdown.\n ' if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['charset'] = charset __props__['collation'] = collation if (instance is None): raise TypeError("Missing required property 'instance'") __props__['instance'] = instance __props__['name'] = name __props__['project'] = project __props__['self_link'] = None super(Database, __self__).__init__('gcp:sql/database:Database', resource_name, __props__, opts)
@staticmethod def get(resource_name, id, opts=None, charset=None, collation=None, instance=None, name=None, project=None, self_link=None): "\n Get an existing Database resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n \n :param str resource_name: The unique name of the resulting resource.\n :param str id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] project: The ID of the project in which the resource belongs.\n If it is not provided, the provider project is used.\n :param pulumi.Input[str] self_link: The URI of the created resource.\n\n > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_database.html.markdown.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__['charset'] = charset __props__['collation'] = collation __props__['instance'] = instance __props__['name'] = name __props__['project'] = project __props__['self_link'] = self_link return Database(resource_name, opts=opts, __props__=__props__)
-3,593,032,024,044,406,000
Get an existing Database resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param str id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] self_link: The URI of the created resource. > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_database.html.markdown.
sdk/python/pulumi_gcp/sql/database.py
get
23doors/pulumi-gcp
python
@staticmethod def get(resource_name, id, opts=None, charset=None, collation=None, instance=None, name=None, project=None, self_link=None): "\n Get an existing Database resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n \n :param str resource_name: The unique name of the resulting resource.\n :param str id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] project: The ID of the project in which the resource belongs.\n If it is not provided, the provider project is used.\n :param pulumi.Input[str] self_link: The URI of the created resource.\n\n > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_database.html.markdown.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__['charset'] = charset __props__['collation'] = collation __props__['instance'] = instance __props__['name'] = name __props__['project'] = project __props__['self_link'] = self_link return Database(resource_name, opts=opts, __props__=__props__)
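For context, a hedged Pulumi program sketch that exercises the Database constructor recorded above; it must run inside a Pulumi program with pulumi_gcp installed and a GCP project configured, and the resource and instance names are hypothetical:

```python
# Illustrative only: assumes a Cloud SQL instance named "my-instance" already exists
# in the configured project; arguments mirror the recorded __init__ signature.
import pulumi_gcp as gcp

database = gcp.sql.Database(
    "users-db",                    # resource_name
    instance="my-instance",
    charset="utf8",
    collation="utf8_general_ci")
```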
def parse_args(): 'Parse input arguments\n\n Use --help to see a pretty description of the arguments\n ' if ('ipykernel' in sys.argv[0]): sys.argv = [sys.argv[0]] parser = argparse.ArgumentParser() parser.add_argument('-n', type=int, default=15, choices=[8, 15, 24, 35, 48, 63, 80], help='Number of tiles') parser.add_argument('--random_seed', '-s', type=int, default=1, help='Seed to use for RNGs') parser.add_argument('--macro_type', '-m', type=str, default='primitive', choices=['primitive', 'random', 'learned'], help='Type of macro_list to consider during search') parser.add_argument('--search_alg', type=str, default='gbfs', choices=['astar', 'gbfs', 'weighted_astar', 'bfws_r0', 'bfws_rg'], help='Search algorithm to run') parser.add_argument('--g_weight', type=float, default=None, help='Weight for g-score in weighted A*') parser.add_argument('--h_weight', type=float, default=None, help='Weight for h-score in weighted A*') parser.add_argument('--random_goal', '-r', action='store_true', default=False, help='Generate a random goal instead of the default solve configuration') parser.add_argument('--max_transitions', type=(lambda x: int(float(x))), default=500000.0, help='Maximum number of state transitions') parser.add_argument('--bfws_precision', type=int, default=3, help='The number of width values, w \\in {1,...,P}, to use when the search algorithm is best-first width search') return parser.parse_args()
7,348,549,755,786,784,000
Parse input arguments Use --help to see a pretty description of the arguments
experiments/npuzzle/solve.py
parse_args
camall3n/focused-macros
python
def parse_args(): 'Parse input arguments\n\n Use --help to see a pretty description of the arguments\n ' if ('ipykernel' in sys.argv[0]): sys.argv = [sys.argv[0]] parser = argparse.ArgumentParser() parser.add_argument('-n', type=int, default=15, choices=[8, 15, 24, 35, 48, 63, 80], help='Number of tiles') parser.add_argument('--random_seed', '-s', type=int, default=1, help='Seed to use for RNGs') parser.add_argument('--macro_type', '-m', type=str, default='primitive', choices=['primitive', 'random', 'learned'], help='Type of macro_list to consider during search') parser.add_argument('--search_alg', type=str, default='gbfs', choices=['astar', 'gbfs', 'weighted_astar', 'bfws_r0', 'bfws_rg'], help='Search algorithm to run') parser.add_argument('--g_weight', type=float, default=None, help='Weight for g-score in weighted A*') parser.add_argument('--h_weight', type=float, default=None, help='Weight for h-score in weighted A*') parser.add_argument('--random_goal', '-r', action='store_true', default=False, help='Generate a random goal instead of the default solve configuration') parser.add_argument('--max_transitions', type=(lambda x: int(float(x))), default=500000.0, help='Maximum number of state transitions') parser.add_argument('--bfws_precision', type=int, default=3, help='The number of width values, w \\in {1,...,P}, to use when the search algorithm is best-first width search') return parser.parse_args()
def solve(): 'Instantiate an N-Puzzle and solve with the specified macro-actions and search algorithm' args = parse_args() random.seed(args.random_seed) np.random.seed(args.random_seed) start = NPuzzle(n=args.n).scramble(seed=args.random_seed) if args.random_goal: goal = NPuzzle(n=args.n).scramble(seed=(args.random_seed + 1000)) print('Using goal pattern: {:03d}'.format((args.random_seed + 1000))) else: goal = NPuzzle(n=args.n) print('Using seed: {:03d}'.format(args.random_seed)) print('Start:', start) print('Goal:', goal) print('Start:', ' '.join(map(str, list(start)))) print('Goal: ', ' '.join(map(str, list(goal)))) if (args.macro_type == 'random'): macros.generate_random_macro_set(args.random_seed) macro_namespace = {'primitive': SimpleNamespace(macros=[], models=[]), 'random': macros.random, 'learned': macros.learned}[args.macro_type] macro_list = macro_namespace.macros model_list = macro_namespace.models search_fn = {'astar': search.astar, 'gbfs': search.gbfs, 'weighted_astar': search.weighted_astar, 'bfws_r0': bfws.bfws, 'bfws_rg': bfws.bfws}[args.search_alg] def get_successors(puz): successors = [(copy.deepcopy(puz).transition(a), [a]) for a in puz.actions()] if (args.macro_type != 'primitive'): valid_macros = macro_list[puz.blank_idx] valid_models = model_list[puz.blank_idx] macro_successors = [(copy.deepcopy(puz).apply_macro(model=model), macro) for (macro, model) in zip(valid_macros, valid_models)] successors += macro_successors return successors search_dict = {'start': start, 'is_goal': (lambda node: (node.state == goal)), 'step_cost': (lambda macro: 1), 'heuristic': (lambda puz: len(puz.summarize_effects(baseline=goal)[0])), 'get_successors': get_successors, 'max_transitions': args.max_transitions} if (args.search_alg == 'weighted_astar'): assert ((args.g_weight is not None) and (args.h_weight is not None)), 'Must specify weights if using weighted A*.' gh_weights = (args.g_weight, args.h_weight) search_dict['gh_weights'] = gh_weights if ('bfws' in args.search_alg): search_dict['precision'] = args.bfws_precision if (args.search_alg == 'bfws_rg'): goal_fns = [(lambda x, i=i: (x.state[i] == goal[i])) for (i, _) in enumerate(goal)] relevant_atoms = iw.iw(1, start, get_successors, goal_fns) if (not relevant_atoms): relevant_atoms = iw.iw(2, start, get_successors, goal_fns) if (not relevant_atoms): relevant_atoms = start.all_atoms() search_dict['R'] = relevant_atoms search_results = search_fn(**search_dict) tag = '{}-puzzle/'.format(args.n) if args.random_goal: tag += 'random_goal/' else: tag += 'default_goal/' tag += args.macro_type results_dir = 'results/npuzzle/{}/{}/'.format(args.search_alg, tag) os.makedirs(results_dir, exist_ok=True) with open((results_dir + 'seed-{:03d}.pickle'.format(args.random_seed)), 'wb') as file: pickle.dump(search_results, file)
3,855,904,374,386,665,500
Instantiate an N-Puzzle and solve with the specified macro-actions and search algorithm
experiments/npuzzle/solve.py
solve
camall3n/focused-macros
python
def solve(): args = parse_args() random.seed(args.random_seed) np.random.seed(args.random_seed) start = NPuzzle(n=args.n).scramble(seed=args.random_seed) if args.random_goal: goal = NPuzzle(n=args.n).scramble(seed=(args.random_seed + 1000)) print('Using goal pattern: {:03d}'.format((args.random_seed + 1000))) else: goal = NPuzzle(n=args.n) print('Using seed: {:03d}'.format(args.random_seed)) print('Start:', start) print('Goal:', goal) print('Start:', ' '.join(map(str, list(start)))) print('Goal: ', ' '.join(map(str, list(goal)))) if (args.macro_type == 'random'): macros.generate_random_macro_set(args.random_seed) macro_namespace = {'primitive': SimpleNamespace(macros=[], models=[]), 'random': macros.random, 'learned': macros.learned}[args.macro_type] macro_list = macro_namespace.macros model_list = macro_namespace.models search_fn = {'astar': search.astar, 'gbfs': search.gbfs, 'weighted_astar': search.weighted_astar, 'bfws_r0': bfws.bfws, 'bfws_rg': bfws.bfws}[args.search_alg] def get_successors(puz): successors = [(copy.deepcopy(puz).transition(a), [a]) for a in puz.actions()] if (args.macro_type != 'primitive'): valid_macros = macro_list[puz.blank_idx] valid_models = model_list[puz.blank_idx] macro_successors = [(copy.deepcopy(puz).apply_macro(model=model), macro) for (macro, model) in zip(valid_macros, valid_models)] successors += macro_successors return successors search_dict = {'start': start, 'is_goal': (lambda node: (node.state == goal)), 'step_cost': (lambda macro: 1), 'heuristic': (lambda puz: len(puz.summarize_effects(baseline=goal)[0])), 'get_successors': get_successors, 'max_transitions': args.max_transitions} if (args.search_alg == 'weighted_astar'): assert ((args.g_weight is not None) and (args.h_weight is not None)), 'Must specify weights if using weighted A*.' gh_weights = (args.g_weight, args.h_weight) search_dict['gh_weights'] = gh_weights if ('bfws' in args.search_alg): search_dict['precision'] = args.bfws_precision if (args.search_alg == 'bfws_rg'): goal_fns = [(lambda x, i=i: (x.state[i] == goal[i])) for (i, _) in enumerate(goal)] relevant_atoms = iw.iw(1, start, get_successors, goal_fns) if (not relevant_atoms): relevant_atoms = iw.iw(2, start, get_successors, goal_fns) if (not relevant_atoms): relevant_atoms = start.all_atoms() search_dict['R'] = relevant_atoms search_results = search_fn(**search_dict) tag = '{}-puzzle/'.format(args.n) if args.random_goal: tag += 'random_goal/' else: tag += 'default_goal/' tag += args.macro_type results_dir = 'results/npuzzle/{}/{}/'.format(args.search_alg, tag) os.makedirs(results_dir, exist_ok=True) with open((results_dir + 'seed-{:03d}.pickle'.format(args.random_seed)), 'wb') as file: pickle.dump(search_results, file)
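Since solve() reads its configuration from parse_args(), a hedged way to drive it is through the script's command line; the flags below mirror the argparse definitions in the parse_args record, and the file location is taken from the recorded path:

```python
# Sketch of invoking the solver as a script; assumes the repository layout places
# solve.py at experiments/npuzzle/solve.py and its dependencies are importable.
import subprocess
import sys

subprocess.run(
    [sys.executable, "experiments/npuzzle/solve.py",
     "-n", "15", "--search_alg", "gbfs", "--random_seed", "1"],
    check=True)
```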
def format_value(value, df=None, doc=None, currency=None, translated=False): 'Format value based on given fieldtype, document reference, currency reference.\n\tIf docfield info (df) is not given, it will try and guess based on the datatype of the value' if isinstance(df, string_types): df = frappe._dict(fieldtype=df) if (not df): df = frappe._dict() if isinstance(value, datetime.datetime): df.fieldtype = 'Datetime' elif isinstance(value, datetime.date): df.fieldtype = 'Date' elif isinstance(value, datetime.timedelta): df.fieldtype = 'Time' elif isinstance(value, int): df.fieldtype = 'Int' elif isinstance(value, float): df.fieldtype = 'Float' else: df.fieldtype = 'Data' elif isinstance(df, dict): df = frappe._dict(df) if (value is None): value = '' elif translated: value = frappe._(value) if (not df): return value elif (df.get('fieldtype') == 'Date'): return formatdate(value) elif (df.get('fieldtype') == 'Datetime'): return format_datetime(value) elif (df.get('fieldtype') == 'Time'): return format_time(value) elif ((value == 0) and (df.get('fieldtype') in ('Int', 'Float', 'Currency', 'Percent')) and df.get('print_hide_if_no_value')): return '' elif (df.get('fieldtype') == 'Currency'): default_currency = frappe.db.get_default('currency') currency = (currency or get_field_currency(df, doc) or default_currency) return fmt_money(value, precision=get_field_precision(df, doc), currency=currency) elif (df.get('fieldtype') == 'Float'): precision = get_field_precision(df, doc) currency = (currency or get_field_currency(df, doc)) if ((not df.options) and (value is not None)): temp = cstr(value).split('.') if ((len(temp) == 1) or (cint(temp[1]) == 0)): precision = 0 return fmt_money(value, precision=precision, currency=currency) elif (df.get('fieldtype') == 'Percent'): return '{}%'.format(flt(value, 2)) elif (df.get('fieldtype') in ('Text', 'Small Text')): if (not re.search('(<br|<div|<p)', value)): return frappe.safe_decode(value).replace('\n', '<br>') elif (df.get('fieldtype') == 'Markdown Editor'): return frappe.utils.markdown(value) elif (df.get('fieldtype') == 'Table MultiSelect'): meta = frappe.get_meta(df.options) link_field = [df for df in meta.fields if (df.fieldtype == 'Link')][0] values = [v.get(link_field.fieldname, 'asdf') for v in value] return ', '.join(values) elif (df.get('fieldtype') == 'Duration'): hide_days = df.hide_days return format_duration(value, hide_days) elif (df.get('fieldtype') == 'Text Editor'): return "<div class='ql-snow'>{}</div>".format(value) return value
-4,790,803,982,193,777,000
Format value based on given fieldtype, document reference, currency reference. If docfield info (df) is not given, it will try and guess based on the datatype of the value
frappe/utils/formatters.py
format_value
EHASUN/frappe
python
def format_value(value, df=None, doc=None, currency=None, translated=False): 'Format value based on given fieldtype, document reference, currency reference.\n\tIf docfield info (df) is not given, it will try and guess based on the datatype of the value' if isinstance(df, string_types): df = frappe._dict(fieldtype=df) if (not df): df = frappe._dict() if isinstance(value, datetime.datetime): df.fieldtype = 'Datetime' elif isinstance(value, datetime.date): df.fieldtype = 'Date' elif isinstance(value, datetime.timedelta): df.fieldtype = 'Time' elif isinstance(value, int): df.fieldtype = 'Int' elif isinstance(value, float): df.fieldtype = 'Float' else: df.fieldtype = 'Data' elif isinstance(df, dict): df = frappe._dict(df) if (value is None): value = '' elif translated: value = frappe._(value) if (not df): return value elif (df.get('fieldtype') == 'Date'): return formatdate(value) elif (df.get('fieldtype') == 'Datetime'): return format_datetime(value) elif (df.get('fieldtype') == 'Time'): return format_time(value) elif ((value == 0) and (df.get('fieldtype') in ('Int', 'Float', 'Currency', 'Percent')) and df.get('print_hide_if_no_value')): return '' elif (df.get('fieldtype') == 'Currency'): default_currency = frappe.db.get_default('currency') currency = (currency or get_field_currency(df, doc) or default_currency) return fmt_money(value, precision=get_field_precision(df, doc), currency=currency) elif (df.get('fieldtype') == 'Float'): precision = get_field_precision(df, doc) currency = (currency or get_field_currency(df, doc)) if ((not df.options) and (value is not None)): temp = cstr(value).split('.') if ((len(temp) == 1) or (cint(temp[1]) == 0)): precision = 0 return fmt_money(value, precision=precision, currency=currency) elif (df.get('fieldtype') == 'Percent'): return '{}%'.format(flt(value, 2)) elif (df.get('fieldtype') in ('Text', 'Small Text')): if (not re.search('(<br|<div|<p)', value)): return frappe.safe_decode(value).replace('\n', '<br>') elif (df.get('fieldtype') == 'Markdown Editor'): return frappe.utils.markdown(value) elif (df.get('fieldtype') == 'Table MultiSelect'): meta = frappe.get_meta(df.options) link_field = [df for df in meta.fields if (df.fieldtype == 'Link')][0] values = [v.get(link_field.fieldname, 'asdf') for v in value] return ', '.join(values) elif (df.get('fieldtype') == 'Duration'): hide_days = df.hide_days return format_duration(value, hide_days) elif (df.get('fieldtype') == 'Text Editor'): return "<div class='ql-snow'>{}</div>".format(value) return value
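A brief usage sketch for format_value(); the values are illustrative, frappe must be importable, and branches such as Date or Currency additionally assume an initialized frappe site context:

```python
# Hedged example of the recorded function; only branches that need no site state are shown.
from frappe.utils.formatters import format_value

print(format_value(0.5, {"fieldtype": "Percent"}))  # -> '0.5%'
print(format_value(42, {"fieldtype": "Int"}))       # no matching branch, value returned unchanged
```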
def get_client(): '\n Loads the serialized client from database\n ' db = get_db() pickled_client = db.execute('SELECT pickled_client FROM btc_pay_server_client ORDER BY id').fetchone() return pickle.loads(pickled_client['pickled_client'])
-9,065,270,775,514,728,000
Loads the serialized client from database
app/btcpayserver_helper.py
get_client
psqnt/flask-btcpay-example
python
def get_client(): '\n \n ' db = get_db() pickled_client = db.execute('SELECT pickled_client FROM btc_pay_server_client ORDER BY id').fetchone() return pickle.loads(pickled_client['pickled_client'])
def create_invoice(price=Config.TIP_AMOUNT, currency=Config.TIP_CURRENCY, order_id=None, desc=None, notification_url=None, redirect_url=None): "\n Creates a new invoice and returns invoice id\n :param price: a given price (default is bitcoin)\n :param currency: currency ticker from bitpay API: 'USD', 'EUR', 'BTC' etc\n :return: invoice_id -> str\n " client = get_client() try: new_invoice = client.create_invoice({'price': price, 'currency': currency, 'orderId': order_id, 'itemDesc': desc, 'notificationUrl': notification_url, 'redirectUrl': redirect_url}) return new_invoice['id'] except Exception as e: print(e) return 'XXX'
-4,921,221,465,815,942,000
Creates a new invoice and returns invoice id :param price: a given price (default is bitcoin) :param currency: currency ticker from bitpay API: 'USD', 'EUR', 'BTC' etc :return: invoice_id -> str
app/btcpayserver_helper.py
create_invoice
psqnt/flask-btcpay-example
python
def create_invoice(price=Config.TIP_AMOUNT, currency=Config.TIP_CURRENCY, order_id=None, desc=None, notification_url=None, redirect_url=None): "\n Creates a new invoice and returns invoice id\n :param price: a given price (default is bitcoin)\n :param currency: currency ticker from bitpay API: 'USD', 'EUR', 'BTC' etc\n :return: invoice_id -> str\n " client = get_client() try: new_invoice = client.create_invoice({'price': price, 'currency': currency, 'orderId': order_id, 'itemDesc': desc, 'notificationUrl': notification_url, 'redirectUrl': redirect_url}) return new_invoice['id'] except Exception as e: print(e) return 'XXX'
def get_invoice(invoice_id: str): '\n Get an invoice by ID\n ' client = get_client() return client.get_invoice(invoice_id)
8,041,105,536,858,479,000
Get an invoice by ID
app/btcpayserver_helper.py
get_invoice
psqnt/flask-btcpay-example
python
def get_invoice(invoice_id: str): '\n \n ' client = get_client() return client.get_invoice(invoice_id)
def get_most_recent_invoice(): '\n Returns the most recent invoice created\n ' client = get_client() return client.get_invoices()[:1]
-8,889,290,577,071,344,000
Returns the most recent invoice created
app/btcpayserver_helper.py
get_most_recent_invoice
psqnt/flask-btcpay-example
python
def get_most_recent_invoice(): '\n \n ' client = get_client() return client.get_invoices()[:1]
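Taken together, the BTCPay helpers above support a small flow; this sketch assumes a Flask app context, that a client has already been pickled into the database (see get_client), and that the module import path app.btcpayserver_helper matches the recorded file app/btcpayserver_helper.py:

```python
# Illustrative flow only; the price, currency and description are made up.
from app import btcpayserver_helper as btcpay

invoice_id = btcpay.create_invoice(price=5, currency='USD', desc='Tip')
invoice = btcpay.get_invoice(invoice_id)
print(invoice)
latest = btcpay.get_most_recent_invoice()
```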
def platform_config_update(config): '\n Update configuration for the remote platform\n\n @param config The configuration dictionary to use/update\n ' global remote_port_map config['port_map'] = remote_port_map.copy() config['caps_table_idx'] = 0
-1,269,685,456,096,339,700
Update configuration for the remote platform @param config The configuration dictionary to use/update
src/ptf/platforms/remote.py
platform_config_update
PJHsieh/MarkHsieh_ptf
python
def platform_config_update(config): '\n Update configuration for the remote platform\n\n @param config The configuration dictionary to use/update\n ' global remote_port_map config['port_map'] = remote_port_map.copy() config['caps_table_idx'] = 0
def repl(self, clean_code, lastonly): ' REPL\n\n If `self.debug==True` then result is the raw list of lines of bytes,\n otherwise, it is a list of (lineNumber, stdoutLines, valueLines, typeLines),\n where again the last 3 entries are lists of lines of bytes. \n ' self.proc.sendline(clean_code) EOT = False debug_lines = [] nodes = [] node = () linenumber = None state = None for echoline in self.proc: if ((echoline[:1] == b'i') and echoline.endswith(b'noop(begin)--CMD\r\n')): break while (not EOT): try: for testline in self.proc: line = testline[:(- 2)] if self.debug: print(line) break except pexpect.TIMEOUT: self.proc.sendcontrol('c') self.proc.read(1) if node: node[1].append('\r\no{} = [KERNEL ENFORCED TIMEOUT]'.format(linenumber).encode()) nodes.append(node) return (debug_lines if self.debug else nodes) if line.endswith(b'--EOB'): EOT = True if self.debug: debug_lines.append(line) continue if line.endswith(b'--CMD'): newinput = self.patt_input.match(line) if newinput: if node: if lastonly: nodes.append((node[0], node[1], [], [])) else: nodes.append(node) linenumber = int(newinput.groups()[0]) node = (linenumber, [], [], []) state = 'CMD' elif line.endswith(b'--VAL'): state = 'VAL' elif line.endswith(b'--CLS'): state = 'CLS' elif (state == 'CMD'): node[1].append(line) elif (state == 'VAL'): node[2].append(line) elif (state == 'CLS'): node[3].append(line) if (not node): pass elif node[2]: nodes.append((node[0], node[1], node[2], node[3][:(- 1)])) else: nodes.append((node[0], node[1][:(- 1)], [], [])) return (debug_lines if self.debug else nodes)
7,573,649,737,421,780,000
REPL If `self.debug==True` then result is the raw list of lines of bytes, otherwise, it is a list of (lineNumber, stdoutLines, valueLines, typeLines), where again the last 3 entries are lists of lines of bytes.
m2_kernel/kernel.py
repl
MWhybrow92/Macaulay2-Jupyter-Kernel
python
def repl(self, clean_code, lastonly): ' REPL\n\n If `self.debug==True` then result is the raw list of lines of bytes,\n otherwise, it is a list of (lineNumber, stdoutLines, valueLines, typeLines),\n where again the last 3 entries are lists of lines of bytes. \n ' self.proc.sendline(clean_code) EOT = False debug_lines = [] nodes = [] node = () linenumber = None state = None for echoline in self.proc: if ((echoline[:1] == b'i') and echoline.endswith(b'noop(begin)--CMD\r\n')): break while (not EOT): try: for testline in self.proc: line = testline[:(- 2)] if self.debug: print(line) break except pexpect.TIMEOUT: self.proc.sendcontrol('c') self.proc.read(1) if node: node[1].append('\r\no{} = [KERNEL ENFORCED TIMEOUT]'.format(linenumber).encode()) nodes.append(node) return (debug_lines if self.debug else nodes) if line.endswith(b'--EOB'): EOT = True if self.debug: debug_lines.append(line) continue if line.endswith(b'--CMD'): newinput = self.patt_input.match(line) if newinput: if node: if lastonly: nodes.append((node[0], node[1], [], [])) else: nodes.append(node) linenumber = int(newinput.groups()[0]) node = (linenumber, [], [], []) state = 'CMD' elif line.endswith(b'--VAL'): state = 'VAL' elif line.endswith(b'--CLS'): state = 'CLS' elif (state == 'CMD'): node[1].append(line) elif (state == 'VAL'): node[2].append(line) elif (state == 'CLS'): node[3].append(line) if (not node): pass elif node[2]: nodes.append((node[0], node[1], node[2], node[3][:(- 1)])) else: nodes.append((node[0], node[1][:(- 1)], [], [])) return (debug_lines if self.debug else nodes)
def __init__(self, *args, **kwargs): ' kernel init - calls __init__ on the parent and sets up the M2Interp object\n ' super().__init__(*args, **kwargs) self.interp = M2Interp(configpath=os.environ.get('M2JK_CONFIG')) self.interp.start()
8,517,768,527,292,954,000
kernel init - calls __init__ on the parent and sets up the M2Interp object
m2_kernel/kernel.py
__init__
MWhybrow92/Macaulay2-Jupyter-Kernel
python
def __init__(self, *args, **kwargs): ' \n ' super().__init__(*args, **kwargs) self.interp = M2Interp(configpath=os.environ.get('M2JK_CONFIG')) self.interp.start()
def send_stream(self, text, stderr=False): ' enqueues a stdout or stderr message for the given cell\n ' stdfile = ('stderr' if stderr else 'stdout') content = {'name': stdfile, 'text': (text + '\n')} self.send_response(self.iopub_socket, 'stream', content)
-8,047,124,716,450,033,000
enqueues a stdout or stderr message for the given cell
m2_kernel/kernel.py
send_stream
MWhybrow92/Macaulay2-Jupyter-Kernel
python
def send_stream(self, text, stderr=False): ' \n ' stdfile = ('stderr' if stderr else 'stdout') content = {'name': stdfile, 'text': (text + '\n')} self.send_response(self.iopub_socket, 'stream', content)
def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): ' kernel entry point for the execution of each cell\n ' try: output_lines = self.interp.execute(code) except Exception as e: output_lines = [] self.send_stream(str(e), True) xcount = None if (not silent): if (not output_lines): return {'status': 'ok', 'execution_count': None, 'payload': [], 'user_expressions': {}} (data, stream) = self.process_output(output_lines) xcount = output_lines[(- 1)][0] if stream: stdout_content = {'name': 'stdout', 'text': stream} self.send_response(self.iopub_socket, 'stream', stdout_content) if data: execute_content = {'data': data, 'execution_count': xcount} self.send_response(self.iopub_socket, 'execute_result', execute_content) return {'status': 'ok', 'execution_count': xcount, 'payload': [], 'user_expressions': {}}
-5,955,703,454,956,886,000
kernel entry point for the execution of each cell
m2_kernel/kernel.py
do_execute
MWhybrow92/Macaulay2-Jupyter-Kernel
python
def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): ' \n ' try: output_lines = self.interp.execute(code) except Exception as e: output_lines = [] self.send_stream(str(e), True) xcount = None if (not silent): if (not output_lines): return {'status': 'ok', 'execution_count': None, 'payload': [], 'user_expressions': {}} (data, stream) = self.process_output(output_lines) xcount = output_lines[(- 1)][0] if stream: stdout_content = {'name': 'stdout', 'text': stream} self.send_response(self.iopub_socket, 'stream', stdout_content) if data: execute_content = {'data': data, 'execution_count': xcount} self.send_response(self.iopub_socket, 'execute_result', execute_content) return {'status': 'ok', 'execution_count': xcount, 'payload': [], 'user_expressions': {}}
def checkFormatReturnTraceOnError(file_path): 'Run checkFormat and return the traceback of any exception.' try: return checkFormat(file_path) except: return traceback.format_exc().split('\n')
-6,684,526,828,670,164,000
Run checkFormat and return the traceback of any exception.
tools/check_format.py
checkFormatReturnTraceOnError
isholaomotayo/envoy
python
def checkFormatReturnTraceOnError(file_path): try: return checkFormat(file_path) except: return traceback.format_exc().split('\n')
def checkOwners(dir_name, owned_directories, error_messages): 'Checks to make sure a given directory is present either in CODEOWNERS or OWNED_EXTENSIONS\n\n Args:\n dir_name: the directory being checked.\n owned_directories: directories currently listed in CODEOWNERS.\n error_messages: where to put an error message for new unowned directories.\n ' found = False for owned in owned_directories: if (owned.startswith(dir_name) or dir_name.startswith(owned)): found = True if ((not found) and (dir_name not in UNOWNED_EXTENSIONS)): error_messages.append(('New directory %s appears to not have owners in CODEOWNERS' % dir_name))
7,425,760,383,986,747,000
Checks to make sure a given directory is present either in CODEOWNERS or OWNED_EXTENSIONS Args: dir_name: the directory being checked. owned_directories: directories currently listed in CODEOWNERS. error_messages: where to put an error message for new unowned directories.
tools/check_format.py
checkOwners
isholaomotayo/envoy
python
def checkOwners(dir_name, owned_directories, error_messages): 'Checks to make sure a given directory is present either in CODEOWNERS or OWNED_EXTENSIONS\n\n Args:\n dir_name: the directory being checked.\n owned_directories: directories currently listed in CODEOWNERS.\n error_messages: where to put an error message for new unowned directories.\n ' found = False for owned in owned_directories: if (owned.startswith(dir_name) or dir_name.startswith(owned)): found = True if ((not found) and (dir_name not in UNOWNED_EXTENSIONS)): error_messages.append(('New directory %s appears to not have owners in CODEOWNERS' % dir_name))
def checkFormatVisitor(arg, dir_name, names): 'Run checkFormat in parallel for the given files.\n\n Args:\n arg: a tuple (pool, result_list, owned_directories, error_messages)\n pool and result_list are for starting tasks asynchronously.\n owned_directories tracks directories listed in the CODEOWNERS file.\n error_messages is a list of string format errors.\n dir_name: the parent directory of the given files.\n names: a list of file names.\n ' (pool, result_list, owned_directories, error_messages) = arg source_prefix = './source/' full_prefix = './source/extensions/' if (dir_name.startswith(full_prefix) and ('/' in dir_name[len(full_prefix):])): checkOwners(dir_name[len(source_prefix):], owned_directories, error_messages) for file_name in names: result = pool.apply_async(checkFormatReturnTraceOnError, args=(((dir_name + '/') + file_name),)) result_list.append(result)
4,855,462,129,043,061,000
Run checkFormat in parallel for the given files. Args: arg: a tuple (pool, result_list, owned_directories, error_messages) pool and result_list are for starting tasks asynchronously. owned_directories tracks directories listed in the CODEOWNERS file. error_messages is a list of string format errors. dir_name: the parent directory of the given files. names: a list of file names.
tools/check_format.py
checkFormatVisitor
isholaomotayo/envoy
python
def checkFormatVisitor(arg, dir_name, names): 'Run checkFormat in parallel for the given files.\n\n Args:\n arg: a tuple (pool, result_list, owned_directories, error_messages)\n pool and result_list are for starting tasks asynchronously.\n owned_directories tracks directories listed in the CODEOWNERS file.\n error_messages is a list of string format errors.\n dir_name: the parent directory of the given files.\n names: a list of file names.\n ' (pool, result_list, owned_directories, error_messages) = arg source_prefix = './source/' full_prefix = './source/extensions/' if (dir_name.startswith(full_prefix) and ('/' in dir_name[len(full_prefix):])): checkOwners(dir_name[len(source_prefix):], owned_directories, error_messages) for file_name in names: result = pool.apply_async(checkFormatReturnTraceOnError, args=(((dir_name + '/') + file_name),)) result_list.append(result)
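To make the ownership rule in checkOwners concrete, here is a self-contained sketch that restates its logic with hypothetical directory names and a stand-in UNOWNED_EXTENSIONS allowlist (the real constant lives in tools/check_format.py):

```python
# Stand-alone restatement of the checkOwners logic above; all data is illustrative.
UNOWNED_EXTENSIONS = {"extensions/filters/http/legacy/"}

def check_owners(dir_name, owned_directories, error_messages):
    # A directory counts as owned if it prefixes, or is prefixed by, a CODEOWNERS entry.
    found = any(owned.startswith(dir_name) or dir_name.startswith(owned)
                for owned in owned_directories)
    if not found and dir_name not in UNOWNED_EXTENSIONS:
        error_messages.append(
            "New directory %s appears to not have owners in CODEOWNERS" % dir_name)

errors = []
check_owners("extensions/filters/http/gzip/",
             ["extensions/filters/http/router/"], errors)
print(errors)  # one warning: gzip/ is neither owned nor allowlisted
```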
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]: 'Runs the network request through the client\'s chained policies.\n\n >>> from azure.core.rest import HttpRequest\n >>> request = HttpRequest("GET", "https://www.example.org/")\n <HttpRequest [GET], url: \'https://www.example.org/\'>\n >>> response = await client._send_request(request)\n <AsyncHttpResponse: 200 OK>\n\n For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart\n\n :param request: The network request you want to make. Required.\n :type request: ~azure.core.rest.HttpRequest\n :keyword bool stream: Whether the response payload will be streamed. Defaults to False.\n :return: The response of your network call. Does not do error handling on your response.\n :rtype: ~azure.core.rest.AsyncHttpResponse\n ' request_copy = deepcopy(request) request_copy.url = self._client.format_url(request_copy.url) return self._client.send_request(request_copy, **kwargs)
996,727,745,646,716,500
Runs the network request through the client's chained policies. >>> from azure.core.rest import HttpRequest >>> request = HttpRequest("GET", "https://www.example.org/") <HttpRequest [GET], url: 'https://www.example.org/'> >>> response = await client._send_request(request) <AsyncHttpResponse: 200 OK> For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart :param request: The network request you want to make. Required. :type request: ~azure.core.rest.HttpRequest :keyword bool stream: Whether the response payload will be streamed. Defaults to False. :return: The response of your network call. Does not do error handling on your response. :rtype: ~azure.core.rest.AsyncHttpResponse
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2016_04_01/aio/_policy_client.py
_send_request
AikoBB/azure-sdk-for-python
python
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]: 'Runs the network request through the client\'s chained policies.\n\n >>> from azure.core.rest import HttpRequest\n >>> request = HttpRequest("GET", "https://www.example.org/")\n <HttpRequest [GET], url: \'https://www.example.org/\'>\n >>> response = await client._send_request(request)\n <AsyncHttpResponse: 200 OK>\n\n For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart\n\n :param request: The network request you want to make. Required.\n :type request: ~azure.core.rest.HttpRequest\n :keyword bool stream: Whether the response payload will be streamed. Defaults to False.\n :return: The response of your network call. Does not do error handling on your response.\n :rtype: ~azure.core.rest.AsyncHttpResponse\n ' request_copy = deepcopy(request) request_copy.url = self._client.format_url(request_copy.url) return self._client.send_request(request_copy, **kwargs)
@api.doc('list_of_registered_users') @api.marshal_list_with(_user, envelope='data') def get(self): 'List all registered users' return UserService.get_all_users()
4,302,180,309,125,707,000
List all registered users
app/project/user/user_controller.py
get
makci97/lms_flask
python
@api.doc('list_of_registered_users') @api.marshal_list_with(_user, envelope='data') def get(self): return UserService.get_all_users()
@auth.login_required @AuthService.admin_permission_required @api.response(201, 'User successfully created.') @api.doc('create a new user(only for admin)') @api.expect(_user, validate=True) def post(self): 'Creates a new User (only for admin)' user_service = UserService() return user_service.create_user(request.json)
-6,621,883,920,926,674,000
Creates a new User (only for admin)
app/project/user/user_controller.py
post
makci97/lms_flask
python
@auth.login_required @AuthService.admin_permission_required @api.response(201, 'User successfully created.') @api.doc('create a new user(only for admin)') @api.expect(_user, validate=True) def post(self): ' ' user_service = UserService() return user_service.create_user(request.json)
@api.doc('get a user') @api.marshal_with(_user) def get(self, public_id): 'get a user given its identifier' user_service = UserService() user_service.load_user(public_id) if user_service.is_nan_user(): api.abort(404) else: return user_service.get_user_public()
6,630,313,780,989,657,000
get a user given its identifier
app/project/user/user_controller.py
get
makci97/lms_flask
python
@api.doc('get a user') @api.marshal_with(_user) def get(self, public_id): user_service = UserService() user_service.load_user(public_id) if user_service.is_nan_user(): api.abort(404) else: return user_service.get_user_public()
def __init__(self, name: str, num_qubits: int, params: List, label: Optional[str]=None) -> None: 'Create a new gate.\n\n Args:\n name: The Qobj name of the gate.\n num_qubits: The number of qubits the gate acts on.\n params: A list of parameters.\n label: An optional label for the gate.\n ' self._label = label self.definition = None super().__init__(name, num_qubits, 0, params)
2,209,248,935,091,320,600
Create a new gate. Args: name: The Qobj name of the gate. num_qubits: The number of qubits the gate acts on. params: A list of parameters. label: An optional label for the gate.
qiskit/circuit/gate.py
__init__
Blacksmith-qi/qiskit-terra
python
def __init__(self, name: str, num_qubits: int, params: List, label: Optional[str]=None) -> None: 'Create a new gate.\n\n Args:\n name: The Qobj name of the gate.\n num_qubits: The number of qubits the gate acts on.\n params: A list of parameters.\n label: An optional label for the gate.\n ' self._label = label self.definition = None super().__init__(name, num_qubits, 0, params)
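A hedged sketch of how the Gate constructor recorded above is typically used: subclassing Gate and filling in `definition` lazily from an equivalent circuit. The gate name 'my_h' and the class name are made up for illustration, and the snippet assumes qiskit-terra is installed.

from qiskit import QuantumCircuit
from qiskit.circuit import Gate

class MyHGate(Gate):
    # Hypothetical single-qubit gate, used only to illustrate the constructor above.
    def __init__(self, label=None):
        # name, num_qubits, params -- the same positional arguments as Gate.__init__.
        super().__init__('my_h', 1, [], label=label)

    def _define(self):
        # Gate.definition is populated lazily from an equivalent circuit.
        qc = QuantumCircuit(1, name=self.name)
        qc.h(0)
        self.definition = qc

circuit = QuantumCircuit(1)
circuit.append(MyHGate(), [0])
print(circuit.decompose())  # shows the underlying h gate from _define()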
def to_matrix(self) -> np.ndarray: 'Return a Numpy.array for the gate unitary matrix.\n\n Returns:\n np.ndarray: if the Gate subclass has a matrix definition.\n\n Raises:\n CircuitError: If a Gate subclass does not implement this method an\n exception will be raised when this base class method is called.\n ' if hasattr(self, '__array__'): return self.__array__(dtype=complex) raise CircuitError('to_matrix not defined for this {}'.format(type(self)))
-7,402,389,822,080,293,000
Return a Numpy.array for the gate unitary matrix. Returns: np.ndarray: if the Gate subclass has a matrix definition. Raises: CircuitError: If a Gate subclass does not implement this method an exception will be raised when this base class method is called.
qiskit/circuit/gate.py
to_matrix
Blacksmith-qi/qiskit-terra
python
def to_matrix(self) -> np.ndarray: 'Return a Numpy.array for the gate unitary matrix.\n\n Returns:\n np.ndarray: if the Gate subclass has a matrix definition.\n\n Raises:\n CircuitError: If a Gate subclass does not implement this method an\n exception will be raised when this base class method is called.\n ' if hasattr(self, '__array__'): return self.__array__(dtype=complex) raise CircuitError('to_matrix not defined for this {}'.format(type(self)))
def power(self, exponent: float): 'Creates a unitary gate as `gate^exponent`.\n\n Args:\n exponent (float): Gate^exponent\n\n Returns:\n qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent.\n\n Raises:\n CircuitError: If Gate is not unitary\n ' from qiskit.quantum_info.operators import Operator from qiskit.extensions.unitary import UnitaryGate (decomposition, unitary) = schur(Operator(self).data, output='complex') decomposition_power = list() decomposition_diagonal = decomposition.diagonal() if (not np.allclose(np.diag(decomposition_diagonal), decomposition)): raise CircuitError('The matrix is not diagonal') for element in decomposition_diagonal: decomposition_power.append(pow(element, exponent)) unitary_power = ((unitary @ np.diag(decomposition_power)) @ unitary.conj().T) return UnitaryGate(unitary_power, label=('%s^%s' % (self.name, exponent)))
5,892,279,998,234,714,000
Creates a unitary gate as `gate^exponent`. Args: exponent (float): Gate^exponent Returns: qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent. Raises: CircuitError: If Gate is not unitary
qiskit/circuit/gate.py
power
Blacksmith-qi/qiskit-terra
python
def power(self, exponent: float): 'Creates a unitary gate as `gate^exponent`.\n\n Args:\n exponent (float): Gate^exponent\n\n Returns:\n qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent.\n\n Raises:\n CircuitError: If Gate is not unitary\n ' from qiskit.quantum_info.operators import Operator from qiskit.extensions.unitary import UnitaryGate (decomposition, unitary) = schur(Operator(self).data, output='complex') decomposition_power = list() decomposition_diagonal = decomposition.diagonal() if (not np.allclose(np.diag(decomposition_diagonal), decomposition)): raise CircuitError('The matrix is not diagonal') for element in decomposition_diagonal: decomposition_power.append(pow(element, exponent)) unitary_power = ((unitary @ np.diag(decomposition_power)) @ unitary.conj().T) return UnitaryGate(unitary_power, label=('%s^%s' % (self.name, exponent)))
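For the `power` record above, a small usage sketch (assuming qiskit-terra and numpy are installed): raising a Pauli-X gate to the 0.5 power yields a square-root-of-X unitary, which can be checked numerically.

import numpy as np
from qiskit.circuit.library import XGate
from qiskit.quantum_info import Operator

sqrt_x = XGate().power(0.5)          # UnitaryGate labelled 'x^0.5'
m = Operator(sqrt_x).data
# Squaring the matrix recovers X (up to numerical tolerance).
assert np.allclose(m @ m, Operator(XGate()).data)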
def assemble(self) -> 'Instruction': 'Assemble a QasmQobjInstruction' instruction = super().assemble() if self.label: instruction.label = self.label return instruction
6,947,129,926,959,157,000
Assemble a QasmQobjInstruction
qiskit/circuit/gate.py
assemble
Blacksmith-qi/qiskit-terra
python
def assemble(self) -> 'Instruction': instruction = super().assemble() if self.label: instruction.label = self.label return instruction
@property def label(self) -> str: 'Return gate label' return self._label
3,554,960,801,669,385,700
Return gate label
qiskit/circuit/gate.py
label
Blacksmith-qi/qiskit-terra
python
@property def label(self) -> str: return self._label
@label.setter def label(self, name: str): 'Set gate label to name\n\n Args:\n name (str or None): label to assign unitary\n\n Raises:\n TypeError: name is not string or None.\n ' if isinstance(name, (str, type(None))): self._label = name else: raise TypeError('label expects a string or None')
-3,460,210,592,454,413,000
Set gate label to name Args: name (str or None): label to assign unitary Raises: TypeError: name is not string or None.
qiskit/circuit/gate.py
label
Blacksmith-qi/qiskit-terra
python
@label.setter def label(self, name: str): 'Set gate label to name\n\n Args:\n name (str or None): label to assign unitary\n\n Raises:\n TypeError: name is not string or None.\n ' if isinstance(name, (str, type(None))): self._label = name else: raise TypeError('label expects a string or None')
def control(self, num_ctrl_qubits: Optional[int]=1, label: Optional[str]=None, ctrl_state: Optional[Union[(int, str)]]=None): "Return controlled version of gate. See :class:`.ControlledGate` for usage.\n\n Args:\n num_ctrl_qubits: number of controls to add to gate (default=1)\n label: optional gate label\n ctrl_state: The control state in decimal or as a bitstring\n (e.g. '111'). If None, use 2**num_ctrl_qubits-1.\n\n Returns:\n qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm\n uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size\n num_qubits + 2*num_ctrl_qubits - 1.\n\n Raises:\n QiskitError: unrecognized mode or invalid ctrl_state\n " from .add_control import add_control return add_control(self, num_ctrl_qubits, label, ctrl_state)
-3,450,574,199,674,920,400
Return controlled version of gate. See :class:`.ControlledGate` for usage. Args: num_ctrl_qubits: number of controls to add to gate (default=1) label: optional gate label ctrl_state: The control state in decimal or as a bitstring (e.g. '111'). If None, use 2**num_ctrl_qubits-1. Returns: qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size num_qubits + 2*num_ctrl_qubits - 1. Raises: QiskitError: unrecognized mode or invalid ctrl_state
qiskit/circuit/gate.py
control
Blacksmith-qi/qiskit-terra
python
def control(self, num_ctrl_qubits: Optional[int]=1, label: Optional[str]=None, ctrl_state: Optional[Union[(int, str)]]=None): "Return controlled version of gate. See :class:`.ControlledGate` for usage.\n\n Args:\n num_ctrl_qubits: number of controls to add to gate (default=1)\n label: optional gate label\n ctrl_state: The control state in decimal or as a bitstring\n (e.g. '111'). If None, use 2**num_ctrl_qubits-1.\n\n Returns:\n qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm\n uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size\n num_qubits + 2*num_ctrl_qubits - 1.\n\n Raises:\n QiskitError: unrecognized mode or invalid ctrl_state\n " from .add_control import add_control return add_control(self, num_ctrl_qubits, label, ctrl_state)
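A usage sketch for the `control` record above (qiskit-terra assumed installed): adding one control to an RY rotation, and optionally controlling on the |0> state via `ctrl_state`.

from qiskit import QuantumCircuit
from qiskit.circuit.library import RYGate

cry = RYGate(0.4).control(1)                          # control on |1> (default)
cry_on_zero = RYGate(0.4).control(1, ctrl_state='0')  # control on |0>

qc = QuantumCircuit(2)
qc.append(cry, [0, 1])          # control qubit first, then target
qc.append(cry_on_zero, [0, 1])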
@staticmethod def _broadcast_single_argument(qarg: List) -> List: 'Expands a single argument.\n\n For example: [q[0], q[1]] -> [q[0]], [q[1]]\n ' for arg0 in qarg: (yield ([arg0], []))
-4,360,610,716,977,950,000
Expands a single argument. For example: [q[0], q[1]] -> [q[0]], [q[1]]
qiskit/circuit/gate.py
_broadcast_single_argument
Blacksmith-qi/qiskit-terra
python
@staticmethod def _broadcast_single_argument(qarg: List) -> List: 'Expands a single argument.\n\n For example: [q[0], q[1]] -> [q[0]], [q[1]]\n ' for arg0 in qarg: (yield ([arg0], []))
def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[(List, List)]: 'Validation and handling of the arguments and its relationship.\n\n For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This\n method yields the arguments in the right grouping. In the given example::\n\n in: [[q[0],q[1]], q[2]],[]\n outs: [q[0], q[2]], []\n [q[1], q[2]], []\n\n The general broadcasting rules are:\n\n * If len(qargs) == 1::\n\n [q[0], q[1]] -> [q[0]],[q[1]]\n\n * If len(qargs) == 2::\n\n [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]]\n [[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]]\n [[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]]\n\n * If len(qargs) >= 3::\n\n [q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...]\n\n Args:\n qargs: List of quantum bit arguments.\n cargs: List of classical bit arguments.\n\n Returns:\n A tuple with single arguments.\n\n Raises:\n CircuitError: If the input is not valid. For example, the number of\n arguments does not match the gate expectation.\n ' if ((len(qargs) != self.num_qubits) or cargs): raise CircuitError(f'The amount of qubit({len(qargs)})/clbit({len(cargs)}) arguments does not match the gate expectation ({self.num_qubits}).') if any(((not qarg) for qarg in qargs)): raise CircuitError('One or more of the arguments are empty') if (len(qargs) == 1): return Gate._broadcast_single_argument(qargs[0]) elif (len(qargs) == 2): return Gate._broadcast_2_arguments(qargs[0], qargs[1]) elif (len(qargs) >= 3): return Gate._broadcast_3_or_more_args(qargs) else: raise CircuitError(('This gate cannot handle %i arguments' % len(qargs)))
-2,935,113,582,535,424,000
Validation and handling of the arguments and its relationship.

For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This
method yields the arguments in the right grouping. In the given example::

    in: [[q[0],q[1]], q[2]],[]
    outs: [q[0], q[2]], []
          [q[1], q[2]], []

The general broadcasting rules are:

    * If len(qargs) == 1::

        [q[0], q[1]] -> [q[0]],[q[1]]

    * If len(qargs) == 2::

        [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]]
        [[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]]
        [[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]]

    * If len(qargs) >= 3::

        [q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...]

Args:
    qargs: List of quantum bit arguments.
    cargs: List of classical bit arguments.

Returns:
    A tuple with single arguments.

Raises:
    CircuitError: If the input is not valid. For example, the number of
        arguments does not match the gate expectation.
qiskit/circuit/gate.py
broadcast_arguments
Blacksmith-qi/qiskit-terra
python
def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[(List, List)]: 'Validation and handling of the arguments and its relationship.\n\n For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This\n method yields the arguments in the right grouping. In the given example::\n\n in: [[q[0],q[1]], q[2]],[]\n outs: [q[0], q[2]], []\n [q[1], q[2]], []\n\n The general broadcasting rules are:\n\n * If len(qargs) == 1::\n\n [q[0], q[1]] -> [q[0]],[q[1]]\n\n * If len(qargs) == 2::\n\n [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]]\n [[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]]\n [[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]]\n\n * If len(qargs) >= 3::\n\n [q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...]\n\n Args:\n qargs: List of quantum bit arguments.\n cargs: List of classical bit arguments.\n\n Returns:\n A tuple with single arguments.\n\n Raises:\n CircuitError: If the input is not valid. For example, the number of\n arguments does not match the gate expectation.\n ' if ((len(qargs) != self.num_qubits) or cargs): raise CircuitError(f'The amount of qubit({len(qargs)})/clbit({len(cargs)}) arguments does not match the gate expectation ({self.num_qubits}).') if any(((not qarg) for qarg in qargs)): raise CircuitError('One or more of the arguments are empty') if (len(qargs) == 1): return Gate._broadcast_single_argument(qargs[0]) elif (len(qargs) == 2): return Gate._broadcast_2_arguments(qargs[0], qargs[1]) elif (len(qargs) >= 3): return Gate._broadcast_3_or_more_args(qargs) else: raise CircuitError(('This gate cannot handle %i arguments' % len(qargs)))
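The broadcasting rules described above are what make list arguments work at the circuit level. A minimal sketch (qiskit-terra assumed installed):

from qiskit import QuantumCircuit

qc = QuantumCircuit(3)
# cx([q0, q1], q2) broadcasts to cx(q0, q2); cx(q1, q2), the grouping shown in the docstring.
qc.cx([0, 1], 2)
print(qc.count_ops())   # -> OrderedDict([('cx', 2)])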
def validate_parameter(self, parameter): 'Gate parameters should be int, float, or ParameterExpression' if isinstance(parameter, ParameterExpression): if (len(parameter.parameters) > 0): return parameter if (not parameter._symbol_expr.is_real): raise CircuitError('Bound parameter expression is complex in gate {}'.format(self.name)) return parameter if isinstance(parameter, (int, float)): return parameter elif isinstance(parameter, (np.integer, np.floating)): return parameter.item() elif isinstance(parameter, np.ndarray): warn(('Gate param type %s is being deprecated as of 0.16.0, and will be removed no earlier than 3 months after that release date. Considering creating your own Gate subclass with the method validate_parameter to allow this param type.' % type(parameter)), DeprecationWarning, 3) return parameter else: raise CircuitError('Invalid param type {0} for gate {1}.'.format(type(parameter), self.name))
-5,746,868,024,655,658,000
Gate parameters should be int, float, or ParameterExpression
qiskit/circuit/gate.py
validate_parameter
Blacksmith-qi/qiskit-terra
python
def validate_parameter(self, parameter): if isinstance(parameter, ParameterExpression): if (len(parameter.parameters) > 0): return parameter if (not parameter._symbol_expr.is_real): raise CircuitError('Bound parameter expression is complex in gate {}'.format(self.name)) return parameter if isinstance(parameter, (int, float)): return parameter elif isinstance(parameter, (np.integer, np.floating)): return parameter.item() elif isinstance(parameter, np.ndarray): warn(('Gate param type %s is being deprecated as of 0.16.0, and will be removed no earlier than 3 months after that release date. Considering creating your own Gate subclass with the method validate_parameter to allow this param type.' % type(parameter)), DeprecationWarning, 3) return parameter else: raise CircuitError('Invalid param type {0} for gate {1}.'.format(type(parameter), self.name))
def weighted_neighbors_loss(train_data, valid_data, kernel): 'Computes the negative log prob per data point.' (X_train, T_train) = train_data (X_valid, T_valid) = valid_data weight_mat = kernel(X_valid, X_train) label_probs = np.dot(weight_mat, T_train) label_probs = (label_probs / np.sum(label_probs, axis=1, keepdims=True)) mean_neg_log_prob = (- np.mean(np.log(np.sum((label_probs * T_valid), axis=1)), axis=0)) return mean_neg_log_prob
3,900,170,860,203,776,000
Computes the negative log prob per data point.
cpu_ver/hypergrad/kernel_methods.py
weighted_neighbors_loss
LinZichuan/drmad
python
def weighted_neighbors_loss(train_data, valid_data, kernel): (X_train, T_train) = train_data (X_valid, T_valid) = valid_data weight_mat = kernel(X_valid, X_train) label_probs = np.dot(weight_mat, T_train) label_probs = (label_probs / np.sum(label_probs, axis=1, keepdims=True)) mean_neg_log_prob = (- np.mean(np.log(np.sum((label_probs * T_valid), axis=1)), axis=0)) return mean_neg_log_prob
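A minimal sketch of calling `weighted_neighbors_loss` as defined above; the RBF kernel, its bandwidth, and the random one-hot labels are all made up for illustration.

import numpy as np

def rbf_kernel(X_a, X_b, bandwidth=1.0):
    # Hypothetical kernel: squared Euclidean distances -> Gaussian weights.
    sq_dists = np.sum((X_a[:, None, :] - X_b[None, :, :]) ** 2, axis=-1)
    return np.exp(-sq_dists / (2.0 * bandwidth ** 2))

rng = np.random.RandomState(0)
X_train = rng.randn(20, 5)
T_train = np.eye(3)[rng.randint(0, 3, size=20)]   # one-hot training labels
X_valid = rng.randn(8, 5)
T_valid = np.eye(3)[rng.randint(0, 3, size=8)]

loss = weighted_neighbors_loss((X_train, T_train), (X_valid, T_valid), rbf_kernel)
print(loss)   # mean negative log-probability over the validation points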
def parse_args(): 'PARAMETERS' parser = argparse.ArgumentParser('training') parser.add_argument('--use_cpu', action='store_true', default=False, help='use cpu mode') parser.add_argument('--gpu', type=str, default='0', help='specify gpu device') parser.add_argument('--batch_size', type=int, default=8, help='batch size in training') parser.add_argument('--model', default='pointnet_cls', help='model name [default: pointnet_cls]') parser.add_argument('--num_category', default=12, type=int, help='training on real dataset') parser.add_argument('--epoch', default=20, type=int, help='number of epoch in training') parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training') parser.add_argument('--num_point', type=int, default=1024, help='Point Number') parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training') parser.add_argument('--log_dir', type=str, default=None, help='experiment root') parser.add_argument('--decay_rate', type=float, default=0.0001, help='decay rate') parser.add_argument('--use_normals', action='store_true', default=False, help='use normals') parser.add_argument('--process_data', action='store_true', default=False, help='save data offline') parser.add_argument('--use_uniform_sample', action='store_true', default=False, help='use uniform sampling') parser.add_argument('--num_sparse_point', type=int, default=50, help='Point Number for domain loss') parser.add_argument('--random_choose_sparse', type=bool, default=False, help='Random select num_sparse_point from [10,20,30,40,50]') parser.add_argument('--SO3_Rotation', action='store_true', default=False, help='arbitrary rotation in SO3') parser.add_argument('--DA_method', type=str, default='multi_coral_mmd', help='choose the DA loss function') parser.add_argument('--alpha', type=float, default=10, help='set the value of classification loss') parser.add_argument('--lamda', type=float, default=10, help='set the value of CORAL loss') parser.add_argument('--beta', type=float, default=10, help='set the value of MMD loss') return parser.parse_args()
7,232,738,429,550,530,000
PARAMETERS
train_realMulti-DA-Loss_classification.py
parse_args
congw112358/Pointnet_Pointnet2_pytorch
python
def parse_args(): parser = argparse.ArgumentParser('training') parser.add_argument('--use_cpu', action='store_true', default=False, help='use cpu mode') parser.add_argument('--gpu', type=str, default='0', help='specify gpu device') parser.add_argument('--batch_size', type=int, default=8, help='batch size in training') parser.add_argument('--model', default='pointnet_cls', help='model name [default: pointnet_cls]') parser.add_argument('--num_category', default=12, type=int, help='training on real dataset') parser.add_argument('--epoch', default=20, type=int, help='number of epoch in training') parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training') parser.add_argument('--num_point', type=int, default=1024, help='Point Number') parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training') parser.add_argument('--log_dir', type=str, default=None, help='experiment root') parser.add_argument('--decay_rate', type=float, default=0.0001, help='decay rate') parser.add_argument('--use_normals', action='store_true', default=False, help='use normals') parser.add_argument('--process_data', action='store_true', default=False, help='save data offline') parser.add_argument('--use_uniform_sample', action='store_true', default=False, help='use uniform sampling') parser.add_argument('--num_sparse_point', type=int, default=50, help='Point Number for domain loss') parser.add_argument('--random_choose_sparse', type=bool, default=False, help='Random select num_sparse_point from [10,20,30,40,50]') parser.add_argument('--SO3_Rotation', action='store_true', default=False, help='arbitrary rotation in SO3') parser.add_argument('--DA_method', type=str, default='multi_coral_mmd', help='choose the DA loss function') parser.add_argument('--alpha', type=float, default=10, help='set the value of classification loss') parser.add_argument('--lamda', type=float, default=10, help='set the value of CORAL loss') parser.add_argument('--beta', type=float, default=10, help='set the value of MMD loss') return parser.parse_args()
def stream(self, start_offset: int=0, shuffle: bool=False, skip_shuffle_at_epoch_end: bool=False, shuffle_seed: Optional[int]=None, shard_rank: int=0, num_shards: int=1, drop_shard_remainder: bool=False) -> yogadl.Stream: '\n Create a stream from a cache.\n ' if (shuffle and (not skip_shuffle_at_epoch_end)): assert (shuffle_seed is not None), 'Please set `shuffle_seed` if enabling `shuffle` and not enabling `skip_shuffle_at_epoch_end`.' generated_keys = self._shard_keys(shard_rank=shard_rank, num_shards=num_shards, drop_shard_remainder=drop_shard_remainder) generator_from_keys = yogadl.GeneratorFromKeys(keys=generated_keys, initial_offset=start_offset, read_val_from_key_fn=self._lmdb_access.read_value_by_key, shuffle_at_start=shuffle, shuffle_after_epoch=(shuffle and (not skip_shuffle_at_epoch_end)), shuffle_seed=shuffle_seed) return yogadl.Stream(iterator_fn=generator_from_keys.instantiate_generator, length=len(generated_keys), output_types=self._lmdb_access.get_types(), output_shapes=self._lmdb_access.get_shapes())
-904,865,744,401,641,300
Create a stream from a cache.
yogadl/dataref/_local_lmdb_dataref.py
stream
determined-ai/yogadl
python
def stream(self, start_offset: int=0, shuffle: bool=False, skip_shuffle_at_epoch_end: bool=False, shuffle_seed: Optional[int]=None, shard_rank: int=0, num_shards: int=1, drop_shard_remainder: bool=False) -> yogadl.Stream: '\n \n ' if (shuffle and (not skip_shuffle_at_epoch_end)): assert (shuffle_seed is not None), 'Please set `shuffle_seed` if enabling `shuffle` and not enabling `skip_shuffle_at_epoch_end`.' generated_keys = self._shard_keys(shard_rank=shard_rank, num_shards=num_shards, drop_shard_remainder=drop_shard_remainder) generator_from_keys = yogadl.GeneratorFromKeys(keys=generated_keys, initial_offset=start_offset, read_val_from_key_fn=self._lmdb_access.read_value_by_key, shuffle_at_start=shuffle, shuffle_after_epoch=(shuffle and (not skip_shuffle_at_epoch_end)), shuffle_seed=shuffle_seed) return yogadl.Stream(iterator_fn=generator_from_keys.instantiate_generator, length=len(generated_keys), output_types=self._lmdb_access.get_types(), output_shapes=self._lmdb_access.get_shapes())
def __init__(self, title=None, url=None, latest_comment_url=None, type=None): 'UserNotificationSubject - a model defined in Swagger' self._title = None self._url = None self._latest_comment_url = None self._type = None self.discriminator = None if (title is not None): self.title = title if (url is not None): self.url = url if (latest_comment_url is not None): self.latest_comment_url = latest_comment_url if (type is not None): self.type = type
-7,562,347,988,598,502,000
UserNotificationSubject - a model defined in Swagger
gitee/models/user_notification_subject.py
__init__
pygitee/pygitee
python
def __init__(self, title=None, url=None, latest_comment_url=None, type=None): self._title = None self._url = None self._latest_comment_url = None self._type = None self.discriminator = None if (title is not None): self.title = title if (url is not None): self.url = url if (latest_comment_url is not None): self.latest_comment_url = latest_comment_url if (type is not None): self.type = type
@property def title(self): 'Gets the title of this UserNotificationSubject. # noqa: E501\n\n\n :return: The title of this UserNotificationSubject. # noqa: E501\n :rtype: str\n ' return self._title
6,033,907,202,783,320,000
Gets the title of this UserNotificationSubject. # noqa: E501 :return: The title of this UserNotificationSubject. # noqa: E501 :rtype: str
gitee/models/user_notification_subject.py
title
pygitee/pygitee
python
@property def title(self): 'Gets the title of this UserNotificationSubject. # noqa: E501\n\n\n :return: The title of this UserNotificationSubject. # noqa: E501\n :rtype: str\n ' return self._title
@title.setter def title(self, title): 'Sets the title of this UserNotificationSubject.\n\n\n :param title: The title of this UserNotificationSubject. # noqa: E501\n :type: str\n ' self._title = title
5,592,689,324,592,237,000
Sets the title of this UserNotificationSubject. :param title: The title of this UserNotificationSubject. # noqa: E501 :type: str
gitee/models/user_notification_subject.py
title
pygitee/pygitee
python
@title.setter def title(self, title): 'Sets the title of this UserNotificationSubject.\n\n\n :param title: The title of this UserNotificationSubject. # noqa: E501\n :type: str\n ' self._title = title
@property def url(self): 'Gets the url of this UserNotificationSubject. # noqa: E501\n\n\n :return: The url of this UserNotificationSubject. # noqa: E501\n :rtype: str\n ' return self._url
4,854,045,996,556,589,000
Gets the url of this UserNotificationSubject. # noqa: E501 :return: The url of this UserNotificationSubject. # noqa: E501 :rtype: str
gitee/models/user_notification_subject.py
url
pygitee/pygitee
python
@property def url(self): 'Gets the url of this UserNotificationSubject. # noqa: E501\n\n\n :return: The url of this UserNotificationSubject. # noqa: E501\n :rtype: str\n ' return self._url
@url.setter def url(self, url): 'Sets the url of this UserNotificationSubject.\n\n\n :param url: The url of this UserNotificationSubject. # noqa: E501\n :type: str\n ' self._url = url
-3,394,678,449,273,618,000
Sets the url of this UserNotificationSubject. :param url: The url of this UserNotificationSubject. # noqa: E501 :type: str
gitee/models/user_notification_subject.py
url
pygitee/pygitee
python
@url.setter def url(self, url): 'Sets the url of this UserNotificationSubject.\n\n\n :param url: The url of this UserNotificationSubject. # noqa: E501\n :type: str\n ' self._url = url
@property def latest_comment_url(self): 'Gets the latest_comment_url of this UserNotificationSubject. # noqa: E501\n\n\n :return: The latest_comment_url of this UserNotificationSubject. # noqa: E501\n :rtype: str\n ' return self._latest_comment_url
6,100,275,455,326,668,000
Gets the latest_comment_url of this UserNotificationSubject. # noqa: E501 :return: The latest_comment_url of this UserNotificationSubject. # noqa: E501 :rtype: str
gitee/models/user_notification_subject.py
latest_comment_url
pygitee/pygitee
python
@property def latest_comment_url(self): 'Gets the latest_comment_url of this UserNotificationSubject. # noqa: E501\n\n\n :return: The latest_comment_url of this UserNotificationSubject. # noqa: E501\n :rtype: str\n ' return self._latest_comment_url
@latest_comment_url.setter def latest_comment_url(self, latest_comment_url): 'Sets the latest_comment_url of this UserNotificationSubject.\n\n\n :param latest_comment_url: The latest_comment_url of this UserNotificationSubject. # noqa: E501\n :type: str\n ' self._latest_comment_url = latest_comment_url
-4,382,607,989,924,463,600
Sets the latest_comment_url of this UserNotificationSubject. :param latest_comment_url: The latest_comment_url of this UserNotificationSubject. # noqa: E501 :type: str
gitee/models/user_notification_subject.py
latest_comment_url
pygitee/pygitee
python
@latest_comment_url.setter def latest_comment_url(self, latest_comment_url): 'Sets the latest_comment_url of this UserNotificationSubject.\n\n\n :param latest_comment_url: The latest_comment_url of this UserNotificationSubject. # noqa: E501\n :type: str\n ' self._latest_comment_url = latest_comment_url
@property def type(self): 'Gets the type of this UserNotificationSubject. # noqa: E501\n\n\n :return: The type of this UserNotificationSubject. # noqa: E501\n :rtype: str\n ' return self._type
-4,663,851,688,619,458,000
Gets the type of this UserNotificationSubject. # noqa: E501 :return: The type of this UserNotificationSubject. # noqa: E501 :rtype: str
gitee/models/user_notification_subject.py
type
pygitee/pygitee
python
@property def type(self): 'Gets the type of this UserNotificationSubject. # noqa: E501\n\n\n :return: The type of this UserNotificationSubject. # noqa: E501\n :rtype: str\n ' return self._type
@type.setter def type(self, type): 'Sets the type of this UserNotificationSubject.\n\n\n :param type: The type of this UserNotificationSubject. # noqa: E501\n :type: str\n ' self._type = type
-1,639,869,060,860,430,600
Sets the type of this UserNotificationSubject. :param type: The type of this UserNotificationSubject. # noqa: E501 :type: str
gitee/models/user_notification_subject.py
type
pygitee/pygitee
python
@type.setter def type(self, type): 'Sets the type of this UserNotificationSubject.\n\n\n :param type: The type of this UserNotificationSubject. # noqa: E501\n :type: str\n ' self._type = type
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(UserNotificationSubject, dict): for (key, value) in self.items(): result[key] = value return result
-2,232,206,385,540,422,100
Returns the model properties as a dict
gitee/models/user_notification_subject.py
to_dict
pygitee/pygitee
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(UserNotificationSubject, dict): for (key, value) in self.items(): result[key] = value return result
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
5,849,158,643,760,736,000
Returns the string representation of the model
gitee/models/user_notification_subject.py
to_str
pygitee/pygitee
python
def to_str(self): return pprint.pformat(self.to_dict())
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
-8,960,031,694,814,905,000
For `print` and `pprint`
gitee/models/user_notification_subject.py
__repr__
pygitee/pygitee
python
def __repr__(self): return self.to_str()
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, UserNotificationSubject)): return False return (self.__dict__ == other.__dict__)
978,677,337,168,194,600
Returns true if both objects are equal
gitee/models/user_notification_subject.py
__eq__
pygitee/pygitee
python
def __eq__(self, other): if (not isinstance(other, UserNotificationSubject)): return False return (self.__dict__ == other.__dict__)
def __ne__(self, other): 'Returns true if both objects are not equal' return (not (self == other))
7,764,124,047,908,058,000
Returns true if both objects are not equal
gitee/models/user_notification_subject.py
__ne__
pygitee/pygitee
python
def __ne__(self, other): return (not (self == other))
def memoize(f): 'Memoization decorator for functions taking one or more arguments.' class Memo(dict): def __init__(self, f): super(Memo, self).__init__() self.f = f def __call__(self, *args): return self[args] def __missing__(self, key): ret = self[key] = self.f(*key) return ret return Memo(f)
-1,536,678,804,841,662,500
Memoization decorator for functions taking one or more arguments.
lib/webports/util.py
memoize
DiamondLovesYou/webports
python
def memoize(f): class Memo(dict): def __init__(self, f): super(Memo, self).__init__() self.f = f def __call__(self, *args): return self[args] def __missing__(self, key): ret = self[key] = self.f(*key) return ret return Memo(f)
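Usage sketch for the `memoize` decorator above: because the returned Memo object is a dict keyed by the positional-argument tuple, recursive calls hit the cache.

@memoize
def fib(n):
    # Exponential without caching; linear with the Memo dict above.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(80))        # returns immediately once sub-results are cached
print((80,) in fib)   # True -- fib is a dict keyed by the argument tuple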
def log(message, verbosity=LOG_INFO): 'Log a message to the console (stdout).' if (log_level < verbosity): return sys.stdout.write((str(message) + '\n')) sys.stdout.flush()
5,817,673,250,203,344,000
Log a message to the console (stdout).
lib/webports/util.py
log
DiamondLovesYou/webports
python
def log(message, verbosity=LOG_INFO): if (log_level < verbosity): return sys.stdout.write((str(message) + '\n')) sys.stdout.flush()
def log_heading(message, suffix=''): 'Log a colored/highlighted message with optional suffix.' if colorize.enabled: log((colorize(message, 'green') + suffix)) elif (log_level > LOG_WARN): log('###################################################################') log((message + suffix)) log('###################################################################') else: log((message + suffix))
-8,274,271,697,911,984,000
Log a colored/highlighted message with optional suffix.
lib/webports/util.py
log_heading
DiamondLovesYou/webports
python
def log_heading(message, suffix=''): if colorize.enabled: log((colorize(message, 'green') + suffix)) elif (log_level > LOG_WARN): log('###################################################################') log((message + suffix)) log('###################################################################') else: log((message + suffix))
def find_in_path(command_name): "Search user's PATH for a given executable.\n\n Returns:\n Full path to executable.\n " extensions = ('',) if ((not os.path.splitext(command_name)[1]) and (os.name == 'nt')): extensions = ('.bat', '.com', '.exe') for path in os.environ.get('PATH', '').split(os.pathsep): for ext in extensions: full_name = os.path.join(path, (command_name + ext)) if (os.path.exists(full_name) and os.path.isfile(full_name)): return full_name raise error.Error(('command not found: %s' % command_name))
-9,210,549,876,342,332,000
Search user's PATH for a given executable. Returns: Full path to executable.
lib/webports/util.py
find_in_path
DiamondLovesYou/webports
python
def find_in_path(command_name): "Search user's PATH for a given executable.\n\n Returns:\n Full path to executable.\n " extensions = ('',) if ((not os.path.splitext(command_name)[1]) and (os.name == 'nt')): extensions = ('.bat', '.com', '.exe') for path in os.environ.get('PATH', '').split(os.pathsep): for ext in extensions: full_name = os.path.join(path, (command_name + ext)) if (os.path.exists(full_name) and os.path.isfile(full_name)): return full_name raise error.Error(('command not found: %s' % command_name))
def download_file(filename, url): 'Download a file from a given URL.\n\n Args:\n filename: the name of the file to download the URL to.\n url: then URL to fetch.\n ' temp_filename = (filename + '.partial') find_in_path('curl') curl_cmd = ['curl', '--fail', '--location', '--stderr', '-', '-o', temp_filename] if (hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno())): curl_cmd.append('--progress-bar') else: curl_cmd += ['--silent', '--show-error'] curl_cmd.append(url) if (log_level > LOG_WARN): log(('Downloading: %s [%s]' % (url, filename))) else: log(('Downloading: %s' % url.replace(GS_URL, ''))) try: subprocess.check_call(curl_cmd) except subprocess.CalledProcessError as e: raise error.Error(('Error downloading file: %s' % str(e))) os.rename(temp_filename, filename)
-12,641,727,738,878,662
Download a file from a given URL.

Args:
    filename: the name of the file to download the URL to.
    url: the URL to fetch.
lib/webports/util.py
download_file
DiamondLovesYou/webports
python
def download_file(filename, url): 'Download a file from a given URL.\n\n Args:\n filename: the name of the file to download the URL to.\n url: the URL to fetch.\n ' temp_filename = (filename + '.partial') find_in_path('curl') curl_cmd = ['curl', '--fail', '--location', '--stderr', '-', '-o', temp_filename] if (hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno())): curl_cmd.append('--progress-bar') else: curl_cmd += ['--silent', '--show-error'] curl_cmd.append(url) if (log_level > LOG_WARN): log(('Downloading: %s [%s]' % (url, filename))) else: log(('Downloading: %s' % url.replace(GS_URL, ''))) try: subprocess.check_call(curl_cmd) except subprocess.CalledProcessError as e: raise error.Error(('Error downloading file: %s' % str(e))) os.rename(temp_filename, filename)
def check_stamp(filename, contents=None): 'Check that a given stamp file is up-to-date.\n\n Returns: False is the file does not exists or is older that that given\n comparison file, or does not contain the given contents. True otherwise.\n ' if (not os.path.exists(filename)): return False if (contents is not None): with open(filename) as f: if (not f.read().startswith(contents)): return False return True
-4,632,564,270,663,111,000
Check that a given stamp file is up-to-date.

Returns: False if the file does not exist or is older than the given
comparison file, or does not contain the given contents. True otherwise.
lib/webports/util.py
check_stamp
DiamondLovesYou/webports
python
def check_stamp(filename, contents=None): 'Check that a given stamp file is up-to-date.\n\n Returns: False if the file does not exist or is older than the given\n comparison file, or does not contain the given contents. True otherwise.\n ' if (not os.path.exists(filename)): return False if (contents is not None): with open(filename) as f: if (not f.read().startswith(contents)): return False return True
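A small sketch of how `check_stamp` above behaves; the stamp path and its contents are hypothetical.

import os
import tempfile

stamp = os.path.join(tempfile.mkdtemp(), 'zlib.info')

print(check_stamp(stamp, 'VERSION=1.2.11'))   # False -- file does not exist yet

with open(stamp, 'w') as f:
    f.write('VERSION=1.2.11\nBUILD_CONFIG=release\n')

print(check_stamp(stamp, 'VERSION=1.2.11'))   # True -- exists and starts with the contents
print(check_stamp(stamp, 'VERSION=1.2.12'))   # False -- contents do not match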
@memoize def get_sdk_root(): 'Returns the root of the currently configured Native Client SDK.' root = os.environ.get('NACL_SDK_ROOT') if (root is None): local_sdk_root = os.path.join(paths.OUT_DIR, 'nacl_sdk') if os.path.exists(local_sdk_root): root = local_sdk_root else: raise error.Error('$NACL_SDK_ROOT not set') if (sys.platform == 'cygwin'): root = root.replace('\\', '/') return root
-2,800,879,476,471,468,000
Returns the root of the currently configured Native Client SDK.
lib/webports/util.py
get_sdk_root
DiamondLovesYou/webports
python
@memoize def get_sdk_root(): root = os.environ.get('NACL_SDK_ROOT') if (root is None): local_sdk_root = os.path.join(paths.OUT_DIR, 'nacl_sdk') if os.path.exists(local_sdk_root): root = local_sdk_root else: raise error.Error('$NACL_SDK_ROOT not set') if (sys.platform == 'cygwin'): root = root.replace('\\', '/') return root
@memoize def get_sdk_version(): 'Returns the version (as a string) of the current SDK.' getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') version = subprocess.check_output([getos, '--sdk-version']).strip() return version
8,809,473,329,846,702,000
Returns the version (as a string) of the current SDK.
lib/webports/util.py
get_sdk_version
DiamondLovesYou/webports
python
@memoize def get_sdk_version(): getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') version = subprocess.check_output([getos, '--sdk-version']).strip() return version
def check_sdk_version(version): "Returns True if the currently configured SDK is 'version' or above." return (int(get_sdk_version()) >= int(version))
-9,164,487,087,495,700,000
Returns True if the currently configured SDK is 'version' or above.
lib/webports/util.py
check_sdk_version
DiamondLovesYou/webports
python
def check_sdk_version(version): return (int(get_sdk_version()) >= int(version))
@memoize def get_sdk_revision(): 'Returns the revision of the currently configured Native Client SDK.' getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') version = subprocess.check_output([getos, '--sdk-revision']).strip() return int(version)
164,433,028,036,483,260
Returns the revision of the currently configured Native Client SDK.
lib/webports/util.py
get_sdk_revision
DiamondLovesYou/webports
python
@memoize def get_sdk_revision(): getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') version = subprocess.check_output([getos, '--sdk-revision']).strip() return int(version)
@memoize def get_platform(): 'Returns the current platform name according to getos.py.' getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') platform = subprocess.check_output([getos]).strip() return platform
-7,034,581,924,587,687,000
Returns the current platform name according to getos.py.
lib/webports/util.py
get_platform
DiamondLovesYou/webports
python
@memoize def get_platform(): getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') platform = subprocess.check_output([getos]).strip() return platform
@memoize def get_toolchain_root(config): 'Returns the toolchain folder for a given NaCl toolchain.' if (config.toolchain == 'emscripten'): return get_emscripten_root() platform = get_platform() if (config.toolchain in ('pnacl', 'clang-newlib')): tc_dir = os.path.join(('%s_pnacl' % platform)) else: tc_arch = {'arm': 'arm', 'i686': 'x86', 'x86_64': 'x86'}[config.arch] tc_dir = ('%s_%s_%s' % (platform, tc_arch, config.libc)) return os.path.join(get_sdk_root(), 'toolchain', tc_dir)
1,493,153,723,794,381,000
Returns the toolchain folder for a given NaCl toolchain.
lib/webports/util.py
get_toolchain_root
DiamondLovesYou/webports
python
@memoize def get_toolchain_root(config): if (config.toolchain == 'emscripten'): return get_emscripten_root() platform = get_platform() if (config.toolchain in ('pnacl', 'clang-newlib')): tc_dir = os.path.join(('%s_pnacl' % platform)) else: tc_arch = {'arm': 'arm', 'i686': 'x86', 'x86_64': 'x86'}[config.arch] tc_dir = ('%s_%s_%s' % (platform, tc_arch, config.libc)) return os.path.join(get_sdk_root(), 'toolchain', tc_dir)
@memoize def get_install_root(config): 'Returns the install location given a build configuration.' tc_dir = get_toolchain_root(config) if (config.toolchain == 'emscripten'): return os.path.join(tc_dir, 'system', 'local') if (config.toolchain == 'pnacl'): tc_dir = os.path.join(tc_dir, 'le32-nacl') else: tc_dir = os.path.join(tc_dir, ('%s-nacl' % config.arch)) return os.path.join(tc_dir, 'usr')
8,301,899,291,888,007,000
Returns the install location given a build configuration.
lib/webports/util.py
get_install_root
DiamondLovesYou/webports
python
@memoize def get_install_root(config): tc_dir = get_toolchain_root(config) if (config.toolchain == 'emscripten'): return os.path.join(tc_dir, 'system', 'local') if (config.toolchain == 'pnacl'): tc_dir = os.path.join(tc_dir, 'le32-nacl') else: tc_dir = os.path.join(tc_dir, ('%s-nacl' % config.arch)) return os.path.join(tc_dir, 'usr')
@memoize def get_install_stamp_root(config): 'Returns the installation metadata folder for the give configuration.' tc_root = get_install_root(config) return os.path.join(tc_root, 'var', 'lib', 'npkg')
-5,050,390,960,978,637,000
Returns the installation metadata folder for the given configuration.
lib/webports/util.py
get_install_stamp_root
DiamondLovesYou/webports
python
@memoize def get_install_stamp_root(config): tc_root = get_install_root(config) return os.path.join(tc_root, 'var', 'lib', 'npkg')
def get_install_stamp(package_name, config): 'Returns the filename of the install stamp for a given package.\n\n This file is written at install time and contains metadata\n about the installed package.\n ' root = get_install_stamp_root(config) return os.path.join(root, (package_name + '.info'))
-550,498,234,523,841,660
Returns the filename of the install stamp for a given package.

This file is written at install time and contains metadata
about the installed package.
lib/webports/util.py
get_install_stamp
DiamondLovesYou/webports
python
def get_install_stamp(package_name, config): 'Returns the filename of the install stamp for a given package.\n\n This file is written at install time and contains metadata\n about the installed package.\n ' root = get_install_stamp_root(config) return os.path.join(root, (package_name + '.info'))
def get_list_file(package_name, config): 'Returns the filename of the list of installed files for a given package.\n\n This file is written at install time.\n ' root = get_install_stamp_root(config) return os.path.join(root, (package_name + '.list'))
-8,665,224,385,076,447,000
Returns the filename of the list of installed files for a given package. This file is written at install time.
lib/webports/util.py
get_list_file
DiamondLovesYou/webports
python
def get_list_file(package_name, config): 'Returns the filename of the list of installed files for a given package.\n\n This file is written at install time.\n ' root = get_install_stamp_root(config) return os.path.join(root, (package_name + '.list'))
def is_installed(package_name, config, stamp_content=None): 'Returns True if the given package is installed.' stamp = get_install_stamp(package_name, config) result = check_stamp(stamp, stamp_content) return result
4,999,615,864,298,275,000
Returns True if the given package is installed.
lib/webports/util.py
is_installed
DiamondLovesYou/webports
python
def is_installed(package_name, config, stamp_content=None): stamp = get_install_stamp(package_name, config) result = check_stamp(stamp, stamp_content) return result
def check_sdk_root(): 'Check validity of NACL_SDK_ROOT.' root = get_sdk_root() if (not os.path.isdir(root)): raise error.Error(('$NACL_SDK_ROOT does not exist: %s' % root)) landmark = os.path.join(root, 'tools', 'getos.py') if (not os.path.exists(landmark)): raise error.Error(("$NACL_SDK_ROOT (%s) doesn't look right. Couldn't find landmark file (%s)" % (root, landmark))) if (not check_sdk_version(MIN_SDK_VERSION)): raise error.Error(('This version of webports requires at least version %s of\nthe NaCl SDK. The version in $NACL_SDK_ROOT is %s. If you want\nto use webports with an older version of the SDK please checkout\none of the pepper_XX branches (or run with\n--skip-sdk-version-check).' % (MIN_SDK_VERSION, get_sdk_version())))
-3,336,221,697,445,436,400
Check validity of NACL_SDK_ROOT.
lib/webports/util.py
check_sdk_root
DiamondLovesYou/webports
python
def check_sdk_root(): root = get_sdk_root() if (not os.path.isdir(root)): raise error.Error(('$NACL_SDK_ROOT does not exist: %s' % root)) landmark = os.path.join(root, 'tools', 'getos.py') if (not os.path.exists(landmark)): raise error.Error(("$NACL_SDK_ROOT (%s) doesn't look right. Couldn't find landmark file (%s)" % (root, landmark))) if (not check_sdk_version(MIN_SDK_VERSION)): raise error.Error(('This version of webports requires at least version %s of\nthe NaCl SDK. The version in $NACL_SDK_ROOT is %s. If you want\nto use webports with an older version of the SDK please checkout\none of the pepper_XX branches (or run with\n--skip-sdk-version-check).' % (MIN_SDK_VERSION, get_sdk_version())))
def hash_file(filename): 'Return the SHA1 (in hex format) of the contents of the given file.' block_size = (100 * 1024) sha1 = hashlib.sha1() with open(filename) as f: while True: data = f.read(block_size) if (not data): break sha1.update(data) return sha1.hexdigest()
-6,489,478,050,917,186,000
Return the SHA1 (in hex format) of the contents of the given file.
lib/webports/util.py
hash_file
DiamondLovesYou/webports
python
def hash_file(filename): block_size = (100 * 1024) sha1 = hashlib.sha1() with open(filename) as f: while True: data = f.read(block_size) if (not data): break sha1.update(data) return sha1.hexdigest()
def verify_hash(filename, sha1): 'Return True if the sha1 of the given file matches the sha1 passed in.' file_sha1 = hash_file(filename) if (sha1 != file_sha1): raise HashVerificationError(('verification failed: %s\nExpected: %s\nActual: %s' % (filename, sha1, file_sha1)))
-2,977,212,297,900,223,000
Return True if the sha1 of the given file matches the sha1 passed in.
lib/webports/util.py
verify_hash
DiamondLovesYou/webports
python
def verify_hash(filename, sha1): file_sha1 = hash_file(filename) if (sha1 != file_sha1): raise HashVerificationError(('verification failed: %s\nExpected: %s\nActual: %s' % (filename, sha1, file_sha1)))
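The hash_file/verify_hash pair above reads files in 100 KiB chunks; note the text-mode open is Python 2 style. A self-contained sketch of the same idea that also runs on Python 3 (binary mode) looks roughly like this; the function names here are made up and are not part of the repository.

import hashlib

def sha1_of_file(path, block_size=100 * 1024):
    # Same chunked approach as hash_file above, but opened in binary mode.
    sha1 = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(block_size), b''):
            sha1.update(chunk)
    return sha1.hexdigest()

def verify_sha1(path, expected):
    # Compare against an expected digest and fail loudly on mismatch.
    actual = sha1_of_file(path)
    if actual != expected:
        raise ValueError('verification failed: %s\nExpected: %s\nActual: %s'
                         % (path, expected, actual))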
def remove_tree(directory): 'Recursively remove a directory and its contents.' if (not os.path.exists(directory)): return if (not os.path.isdir(directory)): raise error.Error('RemoveTree: not a directory: %s', directory) shutil.rmtree(directory)
-2,689,833,118,508,521,000
Recursively remove a directory and its contents.
lib/webports/util.py
remove_tree
DiamondLovesYou/webports
python
def remove_tree(directory): if (not os.path.exists(directory)): return if (not os.path.isdir(directory)): raise error.Error('RemoveTree: not a directory: %s', directory) shutil.rmtree(directory)
def rel_path(filename): 'Return a pathname relative to the root of the webports src tree.\n\n This is used mostly to make output more readable when printing filenames.' return os.path.relpath(filename, paths.NACLPORTS_ROOT)
3,528,904,553,062,792,000
Return a pathname relative to the root of the webports src tree.

This is used mostly to make output more readable when printing filenames.
lib/webports/util.py
rel_path
DiamondLovesYou/webports
python
def rel_path(filename): 'Return a pathname relative to the root of the webports src tree.\n\n This is used mostly to make output more readable when printing filenames.' return os.path.relpath(filename, paths.NACLPORTS_ROOT)
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, properties: Optional[pulumi.Input[pulumi.InputType['UserPropertiesArgs']]]=None, user_settings_name: Optional[pulumi.Input[str]]=None, __props__=None, __name__=None, __opts__=None): "\n Response to get user settings\n API Version: 2018-10-01.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[pulumi.InputType['UserPropertiesArgs']] properties: The cloud shell user settings properties.\n :param pulumi.Input[str] user_settings_name: The name of the user settings\n " if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = _utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if ((properties is None) and (not opts.urn)): raise TypeError("Missing required property 'properties'") __props__['properties'] = properties __props__['user_settings_name'] = user_settings_name alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:portal:UserSettings'), pulumi.Alias(type_='azure-native:portal/latest:UserSettings'), pulumi.Alias(type_='azure-nextgen:portal/latest:UserSettings'), pulumi.Alias(type_='azure-native:portal/v20181001:UserSettings'), pulumi.Alias(type_='azure-nextgen:portal/v20181001:UserSettings')]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(UserSettings, __self__).__init__('azure-native:portal:UserSettings', resource_name, __props__, opts)
-4,359,774,321,173,679,000
Response to get user settings API Version: 2018-10-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['UserPropertiesArgs']] properties: The cloud shell user settings properties. :param pulumi.Input[str] user_settings_name: The name of the user settings
sdk/python/pulumi_azure_native/portal/user_settings.py
__init__
pulumi-bot/pulumi-azure-native
python
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, properties: Optional[pulumi.Input[pulumi.InputType['UserPropertiesArgs']]]=None, user_settings_name: Optional[pulumi.Input[str]]=None, __props__=None, __name__=None, __opts__=None): "\n Response to get user settings\n API Version: 2018-10-01.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[pulumi.InputType['UserPropertiesArgs']] properties: The cloud shell user settings properties.\n :param pulumi.Input[str] user_settings_name: The name of the user settings\n " if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = _utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if ((properties is None) and (not opts.urn)): raise TypeError("Missing required property 'properties'") __props__['properties'] = properties __props__['user_settings_name'] = user_settings_name alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:portal:UserSettings'), pulumi.Alias(type_='azure-native:portal/latest:UserSettings'), pulumi.Alias(type_='azure-nextgen:portal/latest:UserSettings'), pulumi.Alias(type_='azure-native:portal/v20181001:UserSettings'), pulumi.Alias(type_='azure-nextgen:portal/v20181001:UserSettings')]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(UserSettings, __self__).__init__('azure-native:portal:UserSettings', resource_name, __props__, opts)
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'UserSettings': "\n Get an existing UserSettings resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__['properties'] = None return UserSettings(resource_name, opts=opts, __props__=__props__)
-6,685,719,736,812,812,000
Get an existing UserSettings resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_azure_native/portal/user_settings.py
get
pulumi-bot/pulumi-azure-native
python
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'UserSettings': "\n Get an existing UserSettings resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__['properties'] = None return UserSettings(resource_name, opts=opts, __props__=__props__)
@property @pulumi.getter def properties(self) -> pulumi.Output['outputs.UserPropertiesResponse']: '\n The cloud shell user settings properties.\n ' return pulumi.get(self, 'properties')
899,342,624,073,554,000
The cloud shell user settings properties.
sdk/python/pulumi_azure_native/portal/user_settings.py
properties
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter def properties(self) -> pulumi.Output['outputs.UserPropertiesResponse']: '\n \n ' return pulumi.get(self, 'properties')
def __get_spike_trains(spike_trains): 'Make sure SpikeTrainsAPI object is always returned' if isinstance(spike_trains, six.string_types): return SpikeTrains.load(spike_trains) elif isinstance(spike_trains, (SpikeTrains, SpikeTrainsAPI)): return spike_trains raise AttributeError('Could not parse spiketrains. Pass in file-path, SpikeTrains object, or list of the previous')
-1,422,286,837,447,889,700
Make sure SpikeTrainsAPI object is always returned
bmtk/utils/reports/spike_trains/plotting.py
__get_spike_trains
chenziao/bmtk
python
def __get_spike_trains(spike_trains): if isinstance(spike_trains, six.string_types): return SpikeTrains.load(spike_trains) elif isinstance(spike_trains, (SpikeTrains, SpikeTrainsAPI)): return spike_trains raise AttributeError('Could not parse spiketrains. Pass in file-path, SpikeTrains object, or list of the previous')
def __get_population(spike_trains, population): 'Helper function to figure out which population of nodes to use.' pops = spike_trains.populations if (population is None): if (len(pops) > 1): raise Exception('SpikeTrains contains more than one population of nodes. Use "population" parameter to specify population to display.') else: return pops[0] elif (population not in pops): raise Exception('Could not find node population "{}" in SpikeTrains, only found {}'.format(population, pops)) else: return population
5,916,561,399,342,816,000
Helper function to figure out which population of nodes to use.
bmtk/utils/reports/spike_trains/plotting.py
__get_population
chenziao/bmtk
python
def __get_population(spike_trains, population): pops = spike_trains.populations if (population is None): if (len(pops) > 1): raise Exception('SpikeTrains contains more than one population of nodes. Use "population" parameter to specify population to display.') else: return pops[0] elif (population not in pops): raise Exception('Could not find node population "{}" in SpikeTrains, only found {}'.format(population, pops)) else: return population
def __get_node_groups(spike_trains, node_groups, population): "Helper function for parsing the 'node_groups' params" if (node_groups is None): selected_nodes = spike_trains.node_ids(population=population) return ([{'node_ids': selected_nodes, 'c': 'b'}], selected_nodes) else: node_groups = copy.deepcopy(node_groups) selected_nodes = np.array(node_groups[0]['node_ids']) for grp in node_groups[1:]: if ('node_ids' not in grp): raise AttributeError('Could not find "node_ids" key in node_groups parameter.') selected_nodes = np.concatenate((selected_nodes, np.array(grp['node_ids']))) return (node_groups, selected_nodes)
7,747,009,254,361,503,000
Helper function for parsing the 'node_groups' params
bmtk/utils/reports/spike_trains/plotting.py
__get_node_groups
chenziao/bmtk
python
def __get_node_groups(spike_trains, node_groups, population): if (node_groups is None): selected_nodes = spike_trains.node_ids(population=population) return ([{'node_ids': selected_nodes, 'c': 'b'}], selected_nodes) else: node_groups = copy.deepcopy(node_groups) selected_nodes = np.array(node_groups[0]['node_ids']) for grp in node_groups[1:]: if ('node_ids' not in grp): raise AttributeError('Could not find "node_ids" key in node_groups parameter.') selected_nodes = np.concatenate((selected_nodes, np.array(grp['node_ids']))) return (node_groups, selected_nodes)
def plot_raster(spike_trains, with_histogram=True, population=None, node_groups=None, times=None, title=None, show=True, save_as=None): "will create a raster plot (plus optional histogram) from a SpikeTrains object or SONATA Spike-Trains file. Will\n return the figure\n\n By default will display all nodes, if you want to only display a subset of nodes and/or group together different\n nodes (by node_id) by dot colors and labels then you can use the node_groups, which should be a list of dicts::\n\n plot_raster('/path/to/my/spike.h5',\n node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'}, # first 70 nodes are blue pyr cells\n {'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}]) # last 30 nodes are red inh cells\n\n The histogram will not be grouped.\n\n :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.\n :param with_histogram: If True a histogram will be shown as a small subplot below the scatter plot. Default\n True.\n :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which\n nodes to actually plot. If only one population exists and population=None then the function will find it by\n default.\n :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should\n be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for\n label and color. If None all nodes will be labeled and colored the same.\n :param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking\n data.\n :param title: str, Use to add a title. Default no title\n :param show: bool to display or not display plot. default True.\n :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not\n save plot.\n :return: matplotlib figure.Figure object\n " spike_trains = __get_spike_trains(spike_trains=spike_trains) pop = __get_population(spike_trains=spike_trains, population=population) (node_groups, selected_ids) = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop) show_legend = False (min_id, max_id) = (np.inf, (- 1)) spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False) spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)] if (times is not None): (min_ts, max_ts) = (times[0], times[1]) spikes_df = spikes_df[((spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1]))] else: min_ts = np.min(spikes_df['timestamps']) max_ts = np.max(spikes_df['timestamps']) if with_histogram: (fig, axes) = plt.subplots(2, 1, gridspec_kw={'height_ratios': [7, 1]}, squeeze=True) raster_axes = axes[0] bottom_axes = hist_axes = axes[1] else: (fig, axes) = plt.subplots(1, 1) bottom_axes = raster_axes = axes hist_axes = None for node_grp in node_groups: grp_ids = node_grp.pop('node_ids') grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)] show_legend = (show_legend or ('label' in node_grp)) min_id = np.min([np.min(grp_ids), min_id]) max_id = np.max([np.max(grp_ids), max_id]) raster_axes.scatter(grp_spikes['timestamps'], grp_spikes['node_ids'], lw=0, s=8, **node_grp) if show_legend: raster_axes.legend(loc='upper right') if title: raster_axes.set_title(title) raster_axes.set_ylabel('node_ids') raster_axes.set_ylim((min_id - 0.5), (max_id + 1)) raster_axes.set_xlim(min_ts, (max_ts + 1)) bottom_axes.set_xlabel('timestamps ({})'.format(spike_trains.units(population=pop))) if with_histogram: hist_axes.hist(spikes_df['timestamps'], 100) hist_axes.set_xlim((min_ts - 0.5), (max_ts + 1)) hist_axes.axes.get_yaxis().set_visible(False) raster_axes.set_xticks([]) if save_as: plt.savefig(save_as) if show: plt.show() return fig
-5,944,393,047,132,877,000
will create a raster plot (plus optional histogram) from a SpikeTrains object or SONATA Spike-Trains file. Will return the figure By default will display all nodes, if you want to only display a subset of nodes and/or group together different nodes (by node_id) by dot colors and labels then you can use the node_groups, which should be a list of dicts:: plot_raster('/path/to/my/spike.h5', node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'}, # first 70 nodes are blue pyr cells {'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}]) # last 30 nodes are red inh cells The histogram will not be grouped. :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file. :param with_histogram: If True a histogram will be shown as a small subplot below the scatter plot. Default True. :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which nodes to actually plot. If only one population exists and population=None then the function will find it by default. :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for label and color. If None all nodes will be labeled and colored the same. :param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking data. :param title: str, Use to add a title. Default no title :param show: bool to display or not display plot. default True. :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not save plot. :return: matplotlib figure.Figure object
bmtk/utils/reports/spike_trains/plotting.py
plot_raster
chenziao/bmtk
python
def plot_raster(spike_trains, with_histogram=True, population=None, node_groups=None, times=None, title=None, show=True, save_as=None): "will create a raster plot (plus optional histogram) from a SpikeTrains object or SONATA Spike-Trains file. Will\n return the figure\n\n By default will display all nodes, if you want to only display a subset of nodes and/or group together different\n nodes (by node_id) by dot colors and labels then you can use the node_groups, which should be a list of dicts::\n\n plot_raster('/path/to/my/spike.h5',\n node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'}, # first 70 nodes are blue pyr cells\n {'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}]) # last 30 nodes are red inh cells\n\n The histogram will not be grouped.\n\n :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.\n :param with_histogram: If True a histogram will be shown as a small subplot below the scatter plot. Default\n True.\n :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which\n nodes to actually plot. If only one population exists and population=None then the function will find it by\n default.\n :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should\n be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for\n label and color. If None all nodes will be labeled and colored the same.\n :param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking\n data.\n :param title: str, Use to add a title. Default no title\n :param show: bool to display or not display plot. default True.\n :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not\n save plot.\n :return: matplotlib figure.Figure object\n " spike_trains = __get_spike_trains(spike_trains=spike_trains) pop = __get_population(spike_trains=spike_trains, population=population) (node_groups, selected_ids) = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop) show_legend = False (min_id, max_id) = (np.inf, (- 1)) spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False) spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)] if (times is not None): (min_ts, max_ts) = (times[0], times[1]) spikes_df = spikes_df[((spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1]))] else: min_ts = np.min(spikes_df['timestamps']) max_ts = np.max(spikes_df['timestamps']) if with_histogram: (fig, axes) = plt.subplots(2, 1, gridspec_kw={'height_ratios': [7, 1]}, squeeze=True) raster_axes = axes[0] bottom_axes = hist_axes = axes[1] else: (fig, axes) = plt.subplots(1, 1) bottom_axes = raster_axes = axes hist_axes = None for node_grp in node_groups: grp_ids = node_grp.pop('node_ids') grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)] show_legend = (show_legend or ('label' in node_grp)) min_id = np.min([np.min(grp_ids), min_id]) max_id = np.max([np.max(grp_ids), max_id]) raster_axes.scatter(grp_spikes['timestamps'], grp_spikes['node_ids'], lw=0, s=8, **node_grp) if show_legend: raster_axes.legend(loc='upper right') if title: raster_axes.set_title(title) raster_axes.set_ylabel('node_ids') raster_axes.set_ylim((min_id - 0.5), (max_id + 1)) raster_axes.set_xlim(min_ts, (max_ts + 1)) bottom_axes.set_xlabel('timestamps ({})'.format(spike_trains.units(population=pop))) if with_histogram: hist_axes.hist(spikes_df['timestamps'], 100) hist_axes.set_xlim((min_ts - 0.5), (max_ts + 1)) hist_axes.axes.get_yaxis().set_visible(False) raster_axes.set_xticks([]) if save_as: plt.savefig(save_as) if show: plt.show() return fig
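Usage sketch for plot_raster (editorial addition, not a dataset row): it assumes bmtk is installed and importable at the path shown above, and the spikes file, node-id ranges and time window are hypothetical placeholders.

from bmtk.utils.reports.spike_trains.plotting import plot_raster

# Two colour-coded groups, restricted to a 0-3000 ms window (assuming millisecond timestamps).
fig = plot_raster(
    'output/spikes.h5',  # hypothetical SONATA spikes file
    node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'},
                 {'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}],
    times=(0.0, 3000.0),
    title='Example raster',
    show=False,
    save_as='raster.png',
)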
def plot_rates(spike_trains, population=None, node_groups=None, times=None, smoothing=False, smoothing_params=None, title=None, show=True, save_as=None): 'Calculate and plot the rates of each node in a SpikeTrains object or SONATA Spike-Trains file. If start and stop\n times are not specified from the "times" parameter, will try to parse values from the timestamps data.\n\n If you want to only display a subset of nodes and/or group together different nodes (by node_id) by dot colors and\n labels then you can use the node_groups, which should be a list of dicts::\n\n plot_rates(\'/path/to/my/spike.h5\',\n node_groups=[{\'node_ids\': range(0, 70), \'c\': \'b\', \'label\': \'pyr\'},\n {\'node_ids\': range(70, 100), \'c\': \'r\', \'label\': \'inh\'}])\n\n :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.\n :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which\n nodes to actually plot. If only one population exists and population=None then the function will find it by\n default.\n :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should\n be a dictionary with a \'node_ids\' key with a list of the ids. You can also add \'label\' and \'c\' keys for\n label and color. If None all nodes will be labeled and colored the same.\n :param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking\n data.\n :param smoothing: Bool or function. Used to smooth the data. By default (False) no smoothing will be done. If True\n will use a moving average smoothing function. Or use a function pointer.\n :param smoothing_params: dict, parameters when using a function pointer smoothing value.\n :param title: str, Use to add a title. Default no title\n :param show: bool to display or not display plot. default True.\n :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not\n save plot.\n :return: matplotlib figure.Figure object\n ' spike_trains = __get_spike_trains(spike_trains=spike_trains) pop = __get_population(spike_trains=spike_trains, population=population) (node_groups, selected_ids) = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop) smoothing_params = (smoothing_params or {}) if isinstance(smoothing, types.FunctionType): smoothing_fnc = partial(smoothing, **smoothing_params) elif smoothing: smoothing_fnc = partial(moving_average, **smoothing_params) else: smoothing_fnc = (lambda d: d) spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False) spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)] if (times is not None): recording_interval = (times[1] - times[0]) spikes_df = spikes_df[((spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1]))] else: recording_interval = (np.max(spikes_df['timestamps']) - np.min(spikes_df['timestamps'])) (fig, axes) = plt.subplots() show_legend = False for node_grp in node_groups: show_legend = (show_legend or ('label' in node_grp)) grp_ids = node_grp.pop('node_ids') grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)] spike_rates = (grp_spikes.groupby('node_ids').size() / (recording_interval / 1000.0)) axes.plot(np.array(spike_rates.index), smoothing_fnc(spike_rates), '.', **node_grp) axes.set_ylabel('Firing Rates (Hz)') axes.set_xlabel('node_ids') if show_legend: axes.legend() if title: axes.set_title(title) if save_as: plt.savefig(save_as) if show: plt.show() return fig
-5,521,680,345,867,462,000
Calculate and plot the rates of each node in a SpikeTrains object or SONATA Spike-Trains file. If start and stop times are not specified from the "times" parameter, will try to parse values from the timestamps data. If you want to only display a subset of nodes and/or group together different nodes (by node_id) by dot colors and labels then you can use the node_groups, which should be a list of dicts:: plot_rates('/path/to/my/spike.h5', node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'}, {'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}]) :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file. :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which nodes to actually plot. If only one population exists and population=None then the function will find it by default. :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for label and color. If None all nodes will be labeled and colored the same. :param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking data. :param smoothing: Bool or function. Used to smooth the data. By default (False) no smoothing will be done. If True will use a moving average smoothing function. Or use a function pointer. :param smoothing_params: dict, parameters when using a function pointer smoothing value. :param title: str, Use to add a title. Default no title :param show: bool to display or not display plot. default True. :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not save plot. :return: matplotlib figure.Figure object
bmtk/utils/reports/spike_trains/plotting.py
plot_rates
chenziao/bmtk
python
def plot_rates(spike_trains, population=None, node_groups=None, times=None, smoothing=False, smoothing_params=None, title=None, show=True, save_as=None): 'Calculate and plot the rates of each node in a SpikeTrains object or SONATA Spike-Trains file. If start and stop\n times are not specified from the "times" parameter, will try to parse values from the timestamps data.\n\n If you want to only display a subset of nodes and/or group together different nodes (by node_id) by dot colors and\n labels then you can use the node_groups, which should be a list of dicts::\n\n plot_rates(\'/path/to/my/spike.h5\',\n node_groups=[{\'node_ids\': range(0, 70), \'c\': \'b\', \'label\': \'pyr\'},\n {\'node_ids\': range(70, 100), \'c\': \'r\', \'label\': \'inh\'}])\n\n :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.\n :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which\n nodes to actually plot. If only one population exists and population=None then the function will find it by\n default.\n :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should\n be a dictionary with a \'node_ids\' key with a list of the ids. You can also add \'label\' and \'c\' keys for\n label and color. If None all nodes will be labeled and colored the same.\n :param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking\n data.\n :param smoothing: Bool or function. Used to smooth the data. By default (False) no smoothing will be done. If True\n will use a moving average smoothing function. Or use a function pointer.\n :param smoothing_params: dict, parameters when using a function pointer smoothing value.\n :param title: str, Use to add a title. Default no title\n :param show: bool to display or not display plot. default True.\n :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not\n save plot.\n :return: matplotlib figure.Figure object\n ' spike_trains = __get_spike_trains(spike_trains=spike_trains) pop = __get_population(spike_trains=spike_trains, population=population) (node_groups, selected_ids) = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop) smoothing_params = (smoothing_params or {}) if isinstance(smoothing, types.FunctionType): smoothing_fnc = partial(smoothing, **smoothing_params) elif smoothing: smoothing_fnc = partial(moving_average, **smoothing_params) else: smoothing_fnc = (lambda d: d) spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False) spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)] if (times is not None): recording_interval = (times[1] - times[0]) spikes_df = spikes_df[((spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1]))] else: recording_interval = (np.max(spikes_df['timestamps']) - np.min(spikes_df['timestamps'])) (fig, axes) = plt.subplots() show_legend = False for node_grp in node_groups: show_legend = (show_legend or ('label' in node_grp)) grp_ids = node_grp.pop('node_ids') grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)] spike_rates = (grp_spikes.groupby('node_ids').size() / (recording_interval / 1000.0)) axes.plot(np.array(spike_rates.index), smoothing_fnc(spike_rates), '.', **node_grp) axes.set_ylabel('Firing Rates (Hz)') axes.set_xlabel('node_ids') if show_legend: axes.legend() if title: axes.set_title(title) if save_as: plt.savefig(save_as) if show: plt.show() return fig
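A matching sketch for plot_rates (editorial addition); inputs are hypothetical, and smoothing=True relies on the module's moving_average helper referenced in the body above.

from bmtk.utils.reports.spike_trains.plotting import plot_rates

fig = plot_rates(
    'output/spikes.h5',  # hypothetical spikes file
    node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'},
                 {'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}],
    smoothing=True,      # moving-average smoothing of the per-node rates
    show=False,
    save_as='rates.png',
)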
def plot_rates_boxplot(spike_trains, population=None, node_groups=None, times=None, title=None, show=True, save_as=None): 'Creates a box plot of the firing rates taken from a SpikeTrains object or SONATA Spike-Trains file. If start\n and stop times are not specified from the "times" parameter, will try to parse values from the timestamps data.\n\n By default will plot all nodes together. To only display a subset of the nodes and/or create groups of nodes use\n the node_groups options::\n\n plot_rates_boxplot(\n \'/path/to/my/spike.h5\',\n node_groups=[{\'node_ids\': range(0, 70), \'label\': \'pyr\'},\n {\'node_ids\': range(70, 100), \'label\': \'inh\'}]\n )\n\n :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.\n :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which\n nodes to actually plot. If only one population exists and population=None then the function will find it by\n default.\n :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should\n be a dictionary with a \'node_ids\' key with a list of the ids. You can also add \'label\' and \'c\' keys for\n label and color. If None all nodes will be labeled and colored the same.\n :param title: str, Use to add a title. Default no tile\n :param show: bool to display or not display plot. default True.\n :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not\n save plot.\n :return: matplotlib figure.Figure object\n ' spike_trains = __get_spike_trains(spike_trains=spike_trains) pop = __get_population(spike_trains=spike_trains, population=population) (node_groups, selected_ids) = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop) spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False) spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)] if (times is not None): recording_interval = (times[1] - times[0]) spikes_df = spikes_df[((spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1]))] else: recording_interval = (np.max(spikes_df['timestamps']) - np.min(spikes_df['timestamps'])) (fig, axes) = plt.subplots() rates_data = [] rates_labels = [] if ((len(node_groups) == 1) and ('label' not in node_groups[0])): node_groups[0]['label'] = 'All Nodes' for (i, node_grp) in enumerate(node_groups): rates_labels.append(node_grp.get('label', 'Node Group {}'.format(i))) grp_ids = node_grp.pop('node_ids') grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)] spike_rates = (grp_spikes.groupby('node_ids').size() / (recording_interval / 1000.0)) rates_data.append(spike_rates) axes.boxplot(rates_data) axes.set_ylabel('Firing Rates (Hz)') axes.set_xticklabels(rates_labels) if title: axes.set_title(title) if save_as: plt.savefig(save_as) if show: plt.show() return fig
4,023,441,265,487,282,700
Creates a box plot of the firing rates taken from a SpikeTrains object or SONATA Spike-Trains file. If start and stop times are not specified from the "times" parameter, will try to parse values from the timestamps data. By default will plot all nodes together. To only display a subset of the nodes and/or create groups of nodes use the node_groups options:: plot_rates_boxplot( '/path/to/my/spike.h5', node_groups=[{'node_ids': range(0, 70), 'label': 'pyr'}, {'node_ids': range(70, 100), 'label': 'inh'}] ) :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file. :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which nodes to actually plot. If only one population exists and population=None then the function will find it by default. :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for label and color. If None all nodes will be labeled and colored the same. :param title: str, Use to add a title. Default no title :param show: bool to display or not display plot. default True. :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not save plot. :return: matplotlib figure.Figure object
bmtk/utils/reports/spike_trains/plotting.py
plot_rates_boxplot
chenziao/bmtk
python
def plot_rates_boxplot(spike_trains, population=None, node_groups=None, times=None, title=None, show=True, save_as=None): 'Creates a box plot of the firing rates taken from a SpikeTrains object or SONATA Spike-Trains file. If start\n and stop times are not specified from the "times" parameter, will try to parse values from the timestamps data.\n\n By default will plot all nodes together. To only display a subset of the nodes and/or create groups of nodes use\n the node_groups options::\n\n plot_rates_boxplot(\n \'/path/to/my/spike.h5\',\n node_groups=[{\'node_ids\': range(0, 70), \'label\': \'pyr\'},\n {\'node_ids\': range(70, 100), \'label\': \'inh\'}]\n )\n\n :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.\n :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which\n nodes to actually plot. If only one population exists and population=None then the function will find it by\n default.\n :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should\n be a dictionary with a \'node_ids\' key with a list of the ids. You can also add \'label\' and \'c\' keys for\n label and color. If None all nodes will be labeled and colored the same.\n :param title: str, Use to add a title. Default no tile\n :param show: bool to display or not display plot. default True.\n :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not\n save plot.\n :return: matplotlib figure.Figure object\n ' spike_trains = __get_spike_trains(spike_trains=spike_trains) pop = __get_population(spike_trains=spike_trains, population=population) (node_groups, selected_ids) = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop) spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False) spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)] if (times is not None): recording_interval = (times[1] - times[0]) spikes_df = spikes_df[((spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1]))] else: recording_interval = (np.max(spikes_df['timestamps']) - np.min(spikes_df['timestamps'])) (fig, axes) = plt.subplots() rates_data = [] rates_labels = [] if ((len(node_groups) == 1) and ('label' not in node_groups[0])): node_groups[0]['label'] = 'All Nodes' for (i, node_grp) in enumerate(node_groups): rates_labels.append(node_grp.get('label', 'Node Group {}'.format(i))) grp_ids = node_grp.pop('node_ids') grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)] spike_rates = (grp_spikes.groupby('node_ids').size() / (recording_interval / 1000.0)) rates_data.append(spike_rates) axes.boxplot(rates_data) axes.set_ylabel('Firing Rates (Hz)') axes.set_xticklabels(rates_labels) if title: axes.set_title(title) if save_as: plt.savefig(save_as) if show: plt.show() return fig
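And a sketch for the box-plot variant (editorial addition): only per-group labels are needed here, and the file path and id ranges are again placeholders.

from bmtk.utils.reports.spike_trains.plotting import plot_rates_boxplot

fig = plot_rates_boxplot(
    'output/spikes.h5',
    node_groups=[{'node_ids': range(0, 70), 'label': 'pyr'},
                 {'node_ids': range(70, 100), 'label': 'inh'}],
    title='Firing-rate distribution per population',
    show=False,
)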
def train(model, device, dataloader, optimizer): '\n Performs one epoch of training.\n Order of rooms in building and in data must match otherwise model will fit wrong rooms to data.\n ' model.reset_iv() model.train() model.cooling_policy.eval() for layer in model.cooling_policy.parameters(): layer.requires_grad = False num_cols = len(model.building.rooms) num_batches = len(dataloader) train_loss = 0 loss_fn = torch.nn.MSELoss() for (batch, (time, temp)) in enumerate(dataloader): (time, temp) = (time.to(device), temp.to(device)) time = time.squeeze(0) temp = temp.squeeze(0) pred = model(time) pred = pred.squeeze((- 1)) loss = loss_fn(pred[:, 2:], temp[:, 0:num_cols]) train_loss += loss.item() model.iv = pred[(- 1), :].unsqueeze(1).detach() optimizer.zero_grad() loss.backward() optimizer.step() return (train_loss / num_batches)
-8,422,809,956,291,717,000
Performs one epoch of training. Order of rooms in building and in data must match otherwise model will fit wrong rooms to data.
src/rcmodel/optimisation.py
train
BFourcin/rcmodel
python
def train(model, device, dataloader, optimizer): '\n Performs one epoch of training.\n Order of rooms in building and in data must match otherwise model will fit wrong rooms to data.\n ' model.reset_iv() model.train() model.cooling_policy.eval() for layer in model.cooling_policy.parameters(): layer.requires_grad = False num_cols = len(model.building.rooms) num_batches = len(dataloader) train_loss = 0 loss_fn = torch.nn.MSELoss() for (batch, (time, temp)) in enumerate(dataloader): (time, temp) = (time.to(device), temp.to(device)) time = time.squeeze(0) temp = temp.squeeze(0) pred = model(time) pred = pred.squeeze((- 1)) loss = loss_fn(pred[:, 2:], temp[:, 0:num_cols]) train_loss += loss.item() model.iv = pred[(- 1), :].unsqueeze(1).detach() optimizer.zero_grad() loss.backward() optimizer.step() return (train_loss / num_batches)
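A sketch of how train() might be driven over several epochs (editorial addition, illustrative rather than runnable as-is): `model` and `dataset` are assumed to already exist, i.e. an rcmodel-style model exposing building, cooling_policy, reset_iv and iv, and a torch Dataset yielding (time, temp) pairs; those names are assumptions, not taken from this record.

import torch
from torch.utils.data import DataLoader

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# shuffle=False because the model carries its initial value (iv) forward between batches.
dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(10):
    avg_mse = train(model, device, dataloader, optimizer)
    print(f'epoch {epoch}: average MSE {avg_mse:.4f}')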
def sort_data(path, dt): '\n Check if path has sorted data tag (_sorted)\n If not check if data has previously been sorted and exists in the directory.\n Check to see if the value dt is correct\n If not sort data and write filename_sorted.csv\n\n data is sorted by time in ascending order and downsampled to a frequency of dt seconds.\n Missing values are interpolated.\n A time-date string is also inserted.\n ' def sort(path, dt): df = pd.read_csv(path) if (path[(- 11):] == '_sorted.csv'): path_sorted = path else: path_sorted = (path[:(- 4)] + '_sorted.csv') df = df.sort_values(by=['time'], ascending=True) try: df.insert(loc=0, column='date-time', value=pd.to_datetime(df['time'], unit='ms')) except ValueError: raise ValueError('Data appears to have already been sorted. Check if still appropriate and add _sorted.csv tag to avoid this error.') df = df.set_index('date-time').resample((str(dt) + 's')).mean().round(2) df['time'] = ((df.index - pd.Timestamp('1970-01-01')) // pd.Timedelta('1s')) df = df.tz_localize('Europe/London') df = df.interpolate().round(2) df.to_csv(path_sorted, index=True) def need_to_sort(path, dt): def get_dt(path): df_dt = pd.read_csv(path)['time'][0:2].values return (df_dt[1] - df_dt[0]) if (path[(- 11):] == '_sorted.csv'): if (get_dt(path) == dt): return False else: return True else: path_sorted = (path[:(- 4)] + '_sorted.csv') import os.path if os.path.isfile(path_sorted): if (get_dt(path_sorted) == dt): return False else: return True else: return True if need_to_sort(path, dt): sort(path, dt) if (path[(- 11):] == '_sorted.csv'): path_sorted = path else: path_sorted = (path[:(- 4)] + '_sorted.csv') return path_sorted
5,082,796,445,324,769,000
Check if path has sorted data tag (_sorted) If not check if data has previously been sorted and exists in the directory. Check to see if the value dt is correct If not sort data and write filename_sorted.csv data is sorted by time in ascending order and downsampled to a frequency of dt seconds. Missing values are interpolated. A time-date string is also inserted.
src/rcmodel/optimisation.py
sort_data
BFourcin/rcmodel
python
def sort_data(path, dt): '\n Check if path has sorted data tag (_sorted)\n If not check if data has previously been sorted and exists in the directory.\n Check to see if the value dt is correct\n If not sort data and write filename_sorted.csv\n\n data is sorted by time in ascending order and downsampled to a frequency of dt seconds.\n Missing values are interpolated.\n A time-date string is also inserted.\n ' def sort(path, dt): df = pd.read_csv(path) if (path[(- 11):] == '_sorted.csv'): path_sorted = path else: path_sorted = (path[:(- 4)] + '_sorted.csv') df = df.sort_values(by=['time'], ascending=True) try: df.insert(loc=0, column='date-time', value=pd.to_datetime(df['time'], unit='ms')) except ValueError: raise ValueError('Data appears to have already been sorted. Check if still appropriate and add _sorted.csv tag to avoid this error.') df = df.set_index('date-time').resample((str(dt) + 's')).mean().round(2) df['time'] = ((df.index - pd.Timestamp('1970-01-01')) // pd.Timedelta('1s')) df = df.tz_localize('Europe/London') df = df.interpolate().round(2) df.to_csv(path_sorted, index=True) def need_to_sort(path, dt): def get_dt(path): df_dt = pd.read_csv(path)['time'][0:2].values return (df_dt[1] - df_dt[0]) if (path[(- 11):] == '_sorted.csv'): if (get_dt(path) == dt): return False else: return True else: path_sorted = (path[:(- 4)] + '_sorted.csv') import os.path if os.path.isfile(path_sorted): if (get_dt(path_sorted) == dt): return False else: return True else: return True if need_to_sort(path, dt): sort(path, dt) if (path[(- 11):] == '_sorted.csv'): path_sorted = path else: path_sorted = (path[:(- 4)] + '_sorted.csv') return path_sorted
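A brief sketch of sort_data in use (editorial addition); the CSV path is hypothetical and the raw file is assumed to contain a millisecond 'time' column, as the function expects.

import pandas as pd

# Resample the raw readings onto a regular 30-second grid, interpolating gaps,
# and write <name>_sorted.csv next to the input if it does not already exist.
sorted_path = sort_data('data/room_temperatures.csv', dt=30)  # hypothetical input file

df = pd.read_csv(sorted_path)
print(df.head())  # rows are now evenly spaced in time with a 'date-time' column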
def HideNotImplemented(comment): 'Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?' return (annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations)
-5,335,436,601,311,139,000
Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?
tools/protodoc/protodoc.py
HideNotImplemented
Gsantomaggio/envoy
python
def HideNotImplemented(comment): return (annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations)
def GithubUrl(type_context): 'Obtain data plane API Github URL by path from a TypeContext.\n\n Args:\n type_context: type_context.TypeContext for node.\n\n Returns:\n A string with a corresponding data plane API GitHub Url.\n ' if (type_context.location is not None): return (DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0])) return ''
6,404,972,300,512,804,000
Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url.
tools/protodoc/protodoc.py
GithubUrl
Gsantomaggio/envoy
python
def GithubUrl(type_context): 'Obtain data plane API Github URL by path from a TypeContext.\n\n Args:\n type_context: type_context.TypeContext for node.\n\n Returns:\n A string with a corresponding data plane API GitHub Url.\n ' if (type_context.location is not None): return (DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0])) return ''
def FormatCommentWithAnnotations(comment, type_name=''): "Format a comment string with additional RST for annotations.\n\n Args:\n comment: comment string.\n type_name: optional, 'message' or 'enum' may be specified for additional\n message/enum specific annotations.\n\n Returns:\n A string with additional RST from annotations.\n " formatted_extension = '' if (annotations.EXTENSION_ANNOTATION in comment.annotations): extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if (annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations): formatted_extension_category = FormatExtensionCategory(comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations((StripLeadingSpace(comment.raw) + '\n')) return ((comment + formatted_extension) + formatted_extension_category)
-4,937,179,242,579,999,000
Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations.
tools/protodoc/protodoc.py
FormatCommentWithAnnotations
Gsantomaggio/envoy
python
def FormatCommentWithAnnotations(comment, type_name=''): "Format a comment string with additional RST for annotations.\n\n Args:\n comment: comment string.\n type_name: optional, 'message' or 'enum' may be specified for additional\n message/enum specific annotations.\n\n Returns:\n A string with additional RST from annotations.\n " formatted_extension = '' if (annotations.EXTENSION_ANNOTATION in comment.annotations): extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if (annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations): formatted_extension_category = FormatExtensionCategory(comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations((StripLeadingSpace(comment.raw) + '\n')) return ((comment + formatted_extension) + formatted_extension_category)
def MapLines(f, s): 'Apply a function across each line in a flat string.\n\n Args:\n f: A string transform function for a line.\n s: A string consisting of potentially multiple lines.\n\n Returns:\n A flat string with f applied to each line.\n ' return '\n'.join((f(line) for line in s.split('\n')))
9,207,010,195,570,567,000
Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line.
tools/protodoc/protodoc.py
MapLines
Gsantomaggio/envoy
python
def MapLines(f, s): 'Apply a function across each line in a flat string.\n\n Args:\n f: A string transform function for a line.\n s: A string consisting of potentially multiple lines.\n\n Returns:\n A flat string with f applied to each line.\n ' return '\n'.join((f(line) for line in s.split('\n')))
def Indent(spaces, line): 'Indent a string.' return ((' ' * spaces) + line)
2,544,529,550,714,990,000
Indent a string.
tools/protodoc/protodoc.py
Indent
Gsantomaggio/envoy
python
def Indent(spaces, line): return ((' ' * spaces) + line)
def IndentLines(spaces, lines): 'Indent a list of strings.' return map(functools.partial(Indent, spaces), lines)
7,741,411,605,827,786,000
Indent a list of strings.
tools/protodoc/protodoc.py
IndentLines
Gsantomaggio/envoy
python
def IndentLines(spaces, lines): return map(functools.partial(Indent, spaces), lines)
def FormatHeader(style, text): "Format RST header.\n\n Args:\n style: underline style, e.g. '=', '-'.\n text: header text\n\n Returns:\n RST formatted header.\n " return ('%s\n%s\n\n' % (text, (style * len(text))))
7,263,651,663,836,982,000
Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header.
tools/protodoc/protodoc.py
FormatHeader
Gsantomaggio/envoy
python
def FormatHeader(style, text): "Format RST header.\n\n Args:\n style: underline style, e.g. '=', '-'.\n text: header text\n\n Returns:\n RST formatted header.\n " return ('%s\n%s\n\n' % (text, (style * len(text))))
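The string helpers in the last few records are easiest to see with concrete inputs; a small sketch (editorial addition), assuming Indent, MapLines and FormatHeader are in scope, e.g. imported from tools/protodoc/protodoc.py in the envoy tree or pasted locally from the records above.

import functools

print(FormatHeader('-', 'Example header'), end='')
# Example header
# --------------

print(MapLines(functools.partial(Indent, 2), 'first line\nsecond line'))
#   first line
#   second line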
def FormatExtension(extension): 'Format extension metadata as RST.\n\n Args:\n extension: the name of the extension, e.g. com.acme.foo.\n\n Returns:\n RST formatted extension description.\n ' try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor(('extension_' + extension)) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata['categories'], 'extension_category') cat_or_cats = ('categories' if (len(categories) > 1) else 'category') category_message = f'This extension extends and can be used with the following extension {cat_or_cats}' extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f'''{extension} {extension_category}''' except KeyError as e: sys.stderr.write('\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1)
-8,631,740,234,684,610,000
Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description.
tools/protodoc/protodoc.py
FormatExtension
Gsantomaggio/envoy
python
def FormatExtension(extension): 'Format extension metadata as RST.\n\n Args:\n extension: the name of the extension, e.g. com.acme.foo.\n\n Returns:\n RST formatted extension description.\n ' try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor(('extension_' + extension)) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata['categories'], 'extension_category') cat_or_cats = ('categories' if (len(categories) > 1) else 'category') category_message = f'This extension extends and can be used with the following extension {cat_or_cats}' extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f'{extension} {extension_category}' except KeyError as e: sys.stderr.write('\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1)
def FormatExtensionCategory(extension_category): 'Format extension metadata as RST.\n\n Args:\n extension_category: the name of the extension_category, e.g. com.acme.\n\n Returns:\n RST formatted extension category description.\n ' try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f''' Unable to find extension category: {extension_category} ''') anchor = FormatAnchor(('extension_category_' + extension_category)) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions)
2,411,712,575,106,835,500
Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description.
tools/protodoc/protodoc.py
FormatExtensionCategory
Gsantomaggio/envoy
python
def FormatExtensionCategory(extension_category): 'Format extension metadata as RST.\n\n Args:\n extension_category: the name of the extension_category, e.g. com.acme.\n\n Returns:\n RST formatted extension category description.\n ' try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f' Unable to find extension category: {extension_category} ') anchor = FormatAnchor(('extension_category_' + extension_category)) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions)
def FormatHeaderFromFile(style, source_code_info, proto_name): "Format RST header based on special file level title\n\n Args:\n style: underline style, e.g. '=', '-'.\n source_code_info: SourceCodeInfo object.\n proto_name: If the file_level_comment does not contain a user specified\n title, use this as page title.\n\n Returns:\n RST formatted header, and file level comment without page title strings.\n " anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations(StripLeadingSpace('\n'.join(((c + '\n') for c in source_code_info.file_level_comments)))) formatted_extension = '' if (annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations): extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if (annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations): return (((anchor + FormatHeader(style, source_code_info.file_level_annotations[annotations.DOC_TITLE_ANNOTATION])) + formatted_extension), stripped_comment) return (((anchor + FormatHeader(style, proto_name)) + formatted_extension), stripped_comment)
-6,055,170,154,052,651,000
Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings.
tools/protodoc/protodoc.py
FormatHeaderFromFile
Gsantomaggio/envoy
python
def FormatHeaderFromFile(style, source_code_info, proto_name): "Format RST header based on special file level title\n\n Args:\n style: underline style, e.g. '=', '-'.\n source_code_info: SourceCodeInfo object.\n proto_name: If the file_level_comment does not contain a user specified\n title, use this as page title.\n\n Returns:\n RST formatted header, and file level comment without page title strings.\n " anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations(StripLeadingSpace('\n'.join(((c + '\n') for c in source_code_info.file_level_comments)))) formatted_extension = '' if (annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations): extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if (annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations): return (((anchor + FormatHeader(style, source_code_info.file_level_annotations[annotations.DOC_TITLE_ANNOTATION])) + formatted_extension), stripped_comment) return (((anchor + FormatHeader(style, proto_name)) + formatted_extension), stripped_comment)
def FormatFieldTypeAsJson(type_context, field): 'Format FieldDescriptorProto.Type as a pseudo-JSON string.\n\n Args:\n type_context: contextual information for message/enum/field.\n field: FieldDescriptor proto.\n Return: RST formatted pseudo-JSON string representation of field type.\n ' if (TypeNameFromFQN(field.type_name) in type_context.map_typenames): return '"{...}"' if (field.label == field.LABEL_REPEATED): return '[]' if (field.type == field.TYPE_MESSAGE): return '"{...}"' return '"..."'
-3,217,291,533,705,860,000
Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type.
tools/protodoc/protodoc.py
FormatFieldTypeAsJson
Gsantomaggio/envoy
python
def FormatFieldTypeAsJson(type_context, field): 'Format FieldDescriptorProto.Type as a pseudo-JSON string.\n\n Args:\n type_context: contextual information for message/enum/field.\n field: FieldDescriptor proto.\n Return: RST formatted pseudo-JSON string representation of field type.\n ' if (TypeNameFromFQN(field.type_name) in type_context.map_typenames): return '"{...}"' if (field.label == field.LABEL_REPEATED): return '[]' if (field.type == field.TYPE_MESSAGE): return '"{...}"' return '"..."'
def FormatMessageAsJson(type_context, msg): 'Format a message definition DescriptorProto as a pseudo-JSON block.\n\n Args:\n type_context: contextual information for message/enum/field.\n msg: message definition DescriptorProto.\n Return: RST formatted pseudo-JSON string representation of message definition.\n ' lines = [] for (index, field) in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append(('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field)))) if lines: return (('.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines))) + '\n }\n\n') else: return '.. code-block:: json\n\n {}\n\n'
7,837,340,100,763,897,000
Format a message definition DescriptorProto as a pseudo-JSON block. Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition.
tools/protodoc/protodoc.py
FormatMessageAsJson
Gsantomaggio/envoy
python
def FormatMessageAsJson(type_context, msg): 'Format a message definition DescriptorProto as a pseudo-JSON block.\n\n Args:\n type_context: contextual information for message/enum/field.\n msg: message definition DescriptorProto.\n Return: RST formatted pseudo-JSON string representation of message definition.\n ' lines = [] for (index, field) in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append(('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field)))) if lines: return (('.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines))) + '\n }\n\n') else: return '.. code-block:: json\n\n {}\n\n'