language
stringclasses
6 values
original_string
stringlengths
25
887k
text
stringlengths
25
887k
Python
def inference(args): """FOTS Inference on give images.""" model = _load_model(args.model) for image_path in os.listdir(args.input_dir): image = _load_image(os.path.join(args.input_dir, image_path)) # Forward pass pred_score_map, pred_geo_map = model(image) pred_score_map = pred_score_map.permute(0, 2, 3, 1).detach().cpu().numpy() pred_geo_map = pred_geo_map.permute(0, 2, 3, 1).detach().cpu().numpy() pred_bboxes = [] for idx in range(pred_score_map.shape[0]): bboxes = Toolbox.detect( score_map=pred_score_map[idx, :, :, 0], geo_map=pred_geo_map[idx, :, :, ] ) if len(bboxes) > 0: pred_bboxes.append(bboxes) pred_bboxes = np.concatenate(pred_bboxes) image = image.permute(0, 2, 3, 1)[0].cpu().detach().numpy() for i in range(pred_bboxes.shape[0]): # Define predicted rectangle vertices vertices = [ [pred_bboxes[i][0], pred_bboxes[i][1]], [pred_bboxes[i][2], pred_bboxes[i][3]], [pred_bboxes[i][4], pred_bboxes[i][5]], [pred_bboxes[i][6], pred_bboxes[i][7]] ] cv2.polylines(image, [np.array(vertices).astype(np.int32)], isClosed=True, color=(255, 255, 0), thickness=1) # Save the image cv2.imwrite(os.path.join(args.output_dir, os.path.basename(image_path)), image)
def inference(args): """FOTS Inference on give images.""" model = _load_model(args.model) for image_path in os.listdir(args.input_dir): image = _load_image(os.path.join(args.input_dir, image_path)) # Forward pass pred_score_map, pred_geo_map = model(image) pred_score_map = pred_score_map.permute(0, 2, 3, 1).detach().cpu().numpy() pred_geo_map = pred_geo_map.permute(0, 2, 3, 1).detach().cpu().numpy() pred_bboxes = [] for idx in range(pred_score_map.shape[0]): bboxes = Toolbox.detect( score_map=pred_score_map[idx, :, :, 0], geo_map=pred_geo_map[idx, :, :, ] ) if len(bboxes) > 0: pred_bboxes.append(bboxes) pred_bboxes = np.concatenate(pred_bboxes) image = image.permute(0, 2, 3, 1)[0].cpu().detach().numpy() for i in range(pred_bboxes.shape[0]): # Define predicted rectangle vertices vertices = [ [pred_bboxes[i][0], pred_bboxes[i][1]], [pred_bboxes[i][2], pred_bboxes[i][3]], [pred_bboxes[i][4], pred_bboxes[i][5]], [pred_bboxes[i][6], pred_bboxes[i][7]] ] cv2.polylines(image, [np.array(vertices).astype(np.int32)], isClosed=True, color=(255, 255, 0), thickness=1) # Save the image cv2.imwrite(os.path.join(args.output_dir, os.path.basename(image_path)), image)
Python
def _eval_metrics(self, y_pred, y_true): """ Calculate evaluation metrics given predictions and ground truths. """ precious, recall, hmean = self.metric(y_pred, y_true) return np.array([precious, recall, hmean])
def _eval_metrics(self, y_pred, y_true): """ Calculate evaluation metrics given predictions and ground truths. """ precious, recall, hmean = self.metric(y_pred, y_true) return np.array([precious, recall, hmean])
Python
def eval_epoch(self): """Validate after training a single epoch.""" self.model.eval() # total_metrics = np.zeros(3) val_loss = 0 with torch.no_grad(): for i, batch in tqdm(enumerate(self.valid_iterator), total=len(self.valid_iterator), position=0, leave=True): # image_paths, images, bboxs, transcripts, score_map, geo_map, mapping = batch images, score_map, geo_map, training_mask = batch images = images.to(self.device) score_map = score_map.to(self.device) geo_map = geo_map.to(self.device) training_mask = training_mask.to(self.device) # Forward pass # pred_score_map, pred_geo_map, pred_recog, pred_boxes, pred_mapping, indices = self.model(images, bboxs, mapping) # Forward pass pred_score_map, pred_geo_map = self.model(images) # Calculate loss val_loss += self.loss(score_map, pred_score_map, geo_map, pred_geo_map, training_mask) # pred_transcripts = [] # pred_fns = [] # if len(pred_mapping) > 0: # pred_mapping = pred_mapping[indices] # pred_boxes = pred_boxes[indices] # pred_fns = [image_paths[i] for i in pred_mapping] # pred, lengths = pred_recog # _, pred = pred.max(2) # for i in range(lengths.numel()): # l = lengths[i] # p = pred[:l, i] # t = self.transcript_encoder.decode(p, l) # pred_transcripts.append(t) # pred_transcripts = np.array(pred_transcripts) # gt_fns = [image_paths[i] for i in mapping] # total_metrics += self._eval_metrics((pred_boxes, pred_transcripts, pred_fns), # (bboxs, transcripts, gt_fns)) return val_loss / len(self.valid_iterator) # return ( # total_metrics[0] / len(self.train_iterator), # precision # total_metrics[1] / len(self.train_iterator), # recall # total_metrics[2] / len(self.train_iterator) # f1-score # )
def eval_epoch(self): """Validate after training a single epoch.""" self.model.eval() # total_metrics = np.zeros(3) val_loss = 0 with torch.no_grad(): for i, batch in tqdm(enumerate(self.valid_iterator), total=len(self.valid_iterator), position=0, leave=True): # image_paths, images, bboxs, transcripts, score_map, geo_map, mapping = batch images, score_map, geo_map, training_mask = batch images = images.to(self.device) score_map = score_map.to(self.device) geo_map = geo_map.to(self.device) training_mask = training_mask.to(self.device) # Forward pass # pred_score_map, pred_geo_map, pred_recog, pred_boxes, pred_mapping, indices = self.model(images, bboxs, mapping) # Forward pass pred_score_map, pred_geo_map = self.model(images) # Calculate loss val_loss += self.loss(score_map, pred_score_map, geo_map, pred_geo_map, training_mask) # pred_transcripts = [] # pred_fns = [] # if len(pred_mapping) > 0: # pred_mapping = pred_mapping[indices] # pred_boxes = pred_boxes[indices] # pred_fns = [image_paths[i] for i in pred_mapping] # pred, lengths = pred_recog # _, pred = pred.max(2) # for i in range(lengths.numel()): # l = lengths[i] # p = pred[:l, i] # t = self.transcript_encoder.decode(p, l) # pred_transcripts.append(t) # pred_transcripts = np.array(pred_transcripts) # gt_fns = [image_paths[i] for i in mapping] # total_metrics += self._eval_metrics((pred_boxes, pred_transcripts, pred_fns), # (bboxs, transcripts, gt_fns)) return val_loss / len(self.valid_iterator) # return ( # total_metrics[0] / len(self.train_iterator), # precision # total_metrics[1] / len(self.train_iterator), # recall # total_metrics[2] / len(self.train_iterator) # f1-score # )
Python
def epoch_time(start_time, end_time): """Measure single epoch time based on epoch's start and end times.""" elapsed_time = end_time - start_time elapsed_mins = int(elapsed_time / 60) elapsed_secs = int(elapsed_time - (elapsed_mins * 60)) return elapsed_mins, elapsed_secs
def epoch_time(start_time, end_time): """Measure single epoch time based on epoch's start and end times.""" elapsed_time = end_time - start_time elapsed_mins = int(elapsed_time / 60) elapsed_secs = int(elapsed_time - (elapsed_mins * 60)) return elapsed_mins, elapsed_secs
Python
def _save_model(self, name, model): """Save the given model at given path.""" if not os.path.isdir(self.config["model_save_path"]): os.makedirs(self.config["model_save_path"], exist_ok=True) torch.save( model.state_dict(), os.path.join(self.config["model_save_path"], name) )
def _save_model(self, name, model): """Save the given model at given path.""" if not os.path.isdir(self.config["model_save_path"]): os.makedirs(self.config["model_save_path"], exist_ok=True) torch.save( model.state_dict(), os.path.join(self.config["model_save_path"], name) )
Python
def train(self): """Train the model for given numner of epochs.""" best_val_loss = float('inf') for epoch in range(self.epochs): # Epoch start time start_time = time() # Train # loss, train_precision, train_recall, train_f1 = self.train_epoch(epoch) train_loss = self.train_epoch(epoch) # Evaluate # val_precision, val_recall, val_f1 = val_loss = self.eval_epoch() self.lr_scheduler.step(val_loss) # Epoch start time end_time = time() epoch_mins, epoch_secs = self.epoch_time(start_time, end_time) # Save the model when loss improves and at last epoch if val_loss < best_val_loss: print(f"Loss reduced from previous best {best_val_loss} to {val_loss}. Saving the model!") self._save_model(f"FOTS_epoch{epoch+1}.pt", self.model) best_val_loss = val_loss if epoch+1 == self.epochs: self._save_model(f"FOTS_epoch{epoch+1}.pt", self.model) # Log the training progress per epoch # print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s') # print(f'\t Train Loss: {loss:.3f} | Train Precision: {train_precision:7.3f} | Train Recall: {train_recall:7.3f} | Train F1: {train_f1:7.3f}') # print(f'\t Val. Precision: {val_precision:7.3f} | Val. Recall: {val_recall:7.3f} | Val F1: {val_f1:7.3f}') print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s') print(f'\t Train Loss: {train_loss:.3f}') print(f'\t Val. Loss: {val_loss:.3f}\n')
def train(self): """Train the model for given numner of epochs.""" best_val_loss = float('inf') for epoch in range(self.epochs): # Epoch start time start_time = time() # Train # loss, train_precision, train_recall, train_f1 = self.train_epoch(epoch) train_loss = self.train_epoch(epoch) # Evaluate # val_precision, val_recall, val_f1 = val_loss = self.eval_epoch() self.lr_scheduler.step(val_loss) # Epoch start time end_time = time() epoch_mins, epoch_secs = self.epoch_time(start_time, end_time) # Save the model when loss improves and at last epoch if val_loss < best_val_loss: print(f"Loss reduced from previous best {best_val_loss} to {val_loss}. Saving the model!") self._save_model(f"FOTS_epoch{epoch+1}.pt", self.model) best_val_loss = val_loss if epoch+1 == self.epochs: self._save_model(f"FOTS_epoch{epoch+1}.pt", self.model) # Log the training progress per epoch # print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s') # print(f'\t Train Loss: {loss:.3f} | Train Precision: {train_precision:7.3f} | Train Recall: {train_recall:7.3f} | Train F1: {train_f1:7.3f}') # print(f'\t Val. Precision: {val_precision:7.3f} | Val. Recall: {val_recall:7.3f} | Val F1: {val_f1:7.3f}') print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s') print(f'\t Train Loss: {train_loss:.3f}') print(f'\t Val. Loss: {val_loss:.3f}\n')
Python
def calc_balance(bal, minPR, mIR, month, payments): ''' Calculates the credit card balance after X months for a person who only pays the minimum monthly payment. It also builds a list of monthly payments. ''' month -= 1 if month < 0: return round(bal, 2), payments # base case else: minP = minPR * bal # minimum monthly payment payments.append(minP) # build list of monthly payments unpaid = bal - minP # unpaid balance bal = unpaid + mIR * unpaid # updated balance return calc_balance(bal, minPR, mIR, month, payments)
def calc_balance(bal, minPR, mIR, month, payments): ''' Calculates the credit card balance after X months for a person who only pays the minimum monthly payment. It also builds a list of monthly payments. ''' month -= 1 if month < 0: return round(bal, 2), payments # base case else: minP = minPR * bal # minimum monthly payment payments.append(minP) # build list of monthly payments unpaid = bal - minP # unpaid balance bal = unpaid + mIR * unpaid # updated balance return calc_balance(bal, minPR, mIR, month, payments)
Python
def pay_off_dept(bal, mIR, month, lower, upper): ''' Calculates the minimum fixed monthly payment needed in order to pay off a credit card balance within X months using bisection search. ''' guess = (lower + upper) / 2 # guess for bisection search month -= 1 if month < 0: if round(bal) == 0: return round(guess, 2) # base case elif bal < 0: # if balance is negative, reduce upper bound for monthly payment return pay_off_dept(balance, mIR, months, lower, guess) # narrow the guessing range recursively # while resetting arguments 'bal' and 'month' to the global variables 'balance' and 'months' else: # if balance is positive, change increase lower bound for monthly payment return pay_off_dept(balance, mIR, months, guess, upper) else: unpaid = bal - guess # unpaid balance bal = unpaid + mIR * unpaid # updated balance return pay_off_dept(bal, mIR, month, lower, upper)
def pay_off_dept(bal, mIR, month, lower, upper): ''' Calculates the minimum fixed monthly payment needed in order to pay off a credit card balance within X months using bisection search. ''' guess = (lower + upper) / 2 # guess for bisection search month -= 1 if month < 0: if round(bal) == 0: return round(guess, 2) # base case elif bal < 0: # if balance is negative, reduce upper bound for monthly payment return pay_off_dept(balance, mIR, months, lower, guess) # narrow the guessing range recursively # while resetting arguments 'bal' and 'month' to the global variables 'balance' and 'months' else: # if balance is positive, change increase lower bound for monthly payment return pay_off_dept(balance, mIR, months, guess, upper) else: unpaid = bal - guess # unpaid balance bal = unpaid + mIR * unpaid # updated balance return pay_off_dept(bal, mIR, month, lower, upper)
Python
def run(self): """ Run the check. All check scripts must implement this method. It must return a tuple of: (<success>, <message>) In this example, if the check succeeds and FileSharing processes are nowhere to be found, the check will return (True, "all directories smaller than their configured limits"). If the check fails and an FileSharing process is found, it returns (False, "[message including directories that are larger than their limits") """ over_full_dirs = [] if idiot.config.dir_size != None: for d in idiot.config.dir_size: size = self.get_size(d['path']) / float(1<<20) #convert from bytes to MB if size > d['limit']: over_full_dirs.append(d['path']) if len(over_full_dirs) == 1: return (False, "found a directory larger than its configured limit: " + str(over_full_dirs[0])) if len(over_full_dirs) > 1: return (False, "found directories larger than their configured limits: {}".format(', '.join([str(d) for d in over_full_dirs]))) return (True, "all directories smaller than their configured limits") else: return (True, "no directories specified")
def run(self): """ Run the check. All check scripts must implement this method. It must return a tuple of: (<success>, <message>) In this example, if the check succeeds and FileSharing processes are nowhere to be found, the check will return (True, "all directories smaller than their configured limits"). If the check fails and an FileSharing process is found, it returns (False, "[message including directories that are larger than their limits") """ over_full_dirs = [] if idiot.config.dir_size != None: for d in idiot.config.dir_size: size = self.get_size(d['path']) / float(1<<20) #convert from bytes to MB if size > d['limit']: over_full_dirs.append(d['path']) if len(over_full_dirs) == 1: return (False, "found a directory larger than its configured limit: " + str(over_full_dirs[0])) if len(over_full_dirs) > 1: return (False, "found directories larger than their configured limits: {}".format(', '.join([str(d) for d in over_full_dirs]))) return (True, "all directories smaller than their configured limits") else: return (True, "no directories specified")
Python
def run(self): """ Run the check. All check scripts must implement this method. It must return a tuple of: (<success>, <message>) In this example, if the check succeeds and FileSharing processes are nowhere to be found, the check will return (True, "No FileSharing processes found"). If the check fails and an FileSharing process is found, it returns (False, "Found SMB or AFP FileSharing processes with pids <pids>") """ pids = [] for p in psutil.process_iter(): try: if (p.name() == 'AppleFileServer' or p.name() == 'smbd'): pids.append(p.pid) except psutil.NoSuchProcess: pass if len(pids): return (False, "found SMB or AFP file sharing processes with pids: {} - Disable Sharing Prefs: File Sharing".format(', '.join([str(p) for p in pids]))) else: return (True, "disabled")
def run(self): """ Run the check. All check scripts must implement this method. It must return a tuple of: (<success>, <message>) In this example, if the check succeeds and FileSharing processes are nowhere to be found, the check will return (True, "No FileSharing processes found"). If the check fails and an FileSharing process is found, it returns (False, "Found SMB or AFP FileSharing processes with pids <pids>") """ pids = [] for p in psutil.process_iter(): try: if (p.name() == 'AppleFileServer' or p.name() == 'smbd'): pids.append(p.pid) except psutil.NoSuchProcess: pass if len(pids): return (False, "found SMB or AFP file sharing processes with pids: {} - Disable Sharing Prefs: File Sharing".format(', '.join([str(p) for p in pids]))) else: return (True, "disabled")
Python
def snooze(self): """ Disable notifications for this check for a period. """ # increment snooze index, but not past the number of intervals self.snooze_index += 1 self.snooze_index = min(self.snooze_index, len(self.snooze_intervals) - 1) if self.snooze_intervals[self.snooze_index] == 'forever': # snooze forever log.debug("Snoozing check {} forever".format(self)) self.snooze_until = True else: # set snooze until time to now + the currently selected interval self.snooze_until = datetime.datetime.now() + datetime.timedelta(seconds=self.snooze_intervals[self.snooze_index]) log.debug("Snoozing check {} until {}".format(self, self.snooze_until))
def snooze(self): """ Disable notifications for this check for a period. """ # increment snooze index, but not past the number of intervals self.snooze_index += 1 self.snooze_index = min(self.snooze_index, len(self.snooze_intervals) - 1) if self.snooze_intervals[self.snooze_index] == 'forever': # snooze forever log.debug("Snoozing check {} forever".format(self)) self.snooze_until = True else: # set snooze until time to now + the currently selected interval self.snooze_until = datetime.datetime.now() + datetime.timedelta(seconds=self.snooze_intervals[self.snooze_index]) log.debug("Snoozing check {} until {}".format(self, self.snooze_until))
Python
def snoozing(self): """ Return a boolean indicating whether or not the check is currently snoozing (notifications are disabled). """ if self.snooze_until is None: return False elif self.snooze_until is True: # snoozing forever return True else: return datetime.datetime.now() < self.snooze_until
def snoozing(self): """ Return a boolean indicating whether or not the check is currently snoozing (notifications are disabled). """ if self.snooze_until is None: return False elif self.snooze_until is True: # snoozing forever return True else: return datetime.datetime.now() < self.snooze_until
Python
def run(self): """ Run the check. Subclasses must implement this method. """ return (False, "Subclass hasn't implemented the `run` method.")
def run(self): """ Run the check. Subclasses must implement this method. """ return (False, "Subclass hasn't implemented the `run` method.")
Python
def qaoa_with_optimizer(problem_instance: ProblemInstance): """Runs the QAOA algorithm with a classical optimizer of hyperparameters and returns a ProblemInstance containing a solution.""" backend = Aer.get_backend('statevector_simulator') quantum_instance = QuantumInstance(backend) min_cost = None result_min_cost = None all_results = [] for _ in range(problem_instance.num_starting_points): result = qaoa(quantum_instance, problem_instance, initial_point=_generate_uniformly_random_parameters(problem_instance.p), optimizer=problem_instance.optimizer.optimizer) qaoa_expectation = result['eigenvalue'].real + problem_instance.offset all_results.append(result) if _is_solution_better(min_cost, qaoa_expectation): min_cost, result_min_cost = qaoa_expectation, result problem_instance.good_params = _get_high_quality_solutions(all_results, result_min_cost, problem_instance.offset) most_likely_binary_solution = sample_most_likely(result_min_cost['eigenstate']) most_likely_solution_value = problem_instance.calc_objective_value(most_likely_binary_solution) problem_instance = _get_instance_with_best_solution(problem_instance, -min_cost, result_min_cost['optimal_point'], most_likely_binary_solution, most_likely_solution_value) return problem_instance
def qaoa_with_optimizer(problem_instance: ProblemInstance): """Runs the QAOA algorithm with a classical optimizer of hyperparameters and returns a ProblemInstance containing a solution.""" backend = Aer.get_backend('statevector_simulator') quantum_instance = QuantumInstance(backend) min_cost = None result_min_cost = None all_results = [] for _ in range(problem_instance.num_starting_points): result = qaoa(quantum_instance, problem_instance, initial_point=_generate_uniformly_random_parameters(problem_instance.p), optimizer=problem_instance.optimizer.optimizer) qaoa_expectation = result['eigenvalue'].real + problem_instance.offset all_results.append(result) if _is_solution_better(min_cost, qaoa_expectation): min_cost, result_min_cost = qaoa_expectation, result problem_instance.good_params = _get_high_quality_solutions(all_results, result_min_cost, problem_instance.offset) most_likely_binary_solution = sample_most_likely(result_min_cost['eigenstate']) most_likely_solution_value = problem_instance.calc_objective_value(most_likely_binary_solution) problem_instance = _get_instance_with_best_solution(problem_instance, -min_cost, result_min_cost['optimal_point'], most_likely_binary_solution, most_likely_solution_value) return problem_instance
Python
def qaoa(quantum_instance, problem_instance, initial_point, optimizer=None): """Runs the QAOA algorithm without a classical optimizer.""" aqua_globals.massive = True qubit_operator = problem_instance.qubit_operator p = problem_instance.p qaoa_object = QAOA(operator=qubit_operator, p=p, initial_point=initial_point, optimizer=optimizer) return qaoa_object.run(quantum_instance)
def qaoa(quantum_instance, problem_instance, initial_point, optimizer=None): """Runs the QAOA algorithm without a classical optimizer.""" aqua_globals.massive = True qubit_operator = problem_instance.qubit_operator p = problem_instance.p qaoa_object = QAOA(operator=qubit_operator, p=p, initial_point=initial_point, optimizer=optimizer) return qaoa_object.run(quantum_instance)
Python
def _get_instance_with_best_solution(problem_instance: ProblemInstance, min_cost, optimal_params, most_likely_binary_solution, most_likely_solution_value): """Fills a problem instance provided with optimal solution data provided.""" problem_instance.optimal_value = min_cost problem_instance.optimal_params = optimal_params problem_instance.most_likely_binary_solution = most_likely_binary_solution problem_instance.most_likely_solution_value = most_likely_solution_value return problem_instance
def _get_instance_with_best_solution(problem_instance: ProblemInstance, min_cost, optimal_params, most_likely_binary_solution, most_likely_solution_value): """Fills a problem instance provided with optimal solution data provided.""" problem_instance.optimal_value = min_cost problem_instance.optimal_params = optimal_params problem_instance.most_likely_binary_solution = most_likely_binary_solution problem_instance.most_likely_solution_value = most_likely_solution_value return problem_instance
Python
def _get_high_quality_solutions(all_results, best_result, offset): """Filters solutions obtained from several runs to these with low approximation error.""" high_quality_solutions_params = [] best_objective_value = best_result['eigenvalue'].real + offset allowed_approximation_error = 0.01 # 1% approximation error for solution in all_results: solution_objective_value = solution['eigenvalue'].real + offset optimality_ratio = solution_objective_value / best_objective_value approximation_error = 1 - optimality_ratio if allowed_approximation_error >= approximation_error >= 0.0: high_quality_solutions_params.append(solution['optimal_point']) return high_quality_solutions_params
def _get_high_quality_solutions(all_results, best_result, offset): """Filters solutions obtained from several runs to these with low approximation error.""" high_quality_solutions_params = [] best_objective_value = best_result['eigenvalue'].real + offset allowed_approximation_error = 0.01 # 1% approximation error for solution in all_results: solution_objective_value = solution['eigenvalue'].real + offset optimality_ratio = solution_objective_value / best_objective_value approximation_error = 1 - optimality_ratio if allowed_approximation_error >= approximation_error >= 0.0: high_quality_solutions_params.append(solution['optimal_point']) return high_quality_solutions_params
Python
def validate_json_list_not_empty(jsons_list): """Validates that list of json files provided for ML model training is not empty.""" number_of_instances_trained = len(jsons_list) if number_of_instances_trained == 0: raise Exception("Empty list of json files provided for KDE training.")
def validate_json_list_not_empty(jsons_list): """Validates that list of json files provided for ML model training is not empty.""" number_of_instances_trained = len(jsons_list) if number_of_instances_trained == 0: raise Exception("Empty list of json files provided for KDE training.")
Python
def validate_jsons(jsons_list): """Validates that list of json files provided for ML model training all refer to the same problem instance.""" reference_problem_name, reference_graph_type, reference_p = _get_reference_metadata(jsons_list) for json in jsons_list: if _is_compliant_with_reference_json(reference_problem_name, reference_graph_type, reference_p, json): return False return True
def validate_jsons(jsons_list): """Validates that list of json files provided for ML model training all refer to the same problem instance.""" reference_problem_name, reference_graph_type, reference_p = _get_reference_metadata(jsons_list) for json in jsons_list: if _is_compliant_with_reference_json(reference_problem_name, reference_graph_type, reference_p, json): return False return True
Python
def _get_reference_metadata(jsons_list): """Gets metadata for a problem instance that the first json in the list refers to.""" reference_json = jsons_list[0] problem_name = reference_json["problem_name"] graph_type = reference_json["graph_type"] p = reference_json["p"] return problem_name, graph_type, p
def _get_reference_metadata(jsons_list): """Gets metadata for a problem instance that the first json in the list refers to.""" reference_json = jsons_list[0] problem_name = reference_json["problem_name"] graph_type = reference_json["graph_type"] p = reference_json["p"] return problem_name, graph_type, p
Python
def _get_model_metadata(jsons_list): """Gets metadata regarding a problem instance from the list of json files that should have been checked to be referring to the same problem instance.""" number_of_instances_trained = len(jsons_list) problem_name, graph_type, p = _get_reference_metadata(jsons_list) return problem_name, graph_type, number_of_instances_trained, p
def _get_model_metadata(jsons_list): """Gets metadata regarding a problem instance from the list of json files that should have been checked to be referring to the same problem instance.""" number_of_instances_trained = len(jsons_list) problem_name, graph_type, p = _get_reference_metadata(jsons_list) return problem_name, graph_type, number_of_instances_trained, p
Python
def load_kde_model(file_path): """Loads a dilled KDE model from a file path provided.""" with open(file_path, 'rb') as file: kde_model = dill.load(file) return kde_model
def load_kde_model(file_path): """Loads a dilled KDE model from a file path provided.""" with open(file_path, 'rb') as file: kde_model = dill.load(file) return kde_model
Python
def _get_best_solution(all_cases, case_id): """Returns the best solution found among all provided solutions.""" optimizer_runs = 1250 graph_id = 0 # max 0 because only 1 graph optimal_value = 0 optimal_params = None for optimizer_run_id in range(optimizer_runs): # Get optimal values optimal_value = _get_current_optimal_value(all_cases, case_id, graph_id, optimizer_run_id) if optimal_value < optimal_value: optimal_value = optimal_value optimal_params = _get_optimal_params(all_cases, case_id, graph_id, optimizer_run_id) return optimal_value, optimal_params
def _get_best_solution(all_cases, case_id): """Returns the best solution found among all provided solutions.""" optimizer_runs = 1250 graph_id = 0 # max 0 because only 1 graph optimal_value = 0 optimal_params = None for optimizer_run_id in range(optimizer_runs): # Get optimal values optimal_value = _get_current_optimal_value(all_cases, case_id, graph_id, optimizer_run_id) if optimal_value < optimal_value: optimal_value = optimal_value optimal_params = _get_optimal_params(all_cases, case_id, graph_id, optimizer_run_id) return optimal_value, optimal_params
Python
def _generate_json_with_params(data, problem_name, graph_type, p, json_name): """Saves a json file with data provided.""" save_path = "workflow_results_converted/" + problem_name + "/" + graph_type + "/" + "p=" + str( int(p)) + "/" + json_name with open(save_path, "w") as outfile: json.dump(data, outfile)
def _generate_json_with_params(data, problem_name, graph_type, p, json_name): """Saves a json file with data provided.""" save_path = "workflow_results_converted/" + problem_name + "/" + graph_type + "/" + "p=" + str( int(p)) + "/" + json_name with open(save_path, "w") as outfile: json.dump(data, outfile)
Python
def _generate_output_dictionary(optimal_params, min_value, good_params, optimizer_name, graph_type, p, weight_matrix, problem): """Generates a dictionary that stores important fields extracted from a workflow output.""" dic = {} dic["problem_name"] = problem dic["hamiltonian_matrix"] = [] dic["weight_matrix"] = weight_matrix dic["optimal_params"] = optimal_params dic["min_value"] = min_value dic["most_likely_binary_solution"] = [] dic["most_likely_solution_value"] = 1.0 dic["classical_solution_value"] = 1.0 dic["good_params"] = good_params dic["optimizer_name"] = optimizer_name dic["graph_type"] = graph_type dic["p"] = p return dic
def _generate_output_dictionary(optimal_params, min_value, good_params, optimizer_name, graph_type, p, weight_matrix, problem): """Generates a dictionary that stores important fields extracted from a workflow output.""" dic = {} dic["problem_name"] = problem dic["hamiltonian_matrix"] = [] dic["weight_matrix"] = weight_matrix dic["optimal_params"] = optimal_params dic["min_value"] = min_value dic["most_likely_binary_solution"] = [] dic["most_likely_solution_value"] = 1.0 dic["classical_solution_value"] = 1.0 dic["good_params"] = good_params dic["optimizer_name"] = optimizer_name dic["graph_type"] = graph_type dic["p"] = p return dic
Python
def _get_current_optimal_value(all_cases_json, case_id, graph_id, optimizer_run_id): """Extracts an optimal value from a given workflow run ID.""" return all_cases_json[case_id]['optimization-results-aggregated'][str(graph_id)][ str(optimizer_run_id)][ "opt_value"][ "value"]
def _get_current_optimal_value(all_cases_json, case_id, graph_id, optimizer_run_id): """Extracts an optimal value from a given workflow run ID.""" return all_cases_json[case_id]['optimization-results-aggregated'][str(graph_id)][ str(optimizer_run_id)][ "opt_value"][ "value"]
Python
def _get_optimal_params(all_cases_json, case_id, graph_id, optimizer_run_id): """Extracts QAOA parameters associated with an optimal solution for a given workflow run ID.""" return all_cases_json[case_id]['optimization-results-aggregated'][str(graph_id)][ str(optimizer_run_id)][ "opt_params"][ "real"]
def _get_optimal_params(all_cases_json, case_id, graph_id, optimizer_run_id): """Extracts QAOA parameters associated with an optimal solution for a given workflow run ID.""" return all_cases_json[case_id]['optimization-results-aggregated'][str(graph_id)][ str(optimizer_run_id)][ "opt_params"][ "real"]
Python
def _get_bitstrings(all_cases_json, case_id, graph_id, optimizer_run_id): """Extracts QAOA binary strings distribution associated with an optimal solution for a given workflow run ID.""" return all_cases_json[case_id]['bitstring-distributions-aggregated'][str(graph_id)][ str(optimizer_run_id)][ 'bitstring_distribution']
def _get_bitstrings(all_cases_json, case_id, graph_id, optimizer_run_id): """Extracts QAOA binary strings distribution associated with an optimal solution for a given workflow run ID.""" return all_cases_json[case_id]['bitstring-distributions-aggregated'][str(graph_id)][ str(optimizer_run_id)][ 'bitstring_distribution']
Python
def read_from_json(directory: str, file_name: str): """Reads a json file from a given directory.""" path = directory + "/" + file_name with open(path) as json_file: json_object = json.load(json_file) return json_object
def read_from_json(directory: str, file_name: str): """Reads a json file from a given directory.""" path = directory + "/" + file_name with open(path) as json_file: json_object = json.load(json_file) return json_object
Python
def worker(self, problem_name, input_graph, p_param, optimizer, initial_points_num): """Worker method for running QAOA algorithm for a given problem instance.""" optimizer_instance = optimizers_factory.create_optimizer(optimizer) problem_instance = create_graph_problem_instance(problem_name, p_param, input_graph, optimizer_instance, initial_points_num) qaoa_res = qaoa.qaoa_with_optimizer(problem_instance) directory = self._build_problem_instance_directory(problem_instance, problem_name) results_serializer.save_to_json(directory, qaoa_res) return qaoa_res.optimal_params, qaoa_res.optimal_value
def worker(self, problem_name, input_graph, p_param, optimizer, initial_points_num): """Worker method for running QAOA algorithm for a given problem instance.""" optimizer_instance = optimizers_factory.create_optimizer(optimizer) problem_instance = create_graph_problem_instance(problem_name, p_param, input_graph, optimizer_instance, initial_points_num) qaoa_res = qaoa.qaoa_with_optimizer(problem_instance) directory = self._build_problem_instance_directory(problem_instance, problem_name) results_serializer.save_to_json(directory, qaoa_res) return qaoa_res.optimal_params, qaoa_res.optimal_value
Python
def _build_problem_instance_directory(problem_instance, problem_name): """Builds a file directory depending on the problem instance and problem name provided where a result of the worker method can be stored.""" path = pathlib.Path(__file__).parent.resolve() directory = Path("../output", problem_name.value, problem_instance.input_graph.graph["graph_type"].value) full_path = (path).joinpath(directory) return full_path
def _build_problem_instance_directory(problem_instance, problem_name): """Builds a file directory depending on the problem instance and problem name provided where a result of the worker method can be stored.""" path = pathlib.Path(__file__).parent.resolve() directory = Path("../output", problem_name.value, problem_instance.input_graph.graph["graph_type"].value) full_path = (path).joinpath(directory) return full_path
Python
def generate_caveman_graph(number_of_cliques: int, size_of_cliques: int, graph_id: int): """Generates a caveman graph based on its parameters.""" graph = nx.caveman_graph(number_of_cliques, size_of_cliques) graph = _get_graph_with_attributes(graph, graph_id, GraphType.CAVEMAN) return graph
def generate_caveman_graph(number_of_cliques: int, size_of_cliques: int, graph_id: int): """Generates a caveman graph based on its parameters.""" graph = nx.caveman_graph(number_of_cliques, size_of_cliques) graph = _get_graph_with_attributes(graph, graph_id, GraphType.CAVEMAN) return graph
Python
def generate_ladder_graph(length_of_ladder: int, graph_id: int): """Generates a ladder graph based on its parameters.""" graph = nx.ladder_graph(length_of_ladder) graph = _get_graph_with_attributes(graph, graph_id, GraphType.LADDER) return graph
def generate_ladder_graph(length_of_ladder: int, graph_id: int): """Generates a ladder graph based on its parameters.""" graph = nx.ladder_graph(length_of_ladder) graph = _get_graph_with_attributes(graph, graph_id, GraphType.LADDER) return graph
Python
def generate_barbell_graph(number_of_vertices_complete_graph: int, graph_id: int): """Generates a barbell graph based on its parameters.""" graph = nx.barbell_graph(number_of_vertices_complete_graph, number_of_vertices_complete_graph) graph = _get_graph_with_attributes(graph, graph_id, GraphType.BARBELL) return graph
def generate_barbell_graph(number_of_vertices_complete_graph: int, graph_id: int): """Generates a barbell graph based on its parameters.""" graph = nx.barbell_graph(number_of_vertices_complete_graph, number_of_vertices_complete_graph) graph = _get_graph_with_attributes(graph, graph_id, GraphType.BARBELL) return graph
Python
def generate_random_graph(number_of_vertices: int, edge_generation_probability: float, graph_id: int): """Generates a random graph based on its parameters.""" graph = nx.erdos_renyi_graph(number_of_vertices, edge_generation_probability) graph = _get_graph_with_attributes(graph, graph_id, GraphType.RANDOM) return graph
def generate_random_graph(number_of_vertices: int, edge_generation_probability: float, graph_id: int): """Generates a random graph based on its parameters.""" graph = nx.erdos_renyi_graph(number_of_vertices, edge_generation_probability) graph = _get_graph_with_attributes(graph, graph_id, GraphType.RANDOM) return graph
Python
def _get_graph_with_attributes(graph, graph_id: int, graph_type: GraphType): """Adds additional attributed to a networkx graph - ID and type of a graph.""" graph.graph["graph_id"] = graph_id graph.graph["graph_type"] = graph_type return graph
def _get_graph_with_attributes(graph, graph_id: int, graph_type: GraphType): """Adds additional attributed to a networkx graph - ID and type of a graph.""" graph.graph["graph_id"] = graph_id graph.graph["graph_type"] = graph_type return graph
Python
def complex_ndarray_to_matrix(array: ndarray): """Converts a complex ndarray into a matrix (list of lists).""" elements = len(array) dimension = sqrt(elements) assert dimension == int(dimension) dimension = int(dimension) matrix = [] array = array.reshape((dimension, dimension)) for row_ind in range(dimension): row = [] for col_ind in range(dimension): row.append((array[row_ind][col_ind].real, array[row_ind][col_ind].imag)) matrix.append(row) return matrix
def complex_ndarray_to_matrix(array: ndarray): """Converts a complex ndarray into a matrix (list of lists).""" elements = len(array) dimension = sqrt(elements) assert dimension == int(dimension) dimension = int(dimension) matrix = [] array = array.reshape((dimension, dimension)) for row_ind in range(dimension): row = [] for col_ind in range(dimension): row.append((array[row_ind][col_ind].real, array[row_ind][col_ind].imag)) matrix.append(row) return matrix
Python
def create_optimizer(optimizer_name: OptimizerName): """Factory producing a corresponding optimizer.""" if optimizer_name == OptimizerName.COBYLA: return CobylaOptimizer() elif optimizer_name == OptimizerName.SPSA: return SpsaOptimizer() elif optimizer_name == OptimizerName.NELDER_MEAD: return NelderMeadOptimizer() elif optimizer_name == OptimizerName.LBFGS: return LbfgsOptimizer()
def create_optimizer(optimizer_name: OptimizerName): """Factory producing a corresponding optimizer.""" if optimizer_name == OptimizerName.COBYLA: return CobylaOptimizer() elif optimizer_name == OptimizerName.SPSA: return SpsaOptimizer() elif optimizer_name == OptimizerName.NELDER_MEAD: return NelderMeadOptimizer() elif optimizer_name == OptimizerName.LBFGS: return LbfgsOptimizer()
Python
def serialize_kde_model(directory, kde_model: KdeModel): """Uses dill to save a binary of a KDE model in the directory provided.""" file_name = _create_model_file_name(kde_model) directory = directory + "\\" + file_name with open(directory, 'wb') as f: dill.dump(kde_model, f)
def serialize_kde_model(directory, kde_model: KdeModel): """Uses dill to save a binary of a KDE model in the directory provided.""" file_name = _create_model_file_name(kde_model) directory = directory + "\\" + file_name with open(directory, 'wb') as f: dill.dump(kde_model, f)
Python
def _create_model_file_name(kde_model: KdeModel): """Constructs a file name for a given KDE model.""" problem_name = kde_model.problem_name graph_type = kde_model.graph_type kernel = kde_model.kernel bandwidth = kde_model.bandwidth p = kde_model.p return 'kde_model_' + problem_name + "_" + graph_type + "_" + kernel + "_bandwidth=" + str(bandwidth) + "_p=" + str( p)
def _create_model_file_name(kde_model: KdeModel): """Constructs a file name for a given KDE model.""" problem_name = kde_model.problem_name graph_type = kde_model.graph_type kernel = kde_model.kernel bandwidth = kde_model.bandwidth p = kde_model.p return 'kde_model_' + problem_name + "_" + graph_type + "_" + kernel + "_bandwidth=" + str(bandwidth) + "_p=" + str( p)
Python
def worker(self, problem_name, input_graph, p_param, bandwidth, kernel): """Worker method for running QAOA algorithm (without a classical optimizer) for a given problem instance.""" backend = Aer.get_backend('qasm_simulator') quantum_instance = QuantumInstance(backend) problem_instance = create_graph_problem_instance(problem_name, p_param, input_graph) min_cost_expectation = None result_min_cost = None min_cost_params = None graph_type = input_graph.graph["graph_type"].value kde_model = get_kde_model(problem_name, graph_type, p_param, kernel, bandwidth) # we try 10 sampled points without an optimizer initial_points = kde_model.kde_model.sample(10) print(initial_points) for initial_point in initial_points: qaoa_res = qaoa.qaoa(quantum_instance, problem_instance, initial_point) qaoa_expectation = qaoa_res['eigenvalue'].real + problem_instance.offset if _is_solution_better(min_cost_expectation, qaoa_expectation): min_cost_expectation, result_min_cost, min_cost_params = qaoa_expectation, qaoa_res, initial_point most_likely_binary_solution = sample_most_likely(result_min_cost['eigenstate']) result_min_cost = _get_instance_with_best_solution(problem_instance, min_cost_expectation, min_cost_params, most_likely_binary_solution, None) directory = self._build_model_validation_directory(problem_instance, problem_name) results_serializer.save_to_json(directory, result_min_cost, kernel, bandwidth) # print(result_min_cost.optimal_params, result_min_cost.optimal_value) print(result_min_cost.classical_solution_value) return result_min_cost.optimal_params, result_min_cost.optimal_value
def worker(self, problem_name, input_graph, p_param, bandwidth, kernel): """Worker method for running QAOA algorithm (without a classical optimizer) for a given problem instance.""" backend = Aer.get_backend('qasm_simulator') quantum_instance = QuantumInstance(backend) problem_instance = create_graph_problem_instance(problem_name, p_param, input_graph) min_cost_expectation = None result_min_cost = None min_cost_params = None graph_type = input_graph.graph["graph_type"].value kde_model = get_kde_model(problem_name, graph_type, p_param, kernel, bandwidth) # we try 10 sampled points without an optimizer initial_points = kde_model.kde_model.sample(10) print(initial_points) for initial_point in initial_points: qaoa_res = qaoa.qaoa(quantum_instance, problem_instance, initial_point) qaoa_expectation = qaoa_res['eigenvalue'].real + problem_instance.offset if _is_solution_better(min_cost_expectation, qaoa_expectation): min_cost_expectation, result_min_cost, min_cost_params = qaoa_expectation, qaoa_res, initial_point most_likely_binary_solution = sample_most_likely(result_min_cost['eigenstate']) result_min_cost = _get_instance_with_best_solution(problem_instance, min_cost_expectation, min_cost_params, most_likely_binary_solution, None) directory = self._build_model_validation_directory(problem_instance, problem_name) results_serializer.save_to_json(directory, result_min_cost, kernel, bandwidth) # print(result_min_cost.optimal_params, result_min_cost.optimal_value) print(result_min_cost.classical_solution_value) return result_min_cost.optimal_params, result_min_cost.optimal_value
Python
def _build_model_validation_directory(problem_instance, problem_name) -> pathlib.Path: """Builds a file directory depending on the problem instance and problem name provided where a result of the worker method can be stored.""" path = pathlib.Path(__file__).parent.parent.resolve() path = (path).joinpath(pathlib.Path("kernel_density_estimation", "model_validation")) directory = pathlib.Path(problem_name.value, problem_instance.input_graph.graph["graph_type"].value + "_evaluation") full_path = (path).joinpath(directory) return full_path
def _build_model_validation_directory(problem_instance, problem_name) -> pathlib.Path: """Builds a file directory depending on the problem instance and problem name provided where a result of the worker method can be stored.""" path = pathlib.Path(__file__).parent.parent.resolve() path = (path).joinpath(pathlib.Path("kernel_density_estimation", "model_validation")) directory = pathlib.Path(problem_name.value, problem_instance.input_graph.graph["graph_type"].value + "_evaluation") full_path = (path).joinpath(directory) return full_path
Python
def train_kde_model(jsons_list, kernel: str, bandwidth: float): """Trains a KDE model from data provided as json files.""" validate_json_list_not_empty(jsons_list) if not validate_jsons(jsons_list): raise Exception("Provided json files reference different optimization problem and/or graph type.") good_params_matrix = build_kde_parameters_matrix(jsons_list) kde = KernelDensity(kernel=kernel, bandwidth=bandwidth) kde.fit(good_params_matrix) return _create_kde_model(jsons_list, kde, kernel, bandwidth)
def train_kde_model(jsons_list, kernel: str, bandwidth: float): """Trains a KDE model from data provided as json files.""" validate_json_list_not_empty(jsons_list) if not validate_jsons(jsons_list): raise Exception("Provided json files reference different optimization problem and/or graph type.") good_params_matrix = build_kde_parameters_matrix(jsons_list) kde = KernelDensity(kernel=kernel, bandwidth=bandwidth) kde.fit(good_params_matrix) return _create_kde_model(jsons_list, kde, kernel, bandwidth)
Python
def save_to_json(directory: pathlib.Path, problem_instance: ProblemInstance, kernel=None, bandwidth=None): """Saves a given ProblemInstance as a json file to a given directory.""" file_name = _create_problem_instance_file_name( problem_instance) if kernel is not None and bandwidth is not None: file_name += f"_{kernel}_bandwidth_{bandwidth}" result = _create_result(problem_instance) print(result) print(file_name) path = (directory).joinpath(file_name) print(path) with open(path, 'w') as outfile: json.dump(result.__dict__, outfile)
def save_to_json(directory: pathlib.Path, problem_instance: ProblemInstance, kernel=None, bandwidth=None): """Saves a given ProblemInstance as a json file to a given directory.""" file_name = _create_problem_instance_file_name( problem_instance) if kernel is not None and bandwidth is not None: file_name += f"_{kernel}_bandwidth_{bandwidth}" result = _create_result(problem_instance) print(result) print(file_name) path = (directory).joinpath(file_name) print(path) with open(path, 'w') as outfile: json.dump(result.__dict__, outfile)
Python
def _create_result(problem_instance: ProblemInstance): """Creates a Result object from ProblemInstance. Result object is a lighter version of a ProblemInstance.""" optimizer_name = problem_instance.optimizer.optimizer_name.value if problem_instance.optimizer else None print(optimizer_name) most_likely_binary_solution = problem_instance.most_likely_binary_solution.tolist() if \ problem_instance.most_likely_binary_solution is not None else None print(most_likely_binary_solution) return Result(problem_instance.problem_name.value, optimizer_name, # complex_ndarray_to_list.complex_ndarray_to_matrix(problem_instance.hamiltonian_matrix), None, problem_instance.weight_matrix.tolist(), problem_instance.optimal_params.tolist(), problem_instance.optimal_value, most_likely_binary_solution, problem_instance.most_likely_solution_value, problem_instance.classical_solution_value.tolist(), [np_array.tolist() for np_array in problem_instance.good_params], problem_instance.input_graph.graph["graph_type"].value, problem_instance.p)
def _create_result(problem_instance: ProblemInstance): """Creates a Result object from ProblemInstance. Result object is a lighter version of a ProblemInstance.""" optimizer_name = problem_instance.optimizer.optimizer_name.value if problem_instance.optimizer else None print(optimizer_name) most_likely_binary_solution = problem_instance.most_likely_binary_solution.tolist() if \ problem_instance.most_likely_binary_solution is not None else None print(most_likely_binary_solution) return Result(problem_instance.problem_name.value, optimizer_name, # complex_ndarray_to_list.complex_ndarray_to_matrix(problem_instance.hamiltonian_matrix), None, problem_instance.weight_matrix.tolist(), problem_instance.optimal_params.tolist(), problem_instance.optimal_value, most_likely_binary_solution, problem_instance.most_likely_solution_value, problem_instance.classical_solution_value.tolist(), [np_array.tolist() for np_array in problem_instance.good_params], problem_instance.input_graph.graph["graph_type"].value, problem_instance.p)
Python
def _create_problem_instance_file_name(problem_instance: ProblemInstance): """Uses a ProblemInstance to create a related file name.""" timestamp_str = datetime.now().strftime("%d-%b-%Y-%H-%M-%S.%f") optimizer_name_underscored = ( problem_instance.optimizer.optimizer_name.value + "_") if problem_instance.optimizer else "" return problem_instance.problem_name.value + "_" + problem_instance.input_graph.graph[ "graph_type"].value + "_" + str(problem_instance.input_graph.graph["graph_id"]) + "_" + "p=" + str( problem_instance.p) + "_" + optimizer_name_underscored + timestamp_str
def _create_problem_instance_file_name(problem_instance: ProblemInstance): """Uses a ProblemInstance to create a related file name.""" timestamp_str = datetime.now().strftime("%d-%b-%Y-%H-%M-%S.%f") optimizer_name_underscored = ( problem_instance.optimizer.optimizer_name.value + "_") if problem_instance.optimizer else "" return problem_instance.problem_name.value + "_" + problem_instance.input_graph.graph[ "graph_type"].value + "_" + str(problem_instance.input_graph.graph["graph_id"]) + "_" + "p=" + str( problem_instance.p) + "_" + optimizer_name_underscored + timestamp_str
Python
def generate_ladder_graph_instances(ladder_graph_ladder_lengths): """Generates ladder graph instances from a list of parameters defining each graph and gives them ID.""" instances = [] graph_id = 0 for ladder_length in ladder_graph_ladder_lengths: graph = graphs_builder.generate_ladder_graph(ladder_length, graph_id) instances.append(graph) graph_id += 1 return instances
def generate_ladder_graph_instances(ladder_graph_ladder_lengths): """Generates ladder graph instances from a list of parameters defining each graph and gives them ID.""" instances = [] graph_id = 0 for ladder_length in ladder_graph_ladder_lengths: graph = graphs_builder.generate_ladder_graph(ladder_length, graph_id) instances.append(graph) graph_id += 1 return instances
Python
def generate_barbell_graph_instances(barbell_graph_num_of_vertices): """Generates barbell graph instances from a list of parameters defining each graph and gives them ID.""" instances = [] graph_id = 0 for num_of_vertices in barbell_graph_num_of_vertices: graph = graphs_builder.generate_barbell_graph(num_of_vertices, graph_id) instances.append(graph) graph_id += 1 return instances
def generate_barbell_graph_instances(barbell_graph_num_of_vertices): """Generates barbell graph instances from a list of parameters defining each graph and gives them ID.""" instances = [] graph_id = 0 for num_of_vertices in barbell_graph_num_of_vertices: graph = graphs_builder.generate_barbell_graph(num_of_vertices, graph_id) instances.append(graph) graph_id += 1 return instances
Python
def generate_random_graph_instances(random_graph_num_of_vertices, random_graph_probabilities): """Generates random graph instances from a list of parameters defining each graph and gives them ID.""" instances = [] graph_id = 0 for num_of_vertices in random_graph_num_of_vertices: for prob in random_graph_probabilities: graph = graphs_builder.generate_random_graph(num_of_vertices, prob, graph_id) instances.append(graph) graph_id += 1 return instances
def generate_random_graph_instances(random_graph_num_of_vertices, random_graph_probabilities): """Generates random graph instances from a list of parameters defining each graph and gives them ID.""" instances = [] graph_id = 0 for num_of_vertices in random_graph_num_of_vertices: for prob in random_graph_probabilities: graph = graphs_builder.generate_random_graph(num_of_vertices, prob, graph_id) instances.append(graph) graph_id += 1 return instances
Python
def generate_caveman_graph_instances(caveman_graph_cliques): """Generates caveman graph instances from a list of parameters defining each graph and gives them ID.""" instances = [] graph_id = 0 for clique_num, clique_size in caveman_graph_cliques: graph = graphs_builder.generate_caveman_graph(clique_num, clique_size, graph_id) instances.append(graph) graph_id += 1 return instances
def generate_caveman_graph_instances(caveman_graph_cliques): """Generates caveman graph instances from a list of parameters defining each graph and gives them ID.""" instances = [] graph_id = 0 for clique_num, clique_size in caveman_graph_cliques: graph = graphs_builder.generate_caveman_graph(clique_num, clique_size, graph_id) instances.append(graph) graph_id += 1 return instances
Python
def _benchmark_create_tensor(self, value, dtype, device): """Benchmark overheads of creating a Tensor object.""" ctx = context.context() handle = ctx._handle if device == GPU: # Warmup the GPU ops.EagerTensor(value, context=handle, device=device) def func(): ops.EagerTensor(value, context=handle, device=device, dtype=dtype) self._run(func, 30000)
def _benchmark_create_tensor(self, value, dtype, device): """Benchmark overheads of creating a Tensor object.""" ctx = context.context() handle = ctx._handle if device == GPU: # Warmup the GPU ops.EagerTensor(value, context=handle, device=device) def func(): ops.EagerTensor(value, context=handle, device=device, dtype=dtype) self._run(func, 30000)
Python
def _create_ordered_io(keras_model, estimator_io, is_input=True): """Create a list of tensors from IO dictionary based on Keras IO order. Args: keras_model: An instance of compiled keras model. estimator_io: The features or labels (dict or plain array) from model_fn. is_input: True if dictionary is for inputs. Returns: A list of tensors based on Keras IO order. Raises: ValueError: if dictionary keys cannot be found in Keras model input_names or output_names. """ if isinstance(estimator_io, (list, tuple)): # Case currently not supported by most built-in input_fn, # but it's good to have for sanity return [_convert_tensor(x) for x in estimator_io] elif isinstance(estimator_io, dict): if is_input: if keras_model._is_graph_network: keras_io_names = keras_model.input_names else: keras_io_names = [ 'input_%d' % i for i in range(1, len(estimator_io) + 1)] else: if keras_model._is_graph_network: keras_io_names = keras_model.output_names else: keras_io_names = [ 'output_%d' % i for i in range(1, len(estimator_io) + 1)] for key in estimator_io: if key not in keras_io_names: raise ValueError( 'Cannot find %s with name "%s" in Keras Model. ' 'It needs to match one ' 'of the following: %s' % ('input' if is_input else 'output', key, ', '.join(keras_io_names))) tensors = [_convert_tensor(estimator_io[io_name]) for io_name in keras_io_names] return tensors else: # Plain array. return _convert_tensor(estimator_io)
def _create_ordered_io(keras_model, estimator_io, is_input=True): """Create a list of tensors from IO dictionary based on Keras IO order. Args: keras_model: An instance of compiled keras model. estimator_io: The features or labels (dict or plain array) from model_fn. is_input: True if dictionary is for inputs. Returns: A list of tensors based on Keras IO order. Raises: ValueError: if dictionary keys cannot be found in Keras model input_names or output_names. """ if isinstance(estimator_io, (list, tuple)): # Case currently not supported by most built-in input_fn, # but it's good to have for sanity return [_convert_tensor(x) for x in estimator_io] elif isinstance(estimator_io, dict): if is_input: if keras_model._is_graph_network: keras_io_names = keras_model.input_names else: keras_io_names = [ 'input_%d' % i for i in range(1, len(estimator_io) + 1)] else: if keras_model._is_graph_network: keras_io_names = keras_model.output_names else: keras_io_names = [ 'output_%d' % i for i in range(1, len(estimator_io) + 1)] for key in estimator_io: if key not in keras_io_names: raise ValueError( 'Cannot find %s with name "%s" in Keras Model. ' 'It needs to match one ' 'of the following: %s' % ('input' if is_input else 'output', key, ', '.join(keras_io_names))) tensors = [_convert_tensor(estimator_io[io_name]) for io_name in keras_io_names] return tensors else: # Plain array. return _convert_tensor(estimator_io)
Python
def _in_place_subclassed_model_reset(model): """Substitute for model cloning that works for subclassed models. Subclassed models cannot be cloned because their topology is not serializable. To "instantiate" an identical model in a new TF graph, we reuse the original model object, but we clear its state. After calling this function on a model instance, you can use the model instance as if it were a model clone (in particular you can use it in a new graph). This method clears the state of the input model. It is thus destructive. However the original state can be restored fully by calling `_in_place_subclassed_model_state_restoration`. Args: model: Instance of a Keras model created via subclassing. Raises: ValueError: In case the model uses a subclassed model as inner layer. """ assert not model._is_graph_network # Only makes sense for subclassed networks # Retrieve all layers tracked by the model as well as their attribute names attributes_cache = {} for name in dir(model): try: value = getattr(model, name) except (AttributeError, ValueError, TypeError): continue if isinstance(value, Layer): attributes_cache[name] = value assert value in model._layers elif isinstance(value, (list, tuple)) and name not in ('layers', '_layers'): # Handle case: list/tuple of layers (also tracked by the Network API). if value and all(isinstance(val, Layer) for val in value): raise ValueError('We do not support the use of list-of-layers ' 'attributes in subclassed models used with ' '`model_to_estimator` at this time. Found list ' 'model: %s' % name) # Replace layers on the model with fresh layers layers_to_names = {value: key for key, value in attributes_cache.items()} original_layers = model._layers[:] model._layers = data_structures.NoDependency([]) for layer in original_layers: # We preserve layer order. config = layer.get_config() # This will not work for nested subclassed models used as layers. # This would be theoretically possible to support, but would add complexity. # Only do it if users complain. if isinstance(layer, Network) and not layer._is_graph_network: raise ValueError('We do not support the use of nested subclassed models ' 'in `model_to_estimator` at this time. Found nested ' 'model: %s' % layer) fresh_layer = layer.__class__.from_config(config) name = layers_to_names[layer] setattr(model, name, fresh_layer) # Cache original model build attributes (in addition to layers) if (not hasattr(model, '_original_attributes_cache') or model._original_attributes_cache is None): if model.built: attributes_to_cache = [ 'inputs', 'outputs', '_feed_outputs', '_feed_output_names', '_feed_output_shapes', '_feed_loss_fns', 'loss_weights_list', 'targets', '_feed_targets', 'sample_weight_modes', 'weighted_metrics', 'metrics_names', 'metrics_tensors', 'metrics_updates', 'stateful_metric_names', 'total_loss', 'sample_weights', '_feed_sample_weights', 'train_function', 'test_function', 'predict_function', '_collected_trainable_weights', '_feed_inputs', '_feed_input_names', '_feed_input_shapes', 'optimizer', ] for name in attributes_to_cache: attributes_cache[name] = getattr(model, name) model._original_attributes_cache = data_structures.NoDependency( attributes_cache) # Reset built state model.built = False model.inputs = None model.outputs = None
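The key trick above is re-instantiating each tracked layer from its config, which copies topology but not state. A small sketch of that round-trip on a stock Keras layer (assumes a TensorFlow installation with tf.keras; the exact behaviour may vary by version):

import tensorflow as tf

layer = tf.keras.layers.Dense(4, activation="relu")
layer.build((None, 8))  # the original layer now has weights
fresh = layer.__class__.from_config(layer.get_config())
print(layer.built, fresh.built)  # True False: the clone starts unbuilt, with no weights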
Python
def _clone_and_build_model(mode, keras_model, custom_objects, features=None, labels=None): """Clone and build the given keras_model. Args: mode: training mode. keras_model: an instance of compiled keras model. custom_objects: Dictionary for custom objects. features: Dict of tensors. labels: Dict of tensors, or single tensor instance. Returns: The newly built model. """ # Set to True during training, False for inference. K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN) # Get list of inputs. if features is None: input_tensors = None else: input_tensors = _create_ordered_io(keras_model, estimator_io=features, is_input=True) # Get list of outputs. if labels is None: target_tensors = None elif isinstance(labels, dict): target_tensors = _create_ordered_io(keras_model, estimator_io=labels, is_input=False) else: target_tensors = [ _convert_tensor(labels) ] if keras_model._is_graph_network: if custom_objects: with CustomObjectScope(custom_objects): model = models.clone_model(keras_model, input_tensors=input_tensors) else: model = models.clone_model(keras_model, input_tensors=input_tensors) else: model = keras_model _in_place_subclassed_model_reset(model) if input_tensors is not None: model._set_inputs(input_tensors) # Compile/Build model if mode is model_fn_lib.ModeKeys.PREDICT: if isinstance(model, models.Sequential): model.build() else: if isinstance(keras_model.optimizer, optimizers.TFOptimizer): optimizer = keras_model.optimizer else: optimizer_config = keras_model.optimizer.get_config() optimizer = keras_model.optimizer.__class__.from_config(optimizer_config) optimizer.iterations = training_util.get_or_create_global_step() model.compile( optimizer, keras_model.loss, metrics=keras_model.metrics, loss_weights=keras_model.loss_weights, sample_weight_mode=keras_model.sample_weight_mode, weighted_metrics=keras_model.weighted_metrics, target_tensors=target_tensors) return model
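One detail worth calling out is how the clone gets its optimizer: a fresh instance is rebuilt from the original optimizer's config, so the two models do not share optimizer state. A hedged sketch of that round-trip with a stock tf.keras optimizer (version-dependent API):

import tensorflow as tf

opt = tf.keras.optimizers.Adam(learning_rate=3e-4)
fresh_opt = opt.__class__.from_config(opt.get_config())
print(type(fresh_opt).__name__)                     # Adam
print(fresh_opt.get_config() == opt.get_config())   # True: same hyperparameters, new state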
Python
def _create_keras_model_fn(keras_model, custom_objects=None): """Creates model_fn for keras Estimator. Args: keras_model: an instance of compiled keras model. custom_objects: Dictionary for custom objects. Returns: The model_fn for a keras Estimator. """ def model_fn(features, labels, mode): """model_fn for keras Estimator.""" model = _clone_and_build_model(mode, keras_model, custom_objects, features, labels) model_output_names = [] # We need to make sure that the output names of the last layer in the model # is the same for each of the cloned models. This is required for mirrored # strategy when we call regroup. if distribute_lib.has_distribution_strategy(): for name in model.output_names: name = re.compile(r'_\d$').sub('', name) model_output_names.append(name) else: model_output_names = model.output_names # Get inputs to EstimatorSpec predictions = dict(zip(model_output_names, model.outputs)) loss = None train_op = None eval_metric_ops = None # Set loss and metric only during train and evaluate. if mode is not model_fn_lib.ModeKeys.PREDICT: if mode is model_fn_lib.ModeKeys.TRAIN: model._make_train_function() # pylint: disable=protected-access else: model._make_test_function() # pylint: disable=protected-access loss = model.total_loss if model.metrics: # TODO(fchollet): support stateful metrics eval_metric_ops = {} # When each metric maps to an output if isinstance(model.metrics, dict): for i, output_name in enumerate(model.metrics.keys()): metric_name = model.metrics[output_name] if callable(metric_name): metric_name = metric_name.__name__ # When some outputs use the same metric if list(model.metrics.values()).count(metric_name) > 1: metric_name += '_' + output_name eval_metric_ops[metric_name] = metrics_module.mean( model.metrics_tensors[i - len(model.metrics)]) else: for i, metric_name in enumerate(model.metrics): if callable(metric_name): metric_name = metric_name.__name__ eval_metric_ops[metric_name] = metrics_module.mean( model.metrics_tensors[i]) # Set train_op only during train. if mode is model_fn_lib.ModeKeys.TRAIN: train_op = model.train_function.updates_op if not model._is_graph_network: # Reset model state to original state, # to avoid `model_fn` being destructive for the initial model argument. _in_place_subclassed_model_state_restoration(keras_model) return model_fn_lib.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops, export_outputs={ _DEFAULT_SERVING_KEY: export_lib.export_output.PredictOutput(predictions) }) return model_fn
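Under a mirrored distribution strategy, the cloned per-tower models can pick up numeric suffixes on their output names, which the model_fn strips before zipping names with outputs. The regex behaviour, on hypothetical names:

import re

tower_output_names = ["dense_1", "dense_2", "logits"]  # hypothetical per-tower names
normalized = [re.compile(r'_\d$').sub('', name) for name in tower_output_names]
print(normalized)  # ['dense', 'dense', 'logits']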
Python
def _initialize_devices(self, num_gpus_per_worker, cluster_spec, task_type, task_id): """Initialize internal devices. It creates variable devices and compute devices. Variables and operations will be assigned to them respectively. We have one compute device per tower. The variable device is a device function or device string. The default variable device assigns variables to parameter servers in a round-robin fashion. Args: num_gpus_per_worker: number of local GPUs or GPUs per worker. cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the cluster configurations. task_type: the current task type. task_id: the current task id. Raises: ValueError: if the cluster_spec doesn't have ps jobs. """ self._task_type = task_type or "worker" self._task_id = task_id or 0 self._worker_device = "/job:%s/task:%d" % (self._task_type, self._task_id) # TODO(yuefengz): maybe clearer to split it into two classes, one for # the distributed case and one for the local case, once we have the factory # class/method. # Define compute devices, which is a list of device strings, one for each # tower. When there are GPUs, replicate operations on these GPUs. Otherwise, # place operations on CPU. if cluster_spec is None: # Local mode. if num_gpus_per_worker > 0: self._compute_devices = list( map("/device:GPU:{}".format, range(num_gpus_per_worker))) else: self._compute_devices = [_LOCAL_CPU] else: # Distributed mode. if num_gpus_per_worker > 0: self._compute_devices = [ "%s/device:GPU:%d" % (self._worker_device, i) for i in range(num_gpus_per_worker) ] else: self._compute_devices = [self._worker_device] self._compute_devices = list( map(device_util.resolve, self._compute_devices)) self._canonical_compute_device_set = set(self._compute_devices) # Define variable device which is a device string in the local case and a # device function in the distributed case. It is used to open a device scope # where variables are defined. # The `_parameter_devices` is needed for the `parameter_devices` property # and is a list of all variable devices. if cluster_spec is None: # Local mode. If there is only one GPU, put everything on that GPU. # Otherwise, place variables on CPU. if num_gpus_per_worker == 1: assert len(list(self._compute_devices)) == 1 self._variable_device = _LOCAL_GPU_0 self._parameter_devices = [_LOCAL_GPU_0] else: self._variable_device = _LOCAL_CPU self._parameter_devices = [_LOCAL_CPU] else: # Distributed mode. Place variables on ps jobs in a round-robin fashion. # Note that devices returned from `replica_device_setter` are not # canonical and therefore we don't canonicalize all variable devices to # make them consistent. # TODO(yuefengz): support passing a strategy object to control variable # assignment. # TODO(yuefengz): merge the logic of replica_device_setter into this # class. num_ps_replicas = len(cluster_spec.as_dict().get("ps", [])) if num_ps_replicas == 0: raise ValueError("The cluster spec needs to have `ps` jobs.") self._variable_device = device_setter.replica_device_setter( ps_tasks=num_ps_replicas, worker_device=self._worker_device, merge_devices=True, cluster=cluster_spec) # Parameter devices are all tasks of the "ps" job. self._parameter_devices = map("/job:ps/task:{}".format, range(num_ps_replicas)) # Define the default device in cross-tower mode. In the distributed case, we # set the default device to the corresponding worker to prevent these ops # from being placed on other workers. if cluster_spec is None: self._default_device = None else: self._default_device = self._worker_device
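The device placement above boils down to string templating plus a round-robin ps assignment. A pure-string sketch of the worker/compute device names it constructs, with hypothetical cluster values (the real code also resolves and canonicalises them):

task_type, task_id, num_gpus_per_worker = "worker", 1, 2  # hypothetical values
worker_device = "/job:%s/task:%d" % (task_type, task_id)
compute_devices = ["%s/device:GPU:%d" % (worker_device, i)
                   for i in range(num_gpus_per_worker)]
print(compute_devices)
# ['/job:worker/task:1/device:GPU:0', '/job:worker/task:1/device:GPU:1']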
Python
def _select_single_value(self, structured): """Select any single values in `structured`.""" def _select_fn(x): # pylint: disable=g-missing-docstring if isinstance(x, values.Mirrored): if len(x.devices) == 1: return list(x._index.values())[0] # pylint: disable=protected-access else: raise ValueError( "You cannot update variable with a Mirrored object with multiple " "components %r when using ParameterServerStrategy. You must " "specify a single value or a Mirrored with a single value." % x) elif isinstance(x, values.PerDevice): raise ValueError( "You cannot update variable with a PerDevice object %r when using " "ParameterServerStrategy. You must specify a single value or a " "Mirrored with a single value" % x) else: return x return nest.map_structure(_select_fn, structured)
Python
def testInGraph(self): """Test it runs in-graph replicated training correctly.""" distribute_coordinator.run_distribute_coordinator( self._in_graph_worker_fn, cluster_spec=self._cluster_spec, between_graph=False) self.assertEqual(self._result_correct, 1)
Python
def _init_func(): """Creates an iterator for the input dataset. Returns: A `string` tensor that encapsulates the iterator created. """ # pylint: disable=protected-access ds_variant = self._input_dataset._as_variant_tensor() resource = core_gen_dataset_ops.anonymous_iterator( output_types=self._flat_output_types, output_shapes=self._flat_output_shapes) with ops.control_dependencies( [core_gen_dataset_ops.make_iterator(ds_variant, resource)]): return core_gen_dataset_ops.iterator_to_string_handle(resource)
Python
def _finalize_func(string_handle): """Destroys the iterator resource created. Args: string_handle: An iterator string handle created by _init_func Returns: Tensor constant 0 """ iterator_resource = core_gen_dataset_ops.iterator_from_string_handle_v2( string_handle, output_types=self._flat_output_types, output_shapes=self._flat_output_shapes) with ops.control_dependencies([ resource_variable_ops.destroy_resource_op( iterator_resource, ignore_lookup_error=True)]): return array_ops.constant(0, dtypes.int64)
Python
def _memory_per_class(self): """Returns the number of exemplars per class.""" if self._args["dataset"] == 'cifar100': img_per_cls = 500 else: img_per_cls = 1300 if self._fixed_memory: base_exemplar_budget = self._args["memory_size"] else: base_exemplar_budget = self._args["memory_size"] // self._total_n_classes * self._n_classes if self._disable_rmm: self._memory_size = base_exemplar_budget else: self._memory_size = base_exemplar_budget + int(self._current_mem_rate * self._args["increment"] * img_per_cls) return self._memory_size // self._n_classes
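A worked example of the budget arithmetic above with hypothetical settings (memory_size=2000, 100 classes in total, 50 seen so far, CIFAR-100 image counts, RMM enabled):

memory_size, total_n_classes, n_classes = 2000, 100, 50   # hypothetical values
img_per_cls, increment, current_mem_rate = 500, 10, 0.2
fixed_memory, disable_rmm = False, False

base = memory_size if fixed_memory else memory_size // total_n_classes * n_classes
budget = base if disable_rmm else base + int(current_mem_rate * increment * img_per_cls)
print(budget, budget // n_classes)  # 2000 40 -> 40 exemplars per class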
Python
def process_record_dataset(dataset, is_training, batch_size, shuffle_buffer, parse_record_fn, num_epochs=1, dtype=tf.float32, datasets_num_private_threads=None, num_parallel_batches=1): """Given a Dataset with raw records, return an iterator over the records. Args: dataset: A Dataset representing raw records is_training: A boolean denoting whether the input is for training. batch_size: The number of samples per batch. shuffle_buffer: The buffer size to use when shuffling records. A larger value results in better randomness, but smaller values reduce startup time and use less memory. parse_record_fn: A function that takes a raw record and returns the corresponding (image, label) pair. num_epochs: The number of epochs to repeat the dataset. dtype: Data type to use for images/features. datasets_num_private_threads: Number of threads for a private threadpool created for all datasets computation. num_parallel_batches: Number of parallel batches for tf.data. Returns: Dataset of (image, label) pairs ready for iteration. """ # KungFu: Shard the dataset if is_training: from kungfu.python import current_cluster_size, current_rank dataset = dataset.shard(num_shards=current_cluster_size(), index=current_rank()) # Prefetches a batch at a time to smooth out the time taken to load input # files for shuffling and processing. dataset = dataset.prefetch(buffer_size=batch_size) if is_training: # Shuffles records before repeating to respect epoch boundaries. # dataset = dataset.shuffle(buffer_size=shuffle_buffer) pass # Repeats the dataset for the number of epochs to train. dataset = dataset.repeat(num_epochs) # Parses the raw records into images and labels. dataset = dataset.apply( tf.contrib.data.map_and_batch( lambda value: parse_record_fn(value, is_training, dtype), batch_size=batch_size, num_parallel_batches=num_parallel_batches, drop_remainder=False)) # Operations between the final prefetch and the get_next call to the iterator # will happen synchronously during run time. We prefetch here again to # background all of the above processing work and keep it out of the # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE # allows DistributionStrategies to adjust how many batches to fetch based # on how many devices are present. dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE) # Defines a specific size thread pool for tf.data operations. if datasets_num_private_threads: tf.logging.info('datasets_num_private_threads: %s', datasets_num_private_threads) dataset = threadpool.override_threadpool( dataset, threadpool.PrivateThreadPool( datasets_num_private_threads, display_name='input_pipeline_thread_pool')) return dataset
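The KungFu sharding at the top relies on tf.data's shard semantics: worker `index` keeps every `num_shards`-th record, so workers consume disjoint slices of the dataset. A plain-Python sketch of that selection rule with hypothetical values:

records = list(range(10))   # hypothetical record indices
num_shards, index = 4, 1    # e.g. a 4-worker cluster, this process is rank 1
shard = [r for i, r in enumerate(records) if i % num_shards == index]
print(shard)  # [1, 5, 9]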
Python
def resnet_model_fn(features, labels, mode, model_class, resnet_size, weight_decay, learning_rate_fn, momentum, data_format, resnet_version, loss_scale, loss_filter_fn=None, dtype=resnet_model.DEFAULT_DTYPE, fine_tune=False): """Shared functionality for different resnet model_fns. Initializes the ResnetModel representing the model layers and uses that model to build the necessary EstimatorSpecs for the `mode` in question. For training, this means building losses, the optimizer, and the train op that get passed into the EstimatorSpec. For evaluation and prediction, the EstimatorSpec is returned without a train op, but with the necessary parameters for the given mode. Args: features: tensor representing input images labels: tensor representing class labels for all input images mode: current estimator mode; should be one of `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT` model_class: a class representing a TensorFlow model that has a __call__ function. We assume here that this is a subclass of ResnetModel. resnet_size: A single integer for the size of the ResNet model. weight_decay: weight decay loss rate used to regularize learned variables. learning_rate_fn: function that returns the current learning rate given the current global_step momentum: momentum term used for optimization data_format: Input format ('channels_last', 'channels_first', or None). If set to None, the format is dependent on whether a GPU is available. resnet_version: Integer representing which version of the ResNet network to use. See README for details. Valid values: [1, 2] loss_scale: The factor to scale the loss for numerical stability. A detailed summary is present in the arg parser help text. loss_filter_fn: function that takes a string variable name and returns True if the var should be included in loss calculation, and False otherwise. If None, batch_normalization variables will be excluded from the loss. dtype: the TensorFlow dtype to use for calculations. fine_tune: If True, only train the dense layers (final layers). Returns: EstimatorSpec parameterized according to the input params and the current mode. """ # Generate a summary node for the images tf.summary.image('images', features, max_outputs=6) # Checks that features/images have the same data type being used for calculations. assert features.dtype == dtype model = model_class(resnet_size, data_format, resnet_version=resnet_version, dtype=dtype) logits = model(features, mode == tf.estimator.ModeKeys.TRAIN) # This acts as a no-op if the logits are already in fp32 (provided logits are # not a SparseTensor). If dtype is low precision, logits must be cast to # fp32 for numerical stability. logits = tf.cast(logits, tf.float32) predictions = { 'classes': tf.argmax(logits, axis=1), 'probabilities': tf.nn.softmax(logits, name='softmax_tensor') } if mode == tf.estimator.ModeKeys.PREDICT: # Return the predictions and the specification for serving a SavedModel return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, export_outputs={ 'predict': tf.estimator.export.PredictOutput(predictions) }) # Calculate loss, which includes softmax cross entropy and L2 regularization. cross_entropy = tf.losses.sparse_softmax_cross_entropy( logits=logits, labels=labels) # Create a tensor named cross_entropy for logging purposes. tf.identity(cross_entropy, name='cross_entropy') tf.summary.scalar('cross_entropy', cross_entropy) # If no loss_filter_fn is passed, assume we want the default behavior, # which is that batch_normalization variables are excluded from loss.
def exclude_batch_norm(name): return 'batch_normalization' not in name loss_filter_fn = loss_filter_fn or exclude_batch_norm # Add weight decay to the loss. l2_loss = weight_decay * tf.add_n( # loss is computed using fp32 for numerical stability. [tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables() if loss_filter_fn(v.name)]) tf.summary.scalar('l2_loss', l2_loss) loss = cross_entropy + l2_loss if mode == tf.estimator.ModeKeys.TRAIN: global_step = tf.train.get_or_create_global_step() learning_rate = learning_rate_fn(global_step) # Create a tensor named learning_rate for logging purposes tf.identity(learning_rate, name='learning_rate') tf.summary.scalar('learning_rate', learning_rate) optimizer = tf.train.MomentumOptimizer( learning_rate=learning_rate, momentum=momentum ) # KungFu: wrap optimizer if flags.FLAGS.kungfu_opt == 'ssgd': from kungfu.tensorflow.optimizers import SynchronousSGDOptimizer optimizer = SynchronousSGDOptimizer(optimizer) elif flags.FLAGS.kungfu_opt == 'gns': import kungfu.tensorflow as kf from kungfu.tensorflow.optimizers import \ MonitorGradientNoiseScaleOptimizer # from kungfu_experiment.gns import MonitorGradientNoiseScaleOptimizer init_bs = 32 device_batch_size = kf.get_or_create_batch_size(init_bs) optimizer = MonitorGradientNoiseScaleOptimizer(optimizer, device_batch_size, verbose=False) else: raise RuntimeError('invalid kungfu optimizer %s' % (flags.FLAGS.kungfu_opt)) def _dense_grad_filter(gvs): """Only apply gradient updates to the final layer. This function is used for fine tuning. Args: gvs: list of tuples with gradients and variable info Returns: filtered gradients so that only the dense layer remains """ return [(g, v) for g, v in gvs if 'dense' in v.name] if loss_scale != 1: # When computing fp16 gradients, often intermediate tensor values are # so small, they underflow to 0. To avoid this, we multiply the loss by # loss_scale to make these tensor values loss_scale times bigger. scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale) if fine_tune: scaled_grad_vars = _dense_grad_filter(scaled_grad_vars) # Once the gradient computation is complete we can scale the gradients # back to the correct scale before passing them to the optimizer. unscaled_grad_vars = [(grad / loss_scale, var) for grad, var in scaled_grad_vars] minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step=global_step) else: grad_vars = optimizer.compute_gradients(loss) if fine_tune: grad_vars = _dense_grad_filter(grad_vars) minimize_op = optimizer.apply_gradients(grad_vars, global_step=global_step) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) train_op = tf.group(minimize_op, update_ops) else: train_op = None accuracy = tf.metrics.accuracy(labels, predictions['classes']) accuracy_top_5 = tf.metrics.mean(tf.nn.in_top_k(predictions=logits, targets=labels, k=5, name='top_5_op')) metrics = {'accuracy': accuracy, 'accuracy_top_5': accuracy_top_5} # Create a tensor named train_accuracy for logging purposes tf.identity(accuracy[1], name='train_accuracy') tf.identity(accuracy_top_5[1], name='train_accuracy_top_5') tf.summary.scalar('train_accuracy', accuracy[1]) tf.summary.scalar('train_accuracy_top_5', accuracy_top_5[1]) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=metrics)
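The loss-scaling branch above follows the usual fp16 recipe: multiply the loss before differentiating so small gradients do not underflow, then divide the gradients back before applying them. A numeric sketch with made-up values:

loss_scale = 128.0                 # hypothetical scale factor
grad_of_scaled_loss = 0.512        # pretend output of compute_gradients(loss * loss_scale)
unscaled_grad = grad_of_scaled_loss / loss_scale
print(unscaled_grad)               # 0.004, the gradient at its true scale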
Python
def resnet_main( flags_obj, model_function, input_function, dataset_name, shape=None): """Shared main loop for ResNet Models. Args: flags_obj: An object containing parsed flags. See define_resnet_flags() for details. model_function: the function that instantiates the Model and builds the ops for train/eval. This will be passed directly into the estimator. input_function: the function that processes the dataset and returns a dataset that the estimator can train on. This will be wrapped with all the relevant flags for running and passed to the estimator. dataset_name: the name of the dataset for training and evaluation. This is used for logging purposes. shape: list of ints representing the shape of the images used for training. This is only used if flags_obj.export_dir is passed. """ model_helpers.apply_clean(flags.FLAGS) # Ensures flag override logic is only executed if explicitly triggered. if flags_obj.tf_gpu_thread_mode: override_flags_and_set_envars_for_gpu_thread_pool(flags_obj) # Creates session config. allow_soft_placement=True is required for # multi-GPU and is not harmful for other modes. session_config = tf.ConfigProto( inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads, intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads, allow_soft_placement=True) distribution_strategy = distribution_utils.get_distribution_strategy( flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg) # Creates a `RunConfig` that checkpoints every 24 hours, which essentially # results in checkpoints determined only by `epochs_between_evals`. run_config = tf.estimator.RunConfig( train_distribute=distribution_strategy, session_config=session_config, save_checkpoints_secs=60*60*24) # Initializes model with all but the dense layer from pretrained ResNet.
if flags_obj.pretrained_model_checkpoint_path is not None: warm_start_settings = tf.estimator.WarmStartSettings( flags_obj.pretrained_model_checkpoint_path, vars_to_warm_start='^(?!.*dense)') else: warm_start_settings = None classifier = tf.estimator.Estimator( model_fn=model_function, model_dir=flags_obj.model_dir, config=run_config, warm_start_from=warm_start_settings, params={ 'resnet_size': int(flags_obj.resnet_size), 'data_format': flags_obj.data_format, 'batch_size': flags_obj.batch_size, 'resnet_version': int(flags_obj.resnet_version), 'loss_scale': flags_core.get_loss_scale(flags_obj), 'dtype': flags_core.get_tf_dtype(flags_obj), 'fine_tune': flags_obj.fine_tune }) run_params = { 'batch_size': flags_obj.batch_size, 'dtype': flags_core.get_tf_dtype(flags_obj), 'resnet_size': flags_obj.resnet_size, 'resnet_version': flags_obj.resnet_version, 'synthetic_data': flags_obj.use_synthetic_data, 'train_epochs': flags_obj.train_epochs, } if flags_obj.use_synthetic_data: dataset_name = dataset_name + '-synthetic' benchmark_logger = logger.get_benchmark_logger() benchmark_logger.log_run_info('resnet', dataset_name, run_params, test_id=flags_obj.benchmark_test_id) train_hooks = hooks_helper.get_train_hooks( flags_obj.hooks, model_dir=flags_obj.model_dir, batch_size=flags_obj.batch_size) # KungFu from kungfu.tensorflow.initializer import BroadcastGlobalVariablesHook train_hooks.append(BroadcastGlobalVariablesHook()) def input_fn_train(num_epochs, device_batch_size): return input_function( is_training=True, data_dir=flags_obj.data_dir, batch_size=device_batch_size, num_epochs=num_epochs, dtype=flags_core.get_tf_dtype(flags_obj), datasets_num_private_threads=flags_obj.datasets_num_private_threads, num_parallel_batches=flags_obj.datasets_num_parallel_batches) def input_fn_eval(): return input_function( is_training=False, data_dir=flags_obj.data_dir, batch_size=distribution_utils.per_device_batch_size( flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)), num_epochs=1, dtype=flags_core.get_tf_dtype(flags_obj)) if flags_obj.eval_only or not flags_obj.train_epochs: # If --eval_only is set, perform a single loop with zero train epochs. schedule, n_loops = [0], 1 else: # Compute the number of times to loop while training. All but the last # pass will train for `epochs_between_evals` epochs, while the last will # train for the number needed to reach `training_epochs`. For instance if # train_epochs = 25 and epochs_between_evals = 10 # schedule will be set to [10, 10, 5]. That is to say, the loop will: # Train for 10 epochs and then evaluate. # Train for another 10 epochs and then evaluate. # Train for a final 5 epochs (to reach 25 epochs) and then evaluate. n_loops = math.ceil(flags_obj.train_epochs / flags_obj.epochs_between_evals) schedule = [flags_obj.epochs_between_evals for _ in range(int(n_loops))] schedule[-1] = flags_obj.train_epochs - sum(schedule[:-1]) # over counting. 
epoch = 0 # boundary_epochs = [] boundary_epochs = [91, 136, 182] # device_batch_size = distribution_utils.per_device_batch_size(flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)) # get the gns policy from kungfu.tensorflow.policy import PolicyHook for h in train_hooks: if isinstance(h, PolicyHook): kungfu_policy = h.policies[0] assert kungfu_policy is not None device_batch_size = kungfu_policy.get_batch_size() eval_on_start = True if eval_on_start: eval_results = classifier.evaluate(input_fn=input_fn_eval, steps=flags_obj.max_train_steps) benchmark_logger.log_evaluation_result(eval_results) print('#%d: %s' % (0, eval_results)) trained_epoch = 0 for cycle_index, num_train_epochs in enumerate(schedule): tf.logging.info('Starting cycle: %d/%d', cycle_index, int(n_loops)) if num_train_epochs: device_batch_size = kungfu_policy.get_batch_size() # for e in boundary_epochs: # if trained_epoch <= e and e < trained_epoch + num_train_epochs: # print('change batch size at cycle %d' % (cycle_index)) # device_batch_size *= 2 # break print('begin cycle %d, training %d epochs with bs=%d' % (cycle_index, num_train_epochs, device_batch_size)) cycle_begin = time.time() classifier.train(input_fn=lambda: input_fn_train(num_train_epochs, device_batch_size), hooks=train_hooks, max_steps=flags_obj.max_train_steps) trained_epoch += num_train_epochs # for i in range(num_train_epochs): # epoch += 1 # if epoch in boundary_epochs: # device_batch_size *= 2 # print('epoch %d, device_batch_size=%d' % (epoch, device_batch_size)) # t0 = time.time() # print('BEGIN iter %d of cycle %d' % (i, cycle_index)) # classifier.train(input_fn=lambda: input_fn_train(1, device_batch_size), # hooks=train_hooks, max_steps=flags_obj.max_train_steps) # took = time.time() - t0 # print('END iter %d of cycle %d, took %.2fs' % (i, cycle_index, took)) cycle_took = time.time() - cycle_begin print('end cycle %d, trained %d epochs took %.2fs' % (cycle_index, num_train_epochs, cycle_took)) tf.logging.info('Starting to evaluate.') # flags_obj.max_train_steps is generally associated with testing and # profiling. As a result it is frequently called with synthetic data, which # will iterate forever. Passing steps=flags_obj.max_train_steps allows the # eval (which is generally unimportant in those circumstances) to terminate. # Note that eval will run for max_train_steps each loop, regardless of the # global_step count. eval_begin = time.time() eval_results = classifier.evaluate(input_fn=input_fn_eval, steps=flags_obj.max_train_steps) benchmark_logger.log_evaluation_result(eval_results) eval_dur = time.time() - eval_begin print('evaluate cycle %d took %.2fs' % (cycle_index, eval_dur)) print('#%d: %s' % (cycle_index + 1, eval_results)) if model_helpers.past_stop_threshold( flags_obj.stop_threshold, eval_results['accuracy']): break if flags_obj.export_dir is not None: # Exports a saved model for the given classifier. export_dtype = flags_core.get_tf_dtype(flags_obj) if flags_obj.image_bytes_as_serving_input: input_receiver_fn = functools.partial( image_bytes_serving_input_fn, shape, dtype=export_dtype) else: input_receiver_fn = export.build_tensor_serving_input_receiver_fn( shape, batch_size=flags_obj.batch_size, dtype=export_dtype) classifier.export_savedmodel(flags_obj.export_dir, input_receiver_fn, strip_default_attrs=True)
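The schedule computed above alternates training and evaluation; with the values quoted in the code comment (train_epochs=25, epochs_between_evals=10) it comes out as [10, 10, 5]:

import math

train_epochs, epochs_between_evals = 25, 10
n_loops = math.ceil(train_epochs / epochs_between_evals)
schedule = [epochs_between_evals for _ in range(int(n_loops))]
schedule[-1] = train_epochs - sum(schedule[:-1])  # trim the last loop to avoid over counting
print(n_loops, schedule)  # 3 [10, 10, 5]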
Python
def define_resnet_flags(resnet_size_choices=None): """Add flags and validators for ResNet.""" flags_core.define_base() flags_core.define_performance(num_parallel_calls=False, tf_gpu_thread_mode=True, datasets_num_private_threads=True, datasets_num_parallel_batches=True) flags_core.define_image() flags_core.define_benchmark() flags.adopt_module_key_flags(flags_core) flags.DEFINE_enum( name='resnet_version', short_name='rv', default='1', enum_values=['1', '2'], help=flags_core.help_wrap( 'Version of ResNet. (1 or 2) See README.md for details.')) flags.DEFINE_bool( name='fine_tune', short_name='ft', default=False, help=flags_core.help_wrap( 'If True do not train any parameters except for the final layer.')) flags.DEFINE_string( name='pretrained_model_checkpoint_path', short_name='pmcp', default=None, help=flags_core.help_wrap( 'If not None initialize all the network except the final layer with ' 'these values')) flags.DEFINE_boolean( name='eval_only', default=False, help=flags_core.help_wrap('Skip training and only perform evaluation on ' 'the latest checkpoint.')) flags.DEFINE_boolean( name='image_bytes_as_serving_input', default=False, help=flags_core.help_wrap( 'If True exports savedmodel with serving signature that accepts ' 'JPEG image bytes instead of a fixed size [HxWxC] tensor that ' 'represents the image. The former is easier to use for serving at ' 'the expense of image resize/cropping being done as part of model ' 'inference. Note, this flag only applies to ImageNet and cannot ' 'be used for CIFAR.')) choice_kwargs = dict( name='resnet_size', short_name='rs', default='50', help=flags_core.help_wrap('The size of the ResNet model to use.')) if resnet_size_choices is None: flags.DEFINE_string(**choice_kwargs) else: flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs)
def define_resnet_flags(resnet_size_choices=None): """Add flags and validators for ResNet.""" flags_core.define_base() flags_core.define_performance(num_parallel_calls=False, tf_gpu_thread_mode=True, datasets_num_private_threads=True, datasets_num_parallel_batches=True) flags_core.define_image() flags_core.define_benchmark() flags.adopt_module_key_flags(flags_core) flags.DEFINE_enum( name='resnet_version', short_name='rv', default='1', enum_values=['1', '2'], help=flags_core.help_wrap( 'Version of ResNet. (1 or 2) See README.md for details.')) flags.DEFINE_bool( name='fine_tune', short_name='ft', default=False, help=flags_core.help_wrap( 'If True do not train any parameters except for the final layer.')) flags.DEFINE_string( name='pretrained_model_checkpoint_path', short_name='pmcp', default=None, help=flags_core.help_wrap( 'If not None initialize all the network except the final layer with ' 'these values')) flags.DEFINE_boolean( name='eval_only', default=False, help=flags_core.help_wrap('Skip training and only perform evaluation on ' 'the latest checkpoint.')) flags.DEFINE_boolean( name='image_bytes_as_serving_input', default=False, help=flags_core.help_wrap( 'If True exports savedmodel with serving signature that accepts ' 'JPEG image bytes instead of a fixed size [HxWxC] tensor that ' 'represents the image. The former is easier to use for serving at ' 'the expense of image resize/cropping being done as part of model ' 'inference. Note, this flag only applies to ImageNet and cannot ' 'be used for CIFAR.')) choice_kwargs = dict( name='resnet_size', short_name='rs', default='50', help=flags_core.help_wrap('The size of the ResNet model to use.')) if resnet_size_choices is None: flags.DEFINE_string(**choice_kwargs) else: flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs)
Python
async def change_status(): """ Task for changing the activity status. loops through the cycle of the STATUS list and sets that as bot presence """ await bot.change_presence(activity=discord.Game(next(STATUS))) # NOTE- There are other methods, that can be utilised instead of just 'playing'
async def change_status(): """ Task for changing the activity status. loops through the cycle of the STATUS list and sets that as bot presence """ await bot.change_presence(activity=discord.Game(next(STATUS))) # NOTE- There are other methods, that can be utilised instead of just 'playing'
Python
def metrics(): ''' Read the data from Redis and print each value ''' metrics = [] for key in _redis.scan_iter(): metrics.append(str(_redis.get(key))) return "\n".join(metrics)
def metrics(): ''' Read the data from Redis and print each value ''' metrics = [] for key in _redis.scan_iter(): metrics.append(str(_redis.get(key))) return "\n".join(metrics)
Python
def from_id(id): """ Generates an iso-8859-1 encoded byte string that contains an XML file for OpenMedia/WINA :param id: The DWD ID for the event to be processed :return: A byte string XML for use with OpenMedia """ event = db.by_id(id) sent = generate.local_time(event['sent']).strftime('%Y%m%dT%H%M%S,000') title = generate.title(event, variant='wina_headline') text = generate.description(event) keywords = generate.keywords(event) breaking = False if not db.breaking_memo(): breaking = True elif event['severity'] == 'Extreme': breaking = True return wina_xml(sent, title, text, keywords, breaking)
def from_id(id): """ Generates an iso-8859-1 encoded byte string that contains an XML file for OpenMedia/WINA :param id: The DWD ID for the event to be processed :return: A byte string XML for use with OpenMedia """ event = db.by_id(id) sent = generate.local_time(event['sent']).strftime('%Y%m%dT%H%M%S,000') title = generate.title(event, variant='wina_headline') text = generate.description(event) keywords = generate.keywords(event) breaking = False if not db.breaking_memo(): breaking = True elif event['severity'] == 'Extreme': breaking = True return wina_xml(sent, title, text, keywords, breaking)
Python
def upload(files): """ Uploads a WINA-XML file to a provided server via explicit FTP with TLS Protocol. :param: files: List of DWD BytesIO(from_id(id)) :return: Status code """ logins = [ ('NVS_FTP_URL', 'NVS_FTP_USER', 'NVS_FTP_PASS'), ('NVS_FTP_URL_SECONDARY', 'NVS_FTP_USER_SECONDARY', 'NVS_FTP_PASS_SECONDARY'), ] for url, user, passw in logins: try: ftps = FTP_TLS( host=os.environ[url], user=os.environ[user], passwd=os.environ[passw], context=ssl.create_default_context(), ) except KeyError: print(f'Environment variable {url}, {user}, {passw} for ftp connection not found') continue ftps.prot_p() for file in files: file.seek(0) print(ftps.storbinary(f'STOR uwa_{uuid4()}.xml', file)) print(ftps.quit())
def upload(files): """ Uploads a WINA-XML file to a provided server via explicit FTP with TLS Protocol. :param: files: List of DWD BytesIO(from_id(id)) :return: Status code """ logins = [ ('NVS_FTP_URL', 'NVS_FTP_USER', 'NVS_FTP_PASS'), ('NVS_FTP_URL_SECONDARY', 'NVS_FTP_USER_SECONDARY', 'NVS_FTP_PASS_SECONDARY'), ] for url, user, passw in logins: try: ftps = FTP_TLS( host=os.environ[url], user=os.environ[user], passwd=os.environ[passw], context=ssl.create_default_context(), ) except KeyError: print(f'Environment variable {url}, {user}, {passw} for ftp connection not found') continue ftps.prot_p() for file in files: file.seek(0) print(ftps.storbinary(f'STOR uwa_{uuid4()}.xml', file)) print(ftps.quit())
Python
def filter_symbol_table(disasm, symbols_table): """ Given the path to a symbols table, trim the table to only contain embedded data objects, save the trimeed table to a file, and also return the trimmed table as a list of strings. """ sro_start, sro_end = get_sro_range(disasm) # trim garbage lines at the beginning and end trimmed_lines = symbols_table[4:-4] filtered = [] for line in trimmed_lines: # if it is an object, and it's in the code, # and the address is within the range of srodata address = int(line[:8], 16) if address >= sro_start and address < sro_end: filtered.append(line) return filtered
def filter_symbol_table(disasm, symbols_table): """ Given the path to a symbols table, trim the table to only contain embedded data objects, save the trimeed table to a file, and also return the trimmed table as a list of strings. """ sro_start, sro_end = get_sro_range(disasm) # trim garbage lines at the beginning and end trimmed_lines = symbols_table[4:-4] filtered = [] for line in trimmed_lines: # if it is an object, and it's in the code, # and the address is within the range of srodata address = int(line[:8], 16) if address >= sro_start and address < sro_end: filtered.append(line) return filtered
Python
def num_slice_types(slices): """ Gather the number of slice in getitem slices. """ num_slice = 0 for s in slices: if isinstance(s, slice) or isinstance(s, int): num_slice += 1 return num_slice
def num_slice_types(slices): """ Gather the number of slice in getitem slices. """ num_slice = 0 for s in slices: if isinstance(s, slice) or isinstance(s, int): num_slice += 1 return num_slice
Python
def slice_to_trt_params(py_slice, dim_size): """ Convert python slice to TensorRT slice layer parameters. """ start = get_positive_dim(py_slice.start, dim_size) if py_slice.start else 0 stride = py_slice.step if py_slice.step else 1 stop = get_positive_dim(py_slice.stop, dim_size) if py_slice.stop else dim_size size = math.ceil((stop - start) * 1.0 / stride) return start, size, stride
def slice_to_trt_params(py_slice, dim_size): """ Convert python slice to TensorRT slice layer parameters. """ start = get_positive_dim(py_slice.start, dim_size) if py_slice.start else 0 stride = py_slice.step if py_slice.step else 1 stop = get_positive_dim(py_slice.stop, dim_size) if py_slice.stop else dim_size size = math.ceil((stop - start) * 1.0 / stride) return start, size, stride
Python
def write_outputs(self, variable_name: str, filename: str) -> None: """Write a file containing the list of all outputs which are generated by this script.""" content = 'set({}\n {})'.format( variable_name, '\n '.join('"' + name + '"' for name in sorted(self.filenames))) self._write_if_changed(filename, content)
def write_outputs(self, variable_name: str, filename: str) -> None: """Write a file containing the list of all outputs which are generated by this script.""" content = 'set({}\n {})'.format( variable_name, '\n '.join('"' + name + '"' for name in sorted(self.filenames))) self._write_if_changed(filename, content)
Python
def create_python_bindings( fm: FileManager, pairs: Sequence[PythonSignatureNativeFunctionPair], pred: Callable[[NativeFunction], bool], module: Optional[str], filename: str, *, method: bool, ) -> None: """Generates Python bindings to ATen functions""" py_methods: List[str] = [] py_method_defs: List[str] = [] py_forwards: List[str] = [] grouped = group_filter_overloads(pairs, pred) for name in sorted(grouped.keys(), key=lambda x: str(x)): overloads = grouped[name] py_methods.append(method_impl(name, module, overloads, method=method)) py_method_defs.append(method_def(name, module, overloads, method=method)) py_forwards.extend(forward_decls(name, overloads, method=method)) fm.write_with_template(filename, filename, lambda: { 'generated_comment': '@' + f'generated from {fm.template_dir}/{filename}', 'py_forwards': py_forwards, 'py_methods': py_methods, 'py_method_defs': py_method_defs, })
def create_python_bindings( fm: FileManager, pairs: Sequence[PythonSignatureNativeFunctionPair], pred: Callable[[NativeFunction], bool], module: Optional[str], filename: str, *, method: bool, ) -> None: """Generates Python bindings to ATen functions""" py_methods: List[str] = [] py_method_defs: List[str] = [] py_forwards: List[str] = [] grouped = group_filter_overloads(pairs, pred) for name in sorted(grouped.keys(), key=lambda x: str(x)): overloads = grouped[name] py_methods.append(method_impl(name, module, overloads, method=method)) py_method_defs.append(method_def(name, module, overloads, method=method)) py_forwards.extend(forward_decls(name, overloads, method=method)) fm.write_with_template(filename, filename, lambda: { 'generated_comment': '@' + f'generated from {fm.template_dir}/{filename}', 'py_forwards': py_forwards, 'py_methods': py_methods, 'py_method_defs': py_method_defs, })
Python
def create_python_return_type_bindings( fm: FileManager, pairs: Sequence[PythonSignatureNativeFunctionPair], pred: Callable[[NativeFunction], bool], filename: str, ) -> None: """ Generate function to initialize and return named tuple for native functions which returns named tuple and relevant entry for the map in `python_return_types.cpp`. """ py_return_types_definition: List[str] = [] py_return_types_map: List[str] = [] grouped = group_filter_overloads(pairs, pred) for name in sorted(grouped.keys(), key=lambda x: str(x)): overloads = grouped[name] definitions, map_entries = generate_return_type_definition_and_map_entry(overloads) py_return_types_definition.append("" if not definitions else "\n".join(definitions)) py_return_types_map.append("" if not map_entries else "\n".join(map_entries)) fm.write_with_template(filename, filename, lambda: { 'generated_comment': '@' + f'generated from {fm.template_dir}/{filename}', 'py_return_types': py_return_types_definition, 'py_return_types_map' : py_return_types_map, })
def create_python_return_type_bindings( fm: FileManager, pairs: Sequence[PythonSignatureNativeFunctionPair], pred: Callable[[NativeFunction], bool], filename: str, ) -> None: """ Generate function to initialize and return named tuple for native functions which returns named tuple and relevant entry for the map in `python_return_types.cpp`. """ py_return_types_definition: List[str] = [] py_return_types_map: List[str] = [] grouped = group_filter_overloads(pairs, pred) for name in sorted(grouped.keys(), key=lambda x: str(x)): overloads = grouped[name] definitions, map_entries = generate_return_type_definition_and_map_entry(overloads) py_return_types_definition.append("" if not definitions else "\n".join(definitions)) py_return_types_map.append("" if not map_entries else "\n".join(map_entries)) fm.write_with_template(filename, filename, lambda: { 'generated_comment': '@' + f'generated from {fm.template_dir}/{filename}', 'py_return_types': py_return_types_definition, 'py_return_types_map' : py_return_types_map, })
Python
def create_python_bindings_sharded( fm: FileManager, pairs: Sequence[PythonSignatureNativeFunctionPair], pred: Callable[[NativeFunction], bool], module: Optional[str], filename: str, *, method: bool, num_shards: int ) -> None: """Generates Python bindings to ATen functions""" grouped = group_filter_overloads(pairs, pred) def key_func(kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]) -> str: return str(kv[0]) def env_func( kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]] ) -> Dict[str, List[str]]: return { 'py_forwards': list(forward_decls(kv[0], kv[1], method=method)), 'py_methods': [method_impl(kv[0], module, kv[1], method=method)], 'py_method_defs': [method_def(kv[0], module, kv[1], method=method)], } fm.write_sharded( filename, grouped.items(), base_env={ 'generated_comment': '@' + f'generated from {fm.template_dir}/{filename}', }, key_fn=key_func, env_callable=env_func, num_shards=num_shards, sharded_keys={'py_forwards', 'py_methods', 'py_method_defs'} )
def create_python_bindings_sharded( fm: FileManager, pairs: Sequence[PythonSignatureNativeFunctionPair], pred: Callable[[NativeFunction], bool], module: Optional[str], filename: str, *, method: bool, num_shards: int ) -> None: """Generates Python bindings to ATen functions""" grouped = group_filter_overloads(pairs, pred) def key_func(kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]) -> str: return str(kv[0]) def env_func( kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]] ) -> Dict[str, List[str]]: return { 'py_forwards': list(forward_decls(kv[0], kv[1], method=method)), 'py_methods': [method_impl(kv[0], module, kv[1], method=method)], 'py_method_defs': [method_def(kv[0], module, kv[1], method=method)], } fm.write_sharded( filename, grouped.items(), base_env={ 'generated_comment': '@' + f'generated from {fm.template_dir}/{filename}', }, key_fn=key_func, env_callable=env_func, num_shards=num_shards, sharded_keys={'py_forwards', 'py_methods', 'py_method_defs'} )
Python
def emit_namedtuple_call( overloads: Sequence[PythonSignatureNativeFunctionPair] ) -> Tuple[List[str], Dict[str, str]]: """ Generate block of named tuple type def inits, and add typeref snippets to declarations that use them """ typenames: Dict[str, str] = {} # map from unique name + field name lists to typedef name typedefs: List[str] = [] # typedef declarations and init code for overload in overloads: fieldnames = namedtuple_fieldnames(overload.function.func.returns) if not fieldnames: continue name = cpp.name(overload.function.func) # use @with_native_function? tn_key = gen_namedtuple_typename_key(overload.function) typename = typenames.get(tn_key) if typename is None: typename = f'NamedTuple{"" if not typedefs else len(typedefs)}' typenames[tn_key] = typename typedefs.append(f"""\ static PyTypeObject* {typename} = get_namedtuple("{name}");""") return typedefs, typenames
def emit_namedtuple_call( overloads: Sequence[PythonSignatureNativeFunctionPair] ) -> Tuple[List[str], Dict[str, str]]: """ Generate block of named tuple type def inits, and add typeref snippets to declarations that use them """ typenames: Dict[str, str] = {} # map from unique name + field name lists to typedef name typedefs: List[str] = [] # typedef declarations and init code for overload in overloads: fieldnames = namedtuple_fieldnames(overload.function.func.returns) if not fieldnames: continue name = cpp.name(overload.function.func) # use @with_native_function? tn_key = gen_namedtuple_typename_key(overload.function) typename = typenames.get(tn_key) if typename is None: typename = f'NamedTuple{"" if not typedefs else len(typedefs)}' typenames[tn_key] = typename typedefs.append(f"""\ static PyTypeObject* {typename} = get_namedtuple("{name}");""") return typedefs, typenames
Python
def generate_return_type_definition_and_map_entry( overloads: Sequence[PythonSignatureNativeFunctionPair], ) -> Tuple[List[str], List[str]]: """ Generate block of function in `python_return_types.cpp` to initialize and return named tuple for a native function which returns named tuple and relevant entry for the map in same file. """ typenames: Dict[str, str] = {} # map from unique name + field name lists to typedef name definitions: List[str] = [] # function defintion to register the typedef map_entries: List[str] = [] # C++ map entry of <function_name, function creates it namedtuple> for overload in overloads: fieldnames = namedtuple_fieldnames(overload.function.func.returns) if not fieldnames: continue fields = ', '.join(f'{{"{fn}", ""}}' for fn in fieldnames) name = cpp.name(overload.function.func) # use @with_native_function? tn_key = gen_namedtuple_typename_key(overload.function) typename = typenames.get(tn_key) if typename is None: typename = f'{name}NamedTuple{"" if not definitions else len(definitions)}' typenames[tn_key] = typename definitions.append(f"""\ PyTypeObject* get_{name}_namedtuple() {{ static PyStructSequence_Field NamedTuple_fields[] = {{ {fields}, {{nullptr}} }}; static PyTypeObject {typename}; static bool is_initialized = false; static PyStructSequence_Desc desc = {{ "torch.return_types.{name}", nullptr, NamedTuple_fields, {len(fieldnames)} }}; if (!is_initialized) {{ PyStructSequence_InitType(&{typename}, &desc); {typename}.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }} return &{typename}; }} """) map_entries.append(f'{{"{name}", get_{name}_namedtuple()}}, ') return definitions, map_entries
def generate_return_type_definition_and_map_entry( overloads: Sequence[PythonSignatureNativeFunctionPair], ) -> Tuple[List[str], List[str]]: """ Generate block of function in `python_return_types.cpp` to initialize and return named tuple for a native function which returns named tuple and relevant entry for the map in same file. """ typenames: Dict[str, str] = {} # map from unique name + field name lists to typedef name definitions: List[str] = [] # function defintion to register the typedef map_entries: List[str] = [] # C++ map entry of <function_name, function creates it namedtuple> for overload in overloads: fieldnames = namedtuple_fieldnames(overload.function.func.returns) if not fieldnames: continue fields = ', '.join(f'{{"{fn}", ""}}' for fn in fieldnames) name = cpp.name(overload.function.func) # use @with_native_function? tn_key = gen_namedtuple_typename_key(overload.function) typename = typenames.get(tn_key) if typename is None: typename = f'{name}NamedTuple{"" if not definitions else len(definitions)}' typenames[tn_key] = typename definitions.append(f"""\ PyTypeObject* get_{name}_namedtuple() {{ static PyStructSequence_Field NamedTuple_fields[] = {{ {fields}, {{nullptr}} }}; static PyTypeObject {typename}; static bool is_initialized = false; static PyStructSequence_Desc desc = {{ "torch.return_types.{name}", nullptr, NamedTuple_fields, {len(fieldnames)} }}; if (!is_initialized) {{ PyStructSequence_InitType(&{typename}, &desc); {typename}.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }} return &{typename}; }} """) map_entries.append(f'{{"{name}", get_{name}_namedtuple()}}, ') return definitions, map_entries
Python
def method_impl( name: BaseOperatorName, module: Optional[str], overloads: Sequence[PythonSignatureNativeFunctionPair], *, method: bool ) -> str: """ Generate a python binding for all overloads of an op. """ pycname = get_pycname(name) noarg = is_noarg(overloads) namedtuple_inits, namedtuple_typenames = emit_namedtuple_call(overloads) method_header = ['HANDLE_TH_ERRORS'] method_header += namedtuple_inits method_header += [ "const Tensor& self = THPVariable_Unpack(self_);" ] if method else [] method_footer = ([] if noarg else ['Py_RETURN_NONE;']) + ['END_HANDLE_TH_ERRORS'] traceable = 'true' if all(should_trace(o.function) for o in overloads) else 'false' grouped_overloads: Sequence[PythonSignatureGroup] = group_overloads(overloads) is_singleton = len(grouped_overloads) == 1 signatures: List[str] = [] dispatch: List[str] = [] for overload_index, overload in enumerate(grouped_overloads): signature = overload.signature.signature_str() signatures.append(f'{cpp_string(str(signature))},') dispatch_body = emit_dispatch_case(overload, namedtuple_typenames) dispatch.append( PY_VARIABLE_CASE.substitute(overload_index=overload_index, body=dispatch_body) if not is_singleton else dispatch_body) if noarg: template = PY_VARIABLE_METHOD_NOARGS elif is_singleton: template = PY_VARIABLE_METHOD_VARARGS_SINGLETON else: template = PY_VARIABLE_METHOD_VARARGS return template.substitute( name=name, pycname=pycname, method_header=method_header, max_args=max(map(lambda o: o.signature.arguments_count(), overloads)), signatures=signatures, traceable=traceable, check_has_torch_function=gen_has_torch_function_check( name=name, module=module, noarg=noarg, method=method, ), dispatch=dispatch, method_footer=method_footer, self_="self_" if method else "nullptr", )
def method_impl( name: BaseOperatorName, module: Optional[str], overloads: Sequence[PythonSignatureNativeFunctionPair], *, method: bool ) -> str: """ Generate a python binding for all overloads of an op. """ pycname = get_pycname(name) noarg = is_noarg(overloads) namedtuple_inits, namedtuple_typenames = emit_namedtuple_call(overloads) method_header = ['HANDLE_TH_ERRORS'] method_header += namedtuple_inits method_header += [ "const Tensor& self = THPVariable_Unpack(self_);" ] if method else [] method_footer = ([] if noarg else ['Py_RETURN_NONE;']) + ['END_HANDLE_TH_ERRORS'] traceable = 'true' if all(should_trace(o.function) for o in overloads) else 'false' grouped_overloads: Sequence[PythonSignatureGroup] = group_overloads(overloads) is_singleton = len(grouped_overloads) == 1 signatures: List[str] = [] dispatch: List[str] = [] for overload_index, overload in enumerate(grouped_overloads): signature = overload.signature.signature_str() signatures.append(f'{cpp_string(str(signature))},') dispatch_body = emit_dispatch_case(overload, namedtuple_typenames) dispatch.append( PY_VARIABLE_CASE.substitute(overload_index=overload_index, body=dispatch_body) if not is_singleton else dispatch_body) if noarg: template = PY_VARIABLE_METHOD_NOARGS elif is_singleton: template = PY_VARIABLE_METHOD_VARARGS_SINGLETON else: template = PY_VARIABLE_METHOD_VARARGS return template.substitute( name=name, pycname=pycname, method_header=method_header, max_args=max(map(lambda o: o.signature.arguments_count(), overloads)), signatures=signatures, traceable=traceable, check_has_torch_function=gen_has_torch_function_check( name=name, module=module, noarg=noarg, method=method, ), dispatch=dispatch, method_footer=method_footer, self_="self_" if method else "nullptr", )
Python
def emit_dispatch_case( overload: PythonSignatureGroup, namedtuple_typenames: Dict[str, str], ) -> str: """ Emit dispatch code for a single parsed signature. This corresponds to either a single native function, or a pair that differ only in output params. In the latter case, a single python signature is used for both and dispatching switches on the presence/absence of passed output args. """ if overload.outplace is not None: # dispatch output and no-output variants, branch on _r.isNone(<out_idx>) return PY_VARIABLE_OUT.substitute( out_idx=overload.signature.output_idx(), call_dispatch=emit_single_dispatch( overload.signature, overload.base, namedtuple_typenames), call_dispatch_out=emit_single_dispatch( overload.signature, overload.outplace, namedtuple_typenames), ) else: # no-output version only return emit_single_dispatch( overload.signature, overload.base, namedtuple_typenames)
def emit_dispatch_case( overload: PythonSignatureGroup, namedtuple_typenames: Dict[str, str], ) -> str: """ Emit dispatch code for a single parsed signature. This corresponds to either a single native function, or a pair that differ only in output params. In the latter case, a single python signature is used for both and dispatching switches on the presence/absence of passed output args. """ if overload.outplace is not None: # dispatch output and no-output variants, branch on _r.isNone(<out_idx>) return PY_VARIABLE_OUT.substitute( out_idx=overload.signature.output_idx(), call_dispatch=emit_single_dispatch( overload.signature, overload.base, namedtuple_typenames), call_dispatch_out=emit_single_dispatch( overload.signature, overload.outplace, namedtuple_typenames), ) else: # no-output version only return emit_single_dispatch( overload.signature, overload.base, namedtuple_typenames)
Python
def is_smaller(s1: PythonSignature, s2: PythonSignature) -> bool: """Returns True if s1 < s2 in the partial order.""" args1, args2 = s1.arguments(skip_outputs=True), s2.arguments(skip_outputs=True) if len(args1) != len(args2): return False # TODO: should use some canonical form instead of 'str(arg.type)' - see comments # above. The old codegen used the deprecated 'dynamic_type(arg.type)', which # ignores the optional annotation, i.e. 'Scalar' and 'Scalar?'. equal = all(arg1.type == arg2.type for arg1, arg2 in zip(args1, args2)) smaller_or_equal = all(str(arg1.type) == str(arg2.type) or is_arg_smaller(arg1.type, arg2.type) for arg1, arg2 in zip(args1, args2)) return smaller_or_equal and not equal
def is_smaller(s1: PythonSignature, s2: PythonSignature) -> bool: """Returns True if s1 < s2 in the partial order.""" args1, args2 = s1.arguments(skip_outputs=True), s2.arguments(skip_outputs=True) if len(args1) != len(args2): return False # TODO: should use some canonical form instead of 'str(arg.type)' - see comments # above. The old codegen used the deprecated 'dynamic_type(arg.type)', which # ignores the optional annotation, i.e. 'Scalar' and 'Scalar?'. equal = all(arg1.type == arg2.type for arg1, arg2 in zip(args1, args2)) smaller_or_equal = all(str(arg1.type) == str(arg2.type) or is_arg_smaller(arg1.type, arg2.type) for arg1, arg2 in zip(args1, args2)) return smaller_or_equal and not equal
Python
def emit_single_dispatch( ps: PythonSignature, f: NativeFunction, namedtuple_typenames: Dict[str, str] ) -> str: """ Emit dispatch code for a single native function. """ @with_native_function def go(f: NativeFunction) -> str: # header comments deprecated = '[deprecated] ' if ps.deprecated else '' schema_comment = f'// {deprecated}aten::{f.func}' # dispatch lambda signature name = cpp.name(f.func) lambda_formals = ', '.join(map(lambda a: f"{a.type_str} {a.name}", dispatch_lambda_args(ps, f))) lambda_return = dispatch_lambda_return_str(f) # dispatch lambda body dispatch_callee = cpp_dispatch_target(f) dispatch_args = ', '.join(cpp_dispatch_exprs(f, python_signature=ps)) # from arg parser outputs to dispatch lambda arguments parser_outputs = arg_parser_output_exprs(ps, f) lambda_arg_exprs = dispatch_lambda_exprs(ps, f) inits = '\n'.join(lambda_arg_exprs.inits) lambda_args = ', '.join(lambda_arg_exprs.exprs) # scatter fields # TODO: Checking `ps.method and ('requires_grad' in parser_outputs)` is a hacky # solution for enabling the 'requires_grad' argument for tensor methods # new_full, new_empty, and new_zeros. A much better but more difficult to # implement solution involves refactoring according to Ed's description here: # https://github.com/pytorch/pytorch/issues/36455#issuecomment-614767589 need_set_requires_grad = ps.tensor_options_args and (not has_tensor_options(f) or ( ps.method and ('requires_grad' in parser_outputs))) set_requires_grad = f'.set_requires_grad({parser_outputs["requires_grad"].expr})' \ if need_set_requires_grad else '' if lambda_return == 'void': return f"""\ {schema_comment} {inits} auto dispatch_{name} = []({lambda_formals}) -> {lambda_return} {{ pybind11::gil_scoped_release no_gil; {dispatch_callee}({dispatch_args}); }}; dispatch_{name}({lambda_args}){set_requires_grad}; Py_RETURN_NONE; """ else: typename = namedtuple_typenames.get(gen_namedtuple_typename_key(f)) namedtuple_typeref = f'{typename}, ' if typename is not None else '' return f"""\ {schema_comment} {inits} auto dispatch_{name} = []({lambda_formals}) -> {lambda_return} {{ pybind11::gil_scoped_release no_gil; return {dispatch_callee}({dispatch_args}); }}; return wrap({namedtuple_typeref}dispatch_{name}({lambda_args}){set_requires_grad}); """ return go(f)
def emit_single_dispatch( ps: PythonSignature, f: NativeFunction, namedtuple_typenames: Dict[str, str] ) -> str: """ Emit dispatch code for a single native function. """ @with_native_function def go(f: NativeFunction) -> str: # header comments deprecated = '[deprecated] ' if ps.deprecated else '' schema_comment = f'// {deprecated}aten::{f.func}' # dispatch lambda signature name = cpp.name(f.func) lambda_formals = ', '.join(map(lambda a: f"{a.type_str} {a.name}", dispatch_lambda_args(ps, f))) lambda_return = dispatch_lambda_return_str(f) # dispatch lambda body dispatch_callee = cpp_dispatch_target(f) dispatch_args = ', '.join(cpp_dispatch_exprs(f, python_signature=ps)) # from arg parser outputs to dispatch lambda arguments parser_outputs = arg_parser_output_exprs(ps, f) lambda_arg_exprs = dispatch_lambda_exprs(ps, f) inits = '\n'.join(lambda_arg_exprs.inits) lambda_args = ', '.join(lambda_arg_exprs.exprs) # scatter fields # TODO: Checking `ps.method and ('requires_grad' in parser_outputs)` is a hacky # solution for enabling the 'requires_grad' argument for tensor methods # new_full, new_empty, and new_zeros. A much better but more difficult to # implement solution involves refactoring according to Ed's description here: # https://github.com/pytorch/pytorch/issues/36455#issuecomment-614767589 need_set_requires_grad = ps.tensor_options_args and (not has_tensor_options(f) or ( ps.method and ('requires_grad' in parser_outputs))) set_requires_grad = f'.set_requires_grad({parser_outputs["requires_grad"].expr})' \ if need_set_requires_grad else '' if lambda_return == 'void': return f"""\ {schema_comment} {inits} auto dispatch_{name} = []({lambda_formals}) -> {lambda_return} {{ pybind11::gil_scoped_release no_gil; {dispatch_callee}({dispatch_args}); }}; dispatch_{name}({lambda_args}){set_requires_grad}; Py_RETURN_NONE; """ else: typename = namedtuple_typenames.get(gen_namedtuple_typename_key(f)) namedtuple_typeref = f'{typename}, ' if typename is not None else '' return f"""\ {schema_comment} {inits} auto dispatch_{name} = []({lambda_formals}) -> {lambda_return} {{ pybind11::gil_scoped_release no_gil; return {dispatch_callee}({dispatch_args}); }}; return wrap({namedtuple_typeref}dispatch_{name}({lambda_args}){set_requires_grad}); """ return go(f)
Python
def assert_valid_qconfig(qconfig: Optional[Union[QConfig, QConfigDynamic]], mod: torch.nn.Module) -> None: """ Verifies that this `qconfig` is valid. """ if qconfig is None: return is_conv_transpose_mod = ( isinstance(mod, torch.nn.ConvTranspose1d) or isinstance(mod, torch.nn.ConvTranspose2d) or isinstance(mod, torch.nn.ConvTranspose3d)) if is_conv_transpose_mod: if qconfig.weight is None: # for now, we assume that any qconfig for ConvTranspose without a weight is valid return example_observer = qconfig.weight() is_per_channel = ( isinstance(example_observer, torch.ao.quantization.PerChannelMinMaxObserver) or isinstance(example_observer, torch.ao.quantization.MovingAveragePerChannelMinMaxObserver) ) assert not is_per_channel, \ 'Per channel weight observer is not supported yet for ConvTranspose{n}d.'
def assert_valid_qconfig(qconfig: Optional[Union[QConfig, QConfigDynamic]], mod: torch.nn.Module) -> None: """ Verifies that this `qconfig` is valid. """ if qconfig is None: return is_conv_transpose_mod = ( isinstance(mod, torch.nn.ConvTranspose1d) or isinstance(mod, torch.nn.ConvTranspose2d) or isinstance(mod, torch.nn.ConvTranspose3d)) if is_conv_transpose_mod: if qconfig.weight is None: # for now, we assume that any qconfig for ConvTranspose without a weight is valid return example_observer = qconfig.weight() is_per_channel = ( isinstance(example_observer, torch.ao.quantization.PerChannelMinMaxObserver) or isinstance(example_observer, torch.ao.quantization.MovingAveragePerChannelMinMaxObserver) ) assert not is_per_channel, \ 'Per channel weight observer is not supported yet for ConvTranspose{n}d.'
Python
def add_module_to_qconfig_obs_ctr( qconfig: QConfigAny, module: Union[nn.Module, None]) -> Any: r"""This is a helper function for use in quantization prepare that updates a qconfig so that the constructors stored in the qconfig will create observers on the same device that 'module' is on. This is intended to be used when the qconfigs are propagated to each module in order to avoid potential device alignment issues. Args: qconfig: QConfig or QConfigDynamic with obs constructors stored in activation and weight module: module which the qconfig is related to Return: qconfig: configured so that obs constructors set to construct on the same device as module """ if module is None or qconfig is None or qconfig._fields != ('activation', 'weight'): return qconfig def get_factory_kwargs_based_on_module_device(): assert isinstance(module, torch.nn.Module) devices = {p.device for p in module.parameters()} | \ {p.device for p in module.buffers()} device = next(iter(devices)) if len(devices) > 0 else None return None if device is None else {'device': device} def configure_constructor_to_put_obs_on_module_device(original_constructor): try: # check if constructor can accept factory_kwargs check = original_constructor.with_args(factory_kwargs=None) check() return original_constructor.with_callable_args(factory_kwargs=get_factory_kwargs_based_on_module_device) except AttributeError: # qconfig doesn't have activation or weight return original_constructor except TypeError: # the class doesn't accept factory_kwargs argument return original_constructor activation = configure_constructor_to_put_obs_on_module_device(qconfig.activation) weight = configure_constructor_to_put_obs_on_module_device(qconfig.weight) if isinstance(qconfig, QConfig): return QConfig(activation, weight) else: return QConfigDynamic(activation, weight)
def add_module_to_qconfig_obs_ctr( qconfig: QConfigAny, module: Union[nn.Module, None]) -> Any: r"""This is a helper function for use in quantization prepare that updates a qconfig so that the constructors stored in the qconfig will create observers on the same device that 'module' is on. This is intended to be used when the qconfigs are propagated to each module in order to avoid potential device alignment issues. Args: qconfig: QConfig or QConfigDynamic with obs constructors stored in activation and weight module: module which the qconfig is related to Return: qconfig: configured so that obs constructors set to construct on the same device as module """ if module is None or qconfig is None or qconfig._fields != ('activation', 'weight'): return qconfig def get_factory_kwargs_based_on_module_device(): assert isinstance(module, torch.nn.Module) devices = {p.device for p in module.parameters()} | \ {p.device for p in module.buffers()} device = next(iter(devices)) if len(devices) > 0 else None return None if device is None else {'device': device} def configure_constructor_to_put_obs_on_module_device(original_constructor): try: # check if constructor can accept factory_kwargs check = original_constructor.with_args(factory_kwargs=None) check() return original_constructor.with_callable_args(factory_kwargs=get_factory_kwargs_based_on_module_device) except AttributeError: # qconfig doesn't have activation or weight return original_constructor except TypeError: # the class doesn't accept factory_kwargs argument return original_constructor activation = configure_constructor_to_put_obs_on_module_device(qconfig.activation) weight = configure_constructor_to_put_obs_on_module_device(qconfig.weight) if isinstance(qconfig, QConfig): return QConfig(activation, weight) else: return QConfigDynamic(activation, weight)
Python
def activation_is_memoryless(qconfig: QConfig): """ Return whether the observer for activations defined in the given QConfig is memoryless. """ def _is_memoryless(observer): return hasattr(observer, "memoryless") and observer.memoryless act = qconfig.activation() if isinstance(act, FakeQuantizeBase) and hasattr(act, "activation_post_process"): return _is_memoryless(act.activation_post_process) else: return _is_memoryless(act)
def activation_is_memoryless(qconfig: QConfig): """ Return whether the observer for activations defined in the given QConfig is memoryless. """ def _is_memoryless(observer): return hasattr(observer, "memoryless") and observer.memoryless act = qconfig.activation() if isinstance(act, FakeQuantizeBase) and hasattr(act, "activation_post_process"): return _is_memoryless(act.activation_post_process) else: return _is_memoryless(act)
Python
def node_ctor_arg_rvalue_string(arg: NamedCType) -> str: """ Given a NamedCType from a lazy IR schema, generate a c++ string for materializing an rvalue of that arg for passing into a lazy Node constructor. """ if isValueType(arg.type): if isinstance(arg.type, BaseCType): return f"lazy_{arg.name}.GetIrValue()" elif isinstance(arg.type, OptionalCType): return f"lazy_{arg.name} ? " \ f"c10::make_optional(lazy_{arg.name}.GetIrValue()) : " \ "c10::nullopt" else: raise AssertionError("TODO not sure if there are other valid types to handle here") else: if isinstance(arg.type, VectorCType) and isinstance(arg.type.elem, BaseCType): return f"std::vector<{arg.type.elem.type}>({arg.name}.begin(), {arg.name}.end())" elif (isinstance(arg.type, OptionalCType) and isinstance(arg.type.elem, VectorCType) and isinstance(arg.type.elem.elem, BaseCType)): return f"torch::lazy::ToOptionalVector<{arg.type.elem.elem.type}>({arg.name})" else: return f"{arg.name}"
def node_ctor_arg_rvalue_string(arg: NamedCType) -> str: """ Given a NamedCType from a lazy IR schema, generate a c++ string for materializing an rvalue of that arg for passing into a lazy Node constructor. """ if isValueType(arg.type): if isinstance(arg.type, BaseCType): return f"lazy_{arg.name}.GetIrValue()" elif isinstance(arg.type, OptionalCType): return f"lazy_{arg.name} ? " \ f"c10::make_optional(lazy_{arg.name}.GetIrValue()) : " \ "c10::nullopt" else: raise AssertionError("TODO not sure if there are other valid types to handle here") else: if isinstance(arg.type, VectorCType) and isinstance(arg.type.elem, BaseCType): return f"std::vector<{arg.type.elem.type}>({arg.name}.begin(), {arg.name}.end())" elif (isinstance(arg.type, OptionalCType) and isinstance(arg.type.elem, VectorCType) and isinstance(arg.type.elem.elem, BaseCType)): return f"torch::lazy::ToOptionalVector<{arg.type.elem.elem.type}>({arg.name})" else: return f"{arg.name}"
Python
def node_ctor_inputs(func: LazyIrSchema) -> str: """ Produce a formatted string with the arguments as passed into the constructor of a node class. """ node_ctor_values = [node_ctor_arg_rvalue_string(arg) for arg in func.filtered_types()] return ",\n ".join(node_ctor_values)
def node_ctor_inputs(func: LazyIrSchema) -> str: """ Produce a formatted string with the arguments as passed into the constructor of a node class. """ node_ctor_values = [node_ctor_arg_rvalue_string(arg) for arg in func.filtered_types()] return ",\n ".join(node_ctor_values)
Python
def _handle_col_wise_sharding(input, world_size, weight, local_shard_t, bias, pg): """ Entry-point function to handle the logic of col-wise sharding of weight for Linear. (Detailed explanations of the logic can be found in the comment for sharded_linear.) Args: input: matrix to be multiplied with the sharded weight. world_size: number of ranks. weight: shareded weight tensor. local_shard_t: row-wise shared local weight used for lookup. bias: bias term of linear op. pg: process group. Returns: final result of linear operation. """ return ( _handle_col_wise_sharding_base( torch.matmul, weight.size(0), len(input.size()) - 1, input, world_size, weight, local_shard_t, pg, ) + bias )
def _handle_col_wise_sharding(input, world_size, weight, local_shard_t, bias, pg): """ Entry-point function to handle the logic of col-wise sharding of weight for Linear. (Detailed explanations of the logic can be found in the comment for sharded_linear.) Args: input: matrix to be multiplied with the sharded weight. world_size: number of ranks. weight: shareded weight tensor. local_shard_t: row-wise shared local weight used for lookup. bias: bias term of linear op. pg: process group. Returns: final result of linear operation. """ return ( _handle_col_wise_sharding_base( torch.matmul, weight.size(0), len(input.size()) - 1, input, world_size, weight, local_shard_t, pg, ) + bias )
Python
def _handle_row_wise_sharding(input, world_size, weight, rank, local_shard_t, bias, pg): """ Entry-point function to handle the logic of row-wise sharding of weight for Linear. (Detailed explanations of the logic can be found in the comment for sharded_linear.) Args: input: matrix to be multiplied with the sharded weight. world_size: number of ranks. weight: shareded weight tensor. rank: # of cuda process. local_shard_t: row-wise shared local weight used for lookup. bias: bias term of linear op. pg: process group. Returns: final result of linear operation. """ # alltoall to gather all the appropriate inputs. input_t = input.t().contiguous() input_t_size = input_t.size() # Compute expected size split_size = get_split_size(input_t_size[0], world_size) input_split_sizes = [0] * world_size rearrange_rows = False for idx, placement in enumerate(weight._sharding_spec.placements): sharded_dim_size = get_chunked_dim_size(input_t_size[0], split_size, idx) input_split_sizes[placement.rank()] = sharded_dim_size if placement.rank() != idx: rearrange_rows = True if rearrange_rows: # Need to re-arrange rows of input_t for all2all. indices: List[List[int]] = [[0]] * world_size # When we do the chunk split, we always ensure the first N - 1 chunks get max out # and then the Nth chunk gets the rest. So input_split_sizes like [3, 3, 3, 4] # are not possible. The expected split size will be [4, 4, 4, 1]. sharded_dim_size_max = max(input_split_sizes) for idx, placement in enumerate(weight._sharding_spec.placements): split_size = input_split_sizes[placement.rank()] offset_start_idx = idx * sharded_dim_size_max indices[placement.rank()] = list( range(offset_start_idx, offset_start_idx + split_size) ) indices_flatten = list(idx for indice in indices for idx in indice) input_t = input_t.index_select( 0, torch.tensor(indices_flatten, device=input_t.device) ) gathered_input = torch.empty(input_split_sizes[rank] * world_size, input_t_size[1], device=input_t.device) # Perform autograd enabled alltoall all_to_all_single(gathered_input, input_t, input_split_sizes=input_split_sizes, group=pg) gathered_input = gathered_input.t() # Perform local matmuls for all shards shard_size = local_shard_t.size()[0] results = [] for r in range(world_size): inp = torch.narrow(gathered_input, 1, r * shard_size, shard_size) results.append(inp.matmul(local_shard_t)) # Gather all the results appropriately. local_result = torch.empty_like(results[rank]) local_result = reduce_scatter(local_result, results, group=pg) # Return the appropriate local result. return local_result + bias
def _handle_row_wise_sharding(input, world_size, weight, rank, local_shard_t, bias, pg): """ Entry-point function to handle the logic of row-wise sharding of weight for Linear. (Detailed explanations of the logic can be found in the comment for sharded_linear.) Args: input: matrix to be multiplied with the sharded weight. world_size: number of ranks. weight: shareded weight tensor. rank: # of cuda process. local_shard_t: row-wise shared local weight used for lookup. bias: bias term of linear op. pg: process group. Returns: final result of linear operation. """ # alltoall to gather all the appropriate inputs. input_t = input.t().contiguous() input_t_size = input_t.size() # Compute expected size split_size = get_split_size(input_t_size[0], world_size) input_split_sizes = [0] * world_size rearrange_rows = False for idx, placement in enumerate(weight._sharding_spec.placements): sharded_dim_size = get_chunked_dim_size(input_t_size[0], split_size, idx) input_split_sizes[placement.rank()] = sharded_dim_size if placement.rank() != idx: rearrange_rows = True if rearrange_rows: # Need to re-arrange rows of input_t for all2all. indices: List[List[int]] = [[0]] * world_size # When we do the chunk split, we always ensure the first N - 1 chunks get max out # and then the Nth chunk gets the rest. So input_split_sizes like [3, 3, 3, 4] # are not possible. The expected split size will be [4, 4, 4, 1]. sharded_dim_size_max = max(input_split_sizes) for idx, placement in enumerate(weight._sharding_spec.placements): split_size = input_split_sizes[placement.rank()] offset_start_idx = idx * sharded_dim_size_max indices[placement.rank()] = list( range(offset_start_idx, offset_start_idx + split_size) ) indices_flatten = list(idx for indice in indices for idx in indice) input_t = input_t.index_select( 0, torch.tensor(indices_flatten, device=input_t.device) ) gathered_input = torch.empty(input_split_sizes[rank] * world_size, input_t_size[1], device=input_t.device) # Perform autograd enabled alltoall all_to_all_single(gathered_input, input_t, input_split_sizes=input_split_sizes, group=pg) gathered_input = gathered_input.t() # Perform local matmuls for all shards shard_size = local_shard_t.size()[0] results = [] for r in range(world_size): inp = torch.narrow(gathered_input, 1, r * shard_size, shard_size) results.append(inp.matmul(local_shard_t)) # Gather all the results appropriately. local_result = torch.empty_like(results[rank]) local_result = reduce_scatter(local_result, results, group=pg) # Return the appropriate local result. return local_result + bias
Python
def update_qconfig_for_fusion( model: GraphModule, qconfig_dict: Any, ) -> Any: """ Update the qconfig_dict to account for fused modules such as LinearReLU. """ object_type_dict = qconfig_dict.get("object_type", None) if object_type_dict is None: return qconfig_dict modules = dict(model.named_modules()) for node in model.graph.nodes: if node.op == 'call_module' and node.target in modules: module_type = type(modules[str(node.target)]) if module_type not in list(DEFAULT_OP_LIST_TO_FUSER_METHOD.values()): continue for ops, fuser in DEFAULT_OP_LIST_TO_FUSER_METHOD.items(): if module_type == fuser: fused_qconfig = object_type_dict.get(ops[0], None) # Raise an error if the modules in the fused module have # different qconfigs specified in the qconfig_dict for op in ops: if not qconfig_equals(object_type_dict.get(op, None), fused_qconfig): raise LookupError("During fusion, we need to specify the same " + f"qconfigs for both modules in {module_type}.") if fused_qconfig is not None: object_type_dict[module_type] = fused_qconfig return qconfig_dict
def update_qconfig_for_fusion( model: GraphModule, qconfig_dict: Any, ) -> Any: """ Update the qconfig_dict to account for fused modules such as LinearReLU. """ object_type_dict = qconfig_dict.get("object_type", None) if object_type_dict is None: return qconfig_dict modules = dict(model.named_modules()) for node in model.graph.nodes: if node.op == 'call_module' and node.target in modules: module_type = type(modules[str(node.target)]) if module_type not in list(DEFAULT_OP_LIST_TO_FUSER_METHOD.values()): continue for ops, fuser in DEFAULT_OP_LIST_TO_FUSER_METHOD.items(): if module_type == fuser: fused_qconfig = object_type_dict.get(ops[0], None) # Raise an error if the modules in the fused module have # different qconfigs specified in the qconfig_dict for op in ops: if not qconfig_equals(object_type_dict.get(op, None), fused_qconfig): raise LookupError("During fusion, we need to specify the same " + f"qconfigs for both modules in {module_type}.") if fused_qconfig is not None: object_type_dict[module_type] = fused_qconfig return qconfig_dict
Python
def is_input_arg_dtype_supported_by_backend( arg: Argument, node: Node, node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]], dtype_config: Dict[str, torch.dtype], ) -> bool: """ Check if the configured qconfig for the argument is supported by the backend or not """ if isinstance(arg, (list, tuple)): return all(map(lambda a: is_input_arg_dtype_supported_by_backend(a, node, node_name_to_target_dtype, dtype_config), arg)) if not isinstance(arg, Node): return True # TODO: support check for standalone module is_weight = node_arg_is_weight(node, arg) is_bias = node_arg_is_bias(node, arg) is_activation = not is_weight and not is_bias if is_activation: input_activation_dtype = dtype_config.get("input_activation_dtype", None) return input_activation_dtype is None or \ node_name_to_target_dtype[node.name]["input_activation_dtype"] == input_activation_dtype elif is_weight: weight_dtype = dtype_config.get("weight_dtype", None) return weight_dtype is None or node_name_to_target_dtype[node.name]["weight_dtype"] == weight_dtype else: # bias bias_dtype = dtype_config.get("bias_dtype", None) return bias_dtype is None or node_name_to_target_dtype[node.name]["bias_dtype"] == bias_dtype
Python
def is_output_dtype_supported_by_backend(
    node: Node,
    node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],
    dtype_config: Dict[str, torch.dtype],
) -> bool:
    """ Check if the configured qconfig for the output
    is supported by the backend or not
    """
    output_dtype = dtype_config.get("output_dtype", None)
    return output_dtype is None or \
        output_dtype == node_name_to_target_dtype[node.name]["output_activation_dtype"]
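For orientation, a sketch (illustrative values only, not a real backend configuration) of the two dictionaries these checkers compare:

import torch

# One dtype configuration entry from a backend_config_dict pattern.
dtype_config = {
    "input_activation_dtype": torch.quint8,
    "weight_dtype": torch.qint8,
    "bias_dtype": torch.float,
    "output_dtype": torch.quint8,
}

# Per-node target dtypes derived from the qconfig, keyed by node name.
node_name_to_target_dtype = {
    "conv2d": {
        "input_activation_dtype": torch.quint8,
        "weight_dtype": torch.qint8,
        "bias_dtype": torch.float,
        "output_activation_dtype": torch.quint8,
    },
}
# A pattern counts as supported only if every configured entry matches;
# entries missing from dtype_config are treated as "no constraint".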
Python
def is_pattern_dtype_config_supported_by_backend(
    pattern: Optional[Pattern],
    matched_nodes: Optional[List[Node]],
    node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],
    backend_config_dict: Optional[Dict[str, Any]]
) -> bool:
    """ Check if the dtype configuration of a pattern is supported by
    the backend or not
    """
    if backend_config_dict is None or pattern is None:
        return True
    assert matched_nodes is not None and len(matched_nodes) >= 1
    pattern_to_dtype_configs = get_pattern_to_dtype_configs(backend_config_dict)
    dtype_configs: List[Dict[str, torch.dtype]] = pattern_to_dtype_configs.get(pattern, [])

    input_node = matched_nodes[0]
    output_node = matched_nodes[-1]
    for dtype_config in dtype_configs:
        # check if arg dtypes are supported
        supported = True
        for arg in input_node.args:
            supported = supported and \
                is_input_arg_dtype_supported_by_backend(
                    arg, input_node, node_name_to_target_dtype, dtype_config)
        for k, arg in input_node.kwargs.items():
            supported = supported and \
                is_input_arg_dtype_supported_by_backend(
                    arg, input_node, node_name_to_target_dtype, dtype_config)
        # check if output dtype is supported
        supported = supported and is_output_dtype_supported_by_backend(
            output_node, node_name_to_target_dtype, dtype_config)
        if supported:
            return True
    return False
Python
def maybe_insert_observers_before_graph_output(
    graph_output_node: Node,
    output_quantized_idxs: List[int],
    node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],
    qconfig_map: Dict[str, QConfigAny],
    model: torch.nn.Module,
    modules: Dict[str, torch.nn.Module],
    graph: Graph,
) -> None:
    """
    If the output needs to be quantized and there are any nodes
    in the output which are not already observed, inserts observers
    for those nodes.
    """

    # TODO(future PR): update the output_quantized_idxs API to match
    # arbitrary data structures. There is always a single output, and
    # that output can have arbitrary nesting of values. List[int] is
    # not the right data type for this.
    assert output_quantized_idxs == [0] or output_quantized_idxs == [], \
        'unrecognized format of output_quantized_idxs'

    # Currently dequants are inserted in the convert step. So, we only
    # have to do anything if the output is hardcoded to be quantized
    if output_quantized_idxs == []:
        return

    # TODO(future PR): support more dtypes in model outputs, if necessary
    output_target_dtype = torch.quint8

    def _recursive_maybe_replace_node_with_obs(
        maybe_node: Argument,
        target_dtype: torch.dtype,
        node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],
        qconfig_map: Dict[str, QConfigAny],
        model: torch.nn.Module,
        modules: Dict[str, torch.nn.Module],
        graph: Graph,
    ) -> Argument:
        """
        Navigate an arbitrary data structure of lists, tuples, dicts.
        For each container type, recurse on all inputs. Once any Node
        is found, insert an observer if needed and do not recurse further.

        For example, given a structure of

          {'foo1': [[bar1]], 'foo2': {'foo3': [[[bar3]]]}}

        we recurse down to bar1 and bar3, observe them if necessary,
        and if we inserted an observer then replace the original node
        with its observer.

        Returns the data structure with all nodes needing observation being
        replaced by their observers.
        """
        if isinstance(maybe_node, Node):
            # check dtype of this node
            this_node_dtype = get_arg_target_dtype_as_output(
                maybe_node, modules, node_name_to_target_dtype)
            if this_node_dtype != target_dtype:
                # insert observer
                qconfig = qconfig_map.get(maybe_node.name)
                # TODO(future PR): see if we need to allow specifying qconfig
                # on output nodes, to remove the restriction below.
                assert qconfig is not None, \
                    'Quantizing the output node without a qconfig is not supported'
                observer_mod = qconfig.activation()
                observer_node = insert_observer(
                    maybe_node, maybe_node, observer_mod, model, modules, graph)
                return observer_node
            else:
                return maybe_node
        elif isinstance(maybe_node, (list, tuple)):
            results = []
            for inner_node in maybe_node:
                results.append(_recursive_maybe_replace_node_with_obs(
                    inner_node, target_dtype, node_name_to_target_dtype,
                    qconfig_map, model, modules, graph))
            if isinstance(maybe_node, list):
                return results
            else:
                return tuple(results)
        elif isinstance(maybe_node, dict):
            results_dict = {}
            for k, inner_v in maybe_node.items():
                results_dict[k] = _recursive_maybe_replace_node_with_obs(
                    inner_v, target_dtype, node_name_to_target_dtype,
                    qconfig_map, model, modules, graph)
            return results_dict
        else:
            # leaf values that are not Nodes or containers (ints, None, etc.)
            # are returned unchanged
            return maybe_node

    new_args = []
    for old_arg in graph_output_node.args:
        new_args.append(
            _recursive_maybe_replace_node_with_obs(
                old_arg, output_target_dtype, node_name_to_target_dtype,
                qconfig_map, model, modules, graph))

    graph_output_node.args = tuple(new_args)
Python
def _recursive_maybe_replace_node_with_obs(
    maybe_node: Argument,
    target_dtype: torch.dtype,
    node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],
    qconfig_map: Dict[str, QConfigAny],
    model: torch.nn.Module,
    modules: Dict[str, torch.nn.Module],
    graph: Graph,
) -> Argument:
    """
    Navigate an arbitrary data structure of lists, tuples, dicts.
    For each container type, recurse on all inputs. Once any Node
    is found, insert an observer if needed and do not recurse further.

    For example, given a structure of

      {'foo1': [[bar1]], 'foo2': {'foo3': [[[bar3]]]}}

    we recurse down to bar1 and bar3, observe them if necessary,
    and if we inserted an observer then replace the original node
    with its observer.

    Returns the data structure with all nodes needing observation being
    replaced by their observers.
    """
    if isinstance(maybe_node, Node):
        # check dtype of this node
        this_node_dtype = get_arg_target_dtype_as_output(
            maybe_node, modules, node_name_to_target_dtype)
        if this_node_dtype != target_dtype:
            # insert observer
            qconfig = qconfig_map.get(maybe_node.name)
            # TODO(future PR): see if we need to allow specifying qconfig
            # on output nodes, to remove the restriction below.
            assert qconfig is not None, \
                'Quantizing the output node without a qconfig is not supported'
            observer_mod = qconfig.activation()
            observer_node = insert_observer(
                maybe_node, maybe_node, observer_mod, model, modules, graph)
            return observer_node
        else:
            return maybe_node
    elif isinstance(maybe_node, (list, tuple)):
        results = []
        for inner_node in maybe_node:
            results.append(_recursive_maybe_replace_node_with_obs(
                inner_node, target_dtype, node_name_to_target_dtype,
                qconfig_map, model, modules, graph))
        if isinstance(maybe_node, list):
            return results
        else:
            return tuple(results)
    elif isinstance(maybe_node, dict):
        results_dict = {}
        for k, inner_v in maybe_node.items():
            results_dict[k] = _recursive_maybe_replace_node_with_obs(
                inner_v, target_dtype, node_name_to_target_dtype,
                qconfig_map, model, modules, graph)
        return results_dict
    else:
        # leaf values that are not Nodes or containers (ints, None, etc.)
        # are returned unchanged
        return maybe_node
Python
def maybe_propagate_dtype_for_node(
    node: Node,
    target_dtype: torch.dtype,
    node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],
    matches: Dict[str, MatchResult],
) -> None:
    """
    Assigns `target_dtype` to `node`. If `node` is a general tensor shape op
    (see GeneralTensorShapeOpQuantizeHandler in quantization_patterns.py for more details)
    also call this function recursively on the first argument, to propagate
    the dtype to the caller.
    """
    node_name_to_target_dtype[node.name]["input_activation_dtype"] = target_dtype
    node_name_to_target_dtype[node.name]["output_activation_dtype"] = target_dtype
    # if this is a copy node, propagate to first arg
    root_node, matched_nodes, pattern, qhandler, qconfig = matches.get(
        node.name, (None, None, None, None, None))
    if qhandler is not None and qhandler.is_general_tensor_shape_op():
        prev_node = node.args[0]
        if isinstance(prev_node, Node):
            maybe_propagate_dtype_for_node(
                prev_node, target_dtype, node_name_to_target_dtype, matches)
Python
def propagate_dtypes_for_known_nodes(
    graph: Graph,
    node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],
    matches: Dict[str, MatchResult],
) -> None:
    """
    Currently we assume that inputs to the graph are either `torch.float` or
    `torch.quint8`, which is not always correct. For ops such as
    `x.masked_fill(mask, value)`, we know that the dtype of `mask` is a
    `BoolTensor`. Propagate this information throughout the graph.

    Note: not all dtypes in the graph will be correct after this pass, but a
    higher percentage of them will be correct. Hopefully in the future we can
    replace this with a better way to reason about dtypes of tensors.
    """
    for node in graph.nodes:
        bool_arg_idxs = node_bool_tensor_arg_indexes(node)
        for bool_arg_idx in bool_arg_idxs:
            cur_node = node.args[bool_arg_idx]
            maybe_propagate_dtype_for_node(
                cur_node, torch.bool, node_name_to_target_dtype, matches)
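The `masked_fill` case mentioned in the docstring, as a tiny standalone example showing why the mask argument can safely be tagged `torch.bool` regardless of any qconfig:

import torch

x = torch.randn(2, 3)
mask = torch.tensor([[True, False, True], [False, True, False]])

# The mask argument of masked_fill is a BoolTensor, so the pass above can
# mark the node feeding `mask` with torch.bool.
y = x.masked_fill(mask, 1.0)
assert mask.dtype == torch.bool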
Python
def insert_observers_for_model(
    model: GraphModule,
    modules: Dict[str, torch.nn.Module],
    matches: Dict[str, MatchResult],
    qconfig_map: Dict[str, QConfigAny],
    graph: Graph,
    prepare_custom_config_dict: Dict[str, Any],
    equalization_config_map: Dict[str, Any],
    input_quantized_idxs: List[int],
    output_quantized_idxs: List[int],
    backend_config_dict: Optional[Dict[str, Any]],
    observed_node_names: Set[str],
) -> Optional[Node]:
    """
    Inserts observers, using the following high level algorithm:

    For each node in the graph:
      1. determine the target dtype of this node in the quantized graph, and save
         it for future steps
      2. determine the target dtype of all args and kwargs of this node
      3. if any arg or kwarg's target dtype does not match the current node's
         dtype, insert an observer
      4. if the current node needs an output observer, insert it

    For example:

    - starting graph:
        x0 -> linear -> x1

    - observed graph after processing x0:
        x0(fp32)

    - observed graph after processing linear:
        x0(fp32) -> x0_obs0(int8) -> linear(int8) -> linear_obs0(int8)

    - observed graph after processing x1:
        x0(fp32) -> x0_obs0(int8) -> linear(int8) -> linear_obs0(int8) -> x1

    After a node is processed, the naive observer placement is guaranteed to be
    complete for that node and all of its predecessors. There can be future
    passes which optimize the graph by deduplicating observers, etc.
    """

    # name of Node in original FX Graph to the target dtype information
    # that's derived from qconfig for the Node, for example, if we have
    # a conv2d node that has a qconfig
    # {
    #   # information for input and bias node omitted
    #   # for getattr node
    #   # weight = getattr(self, 'weight')
    #   'weight': {
    #      'output_activation_dtype': torch.float,
    #   }
    #   # for conv2d node
    #   # conv2d = call_function[target=torch.nn.functional.conv2d](
    #   #            args=(input, weight, bias))
    #   'conv2d': {
    #       'input_activation_dtype': torch.quint8,
    #       'weight_dtype': torch.qint8,
    #       'bias_dtype': torch.float,
    #       'output_activation_dtype': torch.quint8,
    #   }
    # }
    #
    # TODO: rename this to node_name_to_target_dtype_info
    node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]] = defaultdict(dict)
    cache_for_no_tensor_check: Dict[Node, bool] = dict()

    inputs_seen_counter = 0
    outputs_seen_counter = 0
    results_node = None

    # first, populate the dtype map based only on qconfig and qhandler
    # this assumes:
    # graph inputs are fp32 by default, and int8 where overridden
    # other nodes output dtype is specified by the qconfig
    modules = dict(model.named_modules(remove_duplicate=False))
    for node in model.graph.nodes:
        root_node, matched_nodes, pattern, qhandler, qconfig = matches.get(
            node.name, (None, None, None, None, None))
        node_name_to_target_dtype[node.name] = get_target_activation_dtype_for_node(
            node, qconfig, inputs_seen_counter, outputs_seen_counter,
            input_quantized_idxs, output_quantized_idxs, qhandler,
            modules, cache_for_no_tensor_check)

    # Second, for nodes with known input dtypes, propagate them throughout the
    # graph. For example, if there is a call such as
    #   x1 = x0.masked_fill(mask, 1)
    # we propagate the type of mask to be torch.bool
    propagate_dtypes_for_known_nodes(
        model.graph, node_name_to_target_dtype, matches)

    # After this point, the current node and all of its arguments
    # have a dtype assigned. Now, we insert observers for inputs
    # of this node (if needed for this node), and the output of this node
    # (if needed for this node).

    # Since we are mutating the graph as we go, we iterate over the original
    # nodes before observer insertion, instead of model.graph.nodes.
    nodes_before_observation = list(model.graph.nodes)

    for node in nodes_before_observation:

        if node.op == 'placeholder':
            # if a graph input is in fp32, it does not need observation
            # if a graph input is in int8, we assume the observation happens
            # outside of the graph, and no additional observation is needed
            pass

        elif node.op in ('call_module', 'call_method', 'call_function', 'output'):
            # check for matches
            root_node, matched_nodes, pattern, qhandler, qconfig = matches.get(
                node.name, (None, None, None, None, None))
            equalization_qconfig = equalization_config_map.get(node.name, None)

            this_node_dtype = node_name_to_target_dtype[node.name]
            output_not_a_tensor = this_node_dtype is None
            # TODO(future PR): consider stopping matching getitem
            is_getitem = node.op == 'call_function' and \
                node.target == operator.getitem

            skip_inserting_observers = (
                (qconfig is None) or
                output_not_a_tensor or
                is_getitem
            ) and (
                not node.op == 'output'
            )

            is_supported_by_backend = is_pattern_dtype_config_supported_by_backend(
                pattern, matched_nodes, node_name_to_target_dtype, backend_config_dict)

            if not skip_inserting_observers and is_supported_by_backend:
                modules = dict(model.named_modules(remove_duplicate=False))
                if node.op != 'output':
                    assert matched_nodes is not None
                    # add matched nodes to the observed node name set
                    for n in matched_nodes:
                        observed_node_names.add(n.name)

                    # This is currently only used for equalization.
                    # Checks if the current node is in a branch in which the two
                    # first layers are both being quantized.
                    #
                    # ex.       conv2
                    #          /
                    #      x -> conv1
                    #
                    # If this is the case, we will not apply equalization to the
                    # initial two layers.
                    is_quantized_branch = False
                    if (
                        len(node.args) > 0 and
                        isinstance(node.args[0], Node) and
                        len(node.args[0].users) > 1
                    ):
                        for user in node.args[0].users:
                            # Checks if there exists another user being quantized
                            is_user_quantized = (
                                qconfig_map.get(user.name, None) is not None or
                                (user.op == 'call_module' and isinstance(modules[str(user.target)], ObserverBase))
                            )
                            if user != node and is_user_quantized:
                                is_quantized_branch = True

                    # this modifies node inplace
                    maybe_insert_input_observers_for_node(
                        node, qconfig, model, modules, graph,
                        node_name_to_target_dtype,
                        qhandler, prepare_custom_config_dict)

                    # Insert equalization input observers if needed
                    maybe_insert_input_equalization_observers_for_node(
                        node, equalization_qconfig, model, modules, graph,
                        node_name_to_target_dtype, is_quantized_branch)

                    is_last_node_of_pattern = root_node is node
                    is_general_tensor_value_op = \
                        (qhandler is not None and qhandler.is_general_tensor_value_op())
                    is_general_tensor_shape_op = \
                        (qhandler is not None and qhandler.is_general_tensor_shape_op())
                    is_reuse_input_qconfig_ = is_reuse_input_qconfig(qconfig)

                    if is_last_node_of_pattern:
                        # this returns the new observer node if it was needed
                        maybe_output_obs_node = maybe_insert_output_observer_for_node(
                            node, model, modules, graph, matches,
                            node_name_to_target_dtype, pattern, qhandler)
                        if maybe_output_obs_node is not None:
                            # Update users of original node to use the output observer
                            # instead. For example, change
                            #
                            #           next_node
                            #          /
                            #   cur_node -> obs
                            #
                            # to
                            #
                            #                 next_node
                            #                 /
                            #   cur_node -> obs
                            #
                            # We need to save orig users before updating uses because
                            # the list of users will change as we update uses
                            orig_users = list(node.users.keys())
                            for user_node in orig_users:
                                if user_node is maybe_output_obs_node:
                                    continue
                                user_node.replace_input_with(node, maybe_output_obs_node)

                            # for general tensor value ops, we modify the graph
                            # to make all inputs and outputs use the first input's
                            # observer
                            if is_general_tensor_value_op or is_general_tensor_shape_op or is_reuse_input_qconfig_:
                                if not maybe_make_input_output_share_observers(node, model, modules):
                                    remove_output_observer(node, model, modules)

                            if isinstance(qhandler, CustomModuleQuantizeHandler):
                                swap_custom_module_to_observed(node, qconfig, modules, prepare_custom_config_dict)

                else:  # output
                    maybe_insert_observers_before_graph_output(
                        node, output_quantized_idxs,
                        node_name_to_target_dtype, qconfig_map,
                        model, modules, graph)

        #
        # After this point, the current node has input and output observers
        # that it needs for itself inserted.
        #

        # increment the counters, so future inputs and outputs are assigned
        # correct dtypes
        if node.op == 'placeholder':
            inputs_seen_counter += 1
        elif node.op == 'output':
            outputs_seen_counter += 1
            results_node = node

    return results_node
Python
def prepare(
        model: GraphModule,
        qconfig_dict: Any,
        node_name_to_scope: Dict[str, Tuple[str, type]],
        prepare_custom_config_dict: Optional[Dict[str, Any]] = None,
        equalization_qconfig_dict: Optional[Dict[str, Any]] = None,
        backend_config_dict: Optional[Dict[str, Any]] = None,
        is_standalone_module: bool = False) -> ObservedGraphModule:
    """ standalone_module means it is a submodule that is not inlined in the
    parent module, and will be quantized separately as one unit.

    How the standalone module is observed is specified by `input_quantized_idxs` and
    `output_quantized_idxs` in the prepare_custom_config for the standalone module.

    Args:
        node_name_to_scope: mapping from node name to the scope of the module which
            contains the node. The scope is a tuple of the fully qualified path of
            the module and the type of the module.

    Returns:
        model(GraphModule): prepared standalone module
        attributes:
            _standalone_module_input_quantized_idxs(List[Int]): a list of
                indexes for the graph input that is expected to be quantized,
                same as the input_quantized_idxs configuration provided
                for the standalone module
            _standalone_module_output_quantized_idxs(List[Int]): a list of
                indexes for the graph output that is quantized,
                same as the output_quantized_idxs configuration provided
                for the standalone module
    """
    if prepare_custom_config_dict is None:
        prepare_custom_config_dict = {}
    if equalization_qconfig_dict is None:
        equalization_qconfig_dict = {}

    additional_quant_patterns = \
        prepare_custom_config_dict.get("additional_quant_pattern", {})
    # mapping from a tuple of nodes in reverse order to uninitialized
    # QuantizeHandler subclass. For example,
    # {
    #   # match a single node
    #   (<class 'torch.nn.modules.conv.Conv3d'>:
    #     <class 'torch.ao.quantization.fx.quantize.ConvRelu'>),
    #   # match multiple nodes in reverse order
    #   ((<function relu at 0x7f766a7360d0>, <built-in function add>):
    #     <class 'torch.ao.quantization.fx.quantize.Add'>),
    # }
    patterns: Dict[Pattern, QuantizeHandler] = {}
    if backend_config_dict is None:
        quant_patterns = get_default_quant_patterns()
        patterns = get_combined_dict(
            quant_patterns, additional_quant_patterns)
    else:
        patterns = get_pattern_to_quantize_handlers(backend_config_dict)

        # TODO: make WEIGHT_INDEX_DICT and BIAS_INDEX_DICT an argument to the functions that need them
        # TODO: refactor this part to return WEIGHT_INDEX_DICT and BIAS_INDEX_DICT
        pattern_to_input_type_to_index = get_pattern_to_input_type_to_index(backend_config_dict)
        for pattern, input_type_to_index in pattern_to_input_type_to_index.items():
            for input_type, index in input_type_to_index.items():
                index_dicts = {
                    "weight": WEIGHT_INDEX_DICT,
                    "bias": BIAS_INDEX_DICT,
                    "input": {}  # not used right now
                }
                assert input_type in index_dicts.keys(), \
                    f"input type must be one of {index_dicts.keys()} but got: {input_type}"
                index_dict = index_dicts[input_type]
                if pattern in index_dict:  # type: ignore[operator]
                    index_dict[pattern].append(index)  # type: ignore[index]
                else:
                    index_dict[pattern] = [index]  # type: ignore[index]

    convert_dict_to_ordered_dict(qconfig_dict)
    convert_dict_to_ordered_dict(equalization_qconfig_dict)
    flattened_qconfig_dict = get_flattened_qconfig_dict(qconfig_dict)
    # TODO: support regex as well
    propagate_qconfig_(model, flattened_qconfig_dict)

    if model.training:
        additional_qat_module_mapping = prepare_custom_config_dict.get(
            "additional_qat_module_mapping", {})
        qat_swap_modules(model, additional_qat_module_mapping)
        qconfig_dict = update_qconfig_for_qat(qconfig_dict, additional_qat_module_mapping)

    qconfig_dict = update_qconfig_for_fusion(model, qconfig_dict)
    equalization_qconfig_dict = update_qconfig_for_fusion(model, equalization_qconfig_dict)

    # mapping from fully qualified module name to module instance
    # for example,
    # {
    #   '': Model(...),
    #   'linear': Linear(...),
    #   'linear.weight_fake_quant': PerChannelMinMaxObserver(...),
    # }
    modules = dict(model.named_modules())

    # fill qconfig_map, a map from node name to qconfig, used in find_matches
    equalization_qconfig_map = generate_qconfig_map(model, modules, model.graph, equalization_qconfig_dict, node_name_to_scope)
    qconfig_map = generate_qconfig_map(model, modules, model.graph, qconfig_dict, node_name_to_scope)

    # match the patterns that will get quantized
    standalone_module_name_configs = prepare_custom_config_dict.get(
        "standalone_module_name", [])
    standalone_module_class_configs = prepare_custom_config_dict.get(
        "standalone_module_class", [])

    standalone_module_names = [config[0] for config in standalone_module_name_configs]
    standalone_module_classes = [config[0] for config in standalone_module_class_configs]
    custom_module_classes = get_custom_module_class_keys(
        prepare_custom_config_dict, "float_to_observed_custom_module_class")
    matches = find_matches(
        model.graph, modules, patterns, qconfig_map, standalone_module_names,
        standalone_module_classes, custom_module_classes)

    input_quantized_idxs: List[int] = prepare_custom_config_dict.get(
        "input_quantized_idxs", [])
    output_quantized_idxs: List[int] = prepare_custom_config_dict.get(
        "output_quantized_idxs", [])

    run_prepare_fx_on_standalone_modules(
        model, modules, matches, prepare_custom_config_dict)

    # record names for the set of observed nodes, so that in the convert step
    # we know whether we need to convert a floating point module to a reference
    # quantized module or not
    observed_node_names: Set[str] = set()

    result_node = insert_observers_for_model(
        model, modules, matches, qconfig_map,
        model.graph, prepare_custom_config_dict,
        equalization_qconfig_map,
        input_quantized_idxs, output_quantized_idxs,
        backend_config_dict, observed_node_names)

    save_state(model, qconfig_map, node_name_to_scope, patterns,
               prepare_custom_config_dict, equalization_qconfig_map, qconfig_dict,
               model.training, observed_node_names)

    preserved_attributes = set(prepare_custom_config_dict.get("preserved_attributes", []))
    model = ObservedGraphModule(model, model.graph, preserved_attributes)
    if is_standalone_module:
        assert result_node is not None
        assert isinstance(result_node.args[0], Node), \
            "standalone module only supports returning simple value currently"\
            "(not tuple, dict etc.)"
        # these inputs are observed in parent
        # converting List[int] to Tensor since module attribute is
        # Union[Tensor, Module]
        model._standalone_module_input_quantized_idxs = \
            torch.tensor(input_quantized_idxs)
        model._standalone_module_output_quantized_idxs = torch.tensor(output_quantized_idxs)
    return model
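A minimal usage sketch of the public workflow that ends up calling this `prepare`, assuming a PyTorch release where `prepare_fx` accepts `(model, qconfig_dict)`; the module sizes and backend string are illustrative.

import torch
import torch.nn as nn
from torch.ao.quantization import get_default_qconfig
from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx

class SmallModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(8, 4)

    def forward(self, x):
        return self.linear(x)

model = SmallModel().eval()
qconfig_dict = {"": get_default_qconfig("fbgemm")}

# prepare_fx symbolically traces the model and inserts observers via prepare().
prepared = prepare_fx(model, qconfig_dict)
prepared(torch.randn(2, 8))          # calibration pass populates the observers
quantized = convert_fx(prepared)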
Python
def has_dynamic_shape(shape: Shape) -> bool:
    """
    Determine if the given shape has a dynamic dim, i.e. whether there is a -1 in the shape.

    Args:
        shape (Shape): Shape of a tensor. Essentially a sequence of integers.

    Returns:
        A boolean value indicating whether there is a dynamic dim in the shape.
    """
    return any(s == -1 for s in shape)
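A quick illustration of the helper above; a -1 in any position marks the shape as dynamic.

assert has_dynamic_shape((-1, 3, 224, 224)) is True
assert has_dynamic_shape((8, 3, 224, 224)) is False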