Python
def _ensureNoNewerConfigs(fname, checkpointIdx, checkpoints, configurations, overwrite):
    """!
    Check if there are configurations or checkpoints with indices greater than
    checkpointIdx. If so and `overwrite==True`, erase them.
    """

    latestCheckpoint = checkpoints[-1]
    if latestCheckpoint > checkpointIdx:
        message = f"Output file {fname} contains checkpoints with greater index than HMC starting point.\n" \
                  f"  Greatest index is {latestCheckpoint}, start index is {checkpointIdx}."
        if not overwrite:
            getLogger(__name__).error(message)
            raise RuntimeError("HMC start index is not latest")
        else:
            getLogger(__name__).warning(message + "\n  Overwriting")
            _removeGreaterThan(fname, "checkpoints", checkpointIdx)

    latestConfig = configurations[-1]
    if latestConfig > checkpointIdx:
        message = f"Output file {fname} contains configurations with greater index than HMC starting point.\n" \
                  f"  Greatest index is {latestConfig}, start index is {checkpointIdx}."
        if not overwrite:
            getLogger(__name__).error(message)
            raise RuntimeError("HMC start index is not latest")
        else:
            getLogger(__name__).warning(message + "\n  Overwriting")
            _removeGreaterThan(fname, "configuration", checkpointIdx)
Python
def _removeGreaterThan(fname, groupPath, maxIdx):
    """!
    Remove all elements under groupPath in the file whose indices are greater than maxIdx.
    """
    with h5.File(str(fname), "a") as h5f:
        grp = h5f[groupPath]
        # Copy the keys so the group can be modified while iterating.
        for idx in list(grp.keys()):
            if int(idx) > maxIdx:
                del grp[idx]
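A minimal, self-contained sketch of the pruning behaviour, assuming h5py is imported as `h5` as in the snippet above; the file name, group layout, and index range are made up for illustration.

Python
import h5py as h5

# Build a throwaway file holding checkpoints 0..4 (hypothetical layout).
with h5.File("demo.h5", "w") as h5f:
    grp = h5f.create_group("checkpoints")
    for i in range(5):
        grp.create_group(str(i))

_removeGreaterThan("demo.h5", "checkpoints", 2)

with h5.File("demo.h5", "r") as h5f:
    print(sorted(h5f["checkpoints"].keys()))  # ['0', '1', '2']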
Python
def _loadCheckpoint(fname, startIdx, checkpoints, evManager, action, lattice):
    """!
    Load a checkpoint from file allowing for negative indices.
    """

    if startIdx < 0:
        startIdx = checkpoints[-1] + (startIdx + 1)  # +1 so that startIdx=-1 gives last point
    if startIdx < 0 or startIdx > checkpoints[-1]:
        getLogger(__name__).error("Start index for HMC continuation is out of range: %d", startIdx)
        raise ValueError("Start index out of range")
    if startIdx not in checkpoints:
        getLogger(__name__).error("There is no checkpoint matching the given start index: %d", startIdx)
        raise ValueError("No checkpoint matching start index")

    with h5.File(str(fname), "r") as h5f:
        rng, cfgGrp, evolver = fileio.h5.loadCheckpoint(h5f["checkpoint"], startIdx,
                                                        evManager, action, lattice)
        stage = EvolutionStage.fromH5(cfgGrp)

    return startIdx, rng, stage, evolver
Python
def _iterTrajectories(ntr, maxNtr):
    """!
    Iterator for production. Either iterate ntr times or infinitely long if ntr is None.
    Shows a progressbar in both cases.
    """
    if ntr is not None:
        yield from cli.progressRange(ntr, message="HMC evolution",
                                     updateRate=max(ntr//100, 1))
    else:
        with cli.trackProgress(ntr, message="HMC evolution", updateRate=1) as pbar:
            count = 0
            while True:
                yield count
                count += 1
                if maxNtr is not None and count >= maxNtr:
                    getLogger(__name__).warning("Reached maximum number of trajectories for "
                                                "'unbounded' evolution.")
                    return
                pbar.advance()
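For clarity, here is a stripped-down generator with the same bounded/unbounded control flow, omitting the progress-bar plumbing; the names iter_trajectories and max_ntr are illustrative and not part of the original module.

Python
def iter_trajectories(ntr, max_ntr=None):
    # Finite case: iterate exactly ntr times.
    if ntr is not None:
        yield from range(ntr)
        return
    # 'Unbounded' case: iterate until the optional safety cap is hit.
    count = 0
    while True:
        yield count
        count += 1
        if max_ntr is not None and count >= max_ntr:
            return

print(list(iter_trajectories(3)))                # [0, 1, 2]
print(list(iter_trajectories(None, max_ntr=2)))  # [0, 1]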
Python
def selectTrajPoint(self, energy0, energy1):
    r"""!
    Select a trajectory point using Metropolis accept/reject.
    \param energy0 Energy at point 0 including the artificial kinetic term.
    \param energy1 Energy at point 1 including the artificial kinetic term.
    \return `0` if `energy0` was selected, `1` otherwise.
    """
    deltaE = np.real(energy1 - energy0)
    return 1 if deltaE < 0 or np.exp(-deltaE) > self.rng.uniform(0, 1) \
        else 0
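A standalone numpy sketch of the same Metropolis accept/reject rule; the module-level rng stands in for self.rng, and the acceptance-rate check at the end is only a sanity test, not part of the original class.

Python
import numpy as np

rng = np.random.default_rng(0)

def metropolis_accept(energy0, energy1):
    # Always accept if the energy decreased; otherwise accept with
    # probability exp(-deltaE).
    deltaE = np.real(energy1 - energy0)
    return 1 if deltaE < 0 or np.exp(-deltaE) > rng.uniform(0, 1) else 0

# For deltaE = 1 the long-run acceptance rate should approach exp(-1) ~ 0.368.
accepts = sum(metropolis_accept(0.0, 1.0) for _ in range(100_000))
print(accepts / 100_000)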
Python
def selectTrajectory(self, energy0, data0, energy1, data1):
    r"""!
    Select a trajectory point and pass along extra data.
    \param energy0 Energy at point 0 including the artificial kinetic term.
    \param data0 Arbitrary data associated with point 0.
    \param energy1 Energy at point 1 including the artificial kinetic term.
    \param data1 Arbitrary data associated with point 1.
    \return `(energy0, data0, 0)` if `energy0` was selected, otherwise `(energy1, data1, 1)`.
    """
    return (energy1, data1, 1) if self.selectTrajPoint(energy0, energy1) == 1 \
        else (energy0, data0, 0)
Python
def mock_program_to_qir(num_qubits: int, input_file: str) -> str:
    """
    Parses a Mock program and generates QIR based on the syntax tree.
    Usually the language-specific compiler would fully validate and
    potentially optimize the program before QIR is generated, but for
    illustration purposes we omit that from this example.
    :param num_qubits: The total number of qubits used in the program.
    :param input_file: Path of the file containing the Mock program.
    """
    lexer = MockLanguageLexer(FileStream(input_file))
    stream = CommonTokenStream(lexer)
    parser = MockLanguageParser(stream)
    tree = parser.document()
    generator = QirGenerator(Path(input_file).stem, num_qubits)
    walker = ParseTreeWalker()
    walker.walk(generator, tree)
    return generator.ir()
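A possible invocation, assuming the ANTLR-generated lexer and parser are importable; the file name and qubit count are hypothetical.

Python
# "bell_pair.mock" is a made-up path to a Mock-language source file.
qir = mock_program_to_qir(num_qubits=2, input_file="bell_pair.mock")
print(qir)  # textual QIR (LLVM IR) for the parsed program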
Python
def eval(self, file_path: str, gateset: GateSet,
         entry_point: Optional[str] = None,
         result_stream: Optional[List[bool]] = None):
    """
    JIT compiles and evaluates the QIR program, delegating quantum
    operations to the supplied gate set.
    The result stream will be read in order by the measurement instruction.
    Each measurement will pop a result from the beginning of the stream.
    If the stream runs out of results, measurement returns zero.
    Right now the evaluator does not have a full runtime environment and can
    JIT QIR produced by the pyqir-generator, but cannot use any external
    function calls.
    :param file_path: file path of existing QIR in a .ll or .bc file
    :param gateset: Python GateSet-based object defining the operations
    :param entry_point: entry point name; required if QIR contains multiple entry points
    :param result_stream: list of boolean result values representing the QIS measure results
    """
    self._jit.eval(file_path, gateset, entry_point, result_stream)
Python
def finish(self, metadata: dict):
    """
    Called at the end of QIR evaluation supplying run metadata.
    """
    pass
Python
def verify_age_requirment(user_Data):
    """ Algorithm for selecting candidates within the desired age limits. """
    if user_Data["age"] > MAXIMUM_AGE:
        # print("Applicant <" + str(user_Data["name"]) + "> is older than the required age limit of <" + str(MAXIMUM_AGE) + " YEARS>")
        return False
    elif user_Data["age"] < MINIMUM_AGE:
        # print("Applicant <" + str(user_Data["name"]) + "> is younger than the minimum required age limit of <" + str(MINIMUM_AGE) + " YEARS>")
        return False
    else:
        # print("Applicant <" + str(user_Data["name"]) + "> is within the designated age")
        return True
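Usage sketch: MINIMUM_AGE and MAXIMUM_AGE are module-level constants in the original, so the values below are placeholders.

Python
MINIMUM_AGE = 18  # placeholder
MAXIMUM_AGE = 35  # placeholder

print(verify_age_requirment({"name": "Ada", "age": 30}))  # True
print(verify_age_requirment({"name": "Bob", "age": 40}))  # False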
Python
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
    """
    Create the layers for a fully convolutional network.  Build skip-layers using the vgg layers.
    :param vgg_layer3_out: TF Tensor for VGG Layer 3 output
    :param vgg_layer4_out: TF Tensor for VGG Layer 4 output
    :param vgg_layer7_out: TF Tensor for VGG Layer 7 output
    :param num_classes: Number of classes to classify
    :return: The Tensor for the last layer of output
    """
    conv_1x1_layer7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same',
                                       kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    # Upsample
    upsample_layer7 = tf.layers.conv2d_transpose(conv_1x1_layer7, num_classes, 4, 2, padding='same',
                                                 kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    conv_1x1_layer4 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding='same',
                                       kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    # Skip connection
    layer4_out = tf.add(upsample_layer7, conv_1x1_layer4)
    # Upsample
    upsample_layer4_out = tf.layers.conv2d_transpose(layer4_out, num_classes, 4, 2, padding='same',
                                                     kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    conv_1x1_layer3 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding='same',
                                       kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    # Skip connection
    layer3_out = tf.add(upsample_layer4_out, conv_1x1_layer3)
    # Upsample
    nn_last_layer = tf.layers.conv2d_transpose(layer3_out, num_classes, 16, 8, padding='same',
                                               kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    return nn_last_layer
Python
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    """
    Build the TensorFlow loss and optimizer operations.
    :param nn_last_layer: TF Tensor of the last layer in the neural network
    :param correct_label: TF Placeholder for the correct label image
    :param learning_rate: TF Placeholder for the learning rate
    :param num_classes: Number of classes to classify
    :return: Tuple of (logits, train_op, cross_entropy_loss)
    """
    logits = tf.reshape(nn_last_layer, (-1, num_classes))
    correct_label = tf.reshape(correct_label, (-1, num_classes))
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
    # Include the L2 regularization losses collected by the conv layers.
    reg_loss = tf.losses.get_regularization_losses()
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(cross_entropy_loss + tf.reduce_sum(reg_loss))
    return logits, train_op, cross_entropy_loss
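As a cross-check on what the loss op computes, here is a pure-numpy version of softmax cross-entropy over one-hot labels (my own sketch, independent of the project code):

Python
import numpy as np

def softmax_cross_entropy(logits, labels):
    # Numerically stabilized log-softmax followed by cross-entropy against
    # one-hot labels, matching tf.nn.softmax_cross_entropy_with_logits row by row.
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    return -(labels * log_probs).sum(axis=1)

logits = np.array([[2.0, 0.5], [0.1, 3.0]])
labels = np.array([[1.0, 0.0], [0.0, 1.0]])
print(softmax_cross_entropy(logits, labels).mean())  # mean loss over the batch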
Python
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss,
             input_image, correct_label, keep_prob, learning_rate):
    """
    Train neural network and print out the loss during training.
    :param sess: TF Session
    :param epochs: Number of epochs
    :param batch_size: Batch size
    :param get_batches_fn: Function to get batches of training data.  Call using get_batches_fn(batch_size)
    :param train_op: TF Operation to train the neural network
    :param cross_entropy_loss: TF Tensor for the amount of loss
    :param input_image: TF Placeholder for input images
    :param correct_label: TF Placeholder for label images
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: TF Placeholder for learning rate
    """
    sess.run(tf.global_variables_initializer())
    print("Training starts...")
    print("##################")
    startTime = time.time()
    for i in range(epochs):
        print("EPOCH {} ...".format(i + 1))
        for image, label in get_batches_fn(batch_size):
            _, loss = sess.run([train_op, cross_entropy_loss],
                               feed_dict={input_image: image, correct_label: label,
                                          keep_prob: 0.5, learning_rate: 0.00085})
            print("Loss: {:.3f}".format(loss))
    print("Used time: {}".format(time.time() - startTime))
Python
def _draw_pdp_distplot(hist_data, hist_ax, plot_params):
    """Data point distribution plot for numeric feature"""
    font_family = plot_params.get('font_family', 'Arial')
    color = plot_params.get('pdp_color', '#1A4E5D')
    dist_xticks_color = '#424242'
    dist_xticks_size = 10

    hist_ax.plot(hist_data, [1] * len(hist_data), '|', color=color, markersize=20)
    _modify_legend_ax(hist_ax, font_family=font_family)
    hist_ax.set_title('distribution of data points',
                      fontdict={'family': font_family, 'color': dist_xticks_color},
                      fontsize=dist_xticks_size)
Python
def _pdp_plot(pdp_isolate_out, feature_name, center, plot_lines, frac_to_plot, cluster,
              n_cluster_centers, cluster_method, x_quantile, show_percentile, pdp_ax,
              count_data, count_ax, plot_params):
    """Internal helper function for pdp plot"""
    font_family = plot_params.get('font_family', 'Arial')
    xticks_rotation = plot_params.get('xticks_rotation', 0)

    feature_type = pdp_isolate_out.feature_type
    feature_grids = pdp_isolate_out.feature_grids
    display_columns = pdp_isolate_out.display_columns
    percentile_info = pdp_isolate_out.percentile_info
    percentile_xticklabels = list(percentile_info)

    if feature_type == 'binary' or feature_type == 'onehot' or x_quantile:
        # x for the original pdp; in any case, the pdp starts from x=0
        x = range(len(feature_grids))
        # xticks is for the major plot
        xticks = x
        xticklabels = list(display_columns)

        if count_ax is not None:
            # need to plot the data distribution
            if x_quantile:
                count_display_columns = count_data['xticklabels'].values
                # number of grids = number of bins + 1
                # count_x: min -> max + 1
                count_x = range(count_data['x'].min(), count_data['x'].max() + 2)
                # instead of just x
                xticks = count_x

                if count_x[0] == -1:
                    # xticklabels include the minimum value
                    xticklabels = [float(count_display_columns[0].split(',')[0].replace('[', ''))] + xticklabels
                    percentile_xticklabels = ['(0.0)'] + percentile_xticklabels
                if count_x[-1] == len(feature_grids):
                    # include the maximum value
                    xticklabels = xticklabels + [float(count_display_columns[-1].split(',')[1].replace(']', ''))]
                    percentile_xticklabels = percentile_xticklabels + ['(100.0)']
            else:
                # if it is not a numeric feature, xticks can be ignored
                xticklabels = []

        pdp_ax.set_xlim(xticks[0] - 0.5, xticks[-1] + 0.5)
        pdp_ax.set_xticks(xticks)
        pdp_ax.set_xticklabels(xticklabels, rotation=xticks_rotation)
    else:
        # for a numeric feature when x_quantile=False, no need to set xticks
        x = feature_grids

    ice_lines = copy.deepcopy(pdp_isolate_out.ice_lines)
    pdp_y = copy.deepcopy(pdp_isolate_out.pdp)

    # default: fill between std upper and lower; no need to highlight the pdp line
    std_fill = True
    pdp_hl = False

    # center the plot
    if center:
        pdp_y -= pdp_y[0]
        for col in feature_grids[1:]:
            ice_lines[col] -= ice_lines[feature_grids[0]]
        ice_lines[feature_grids[0]] = 0

    # cluster or plot lines
    if cluster or plot_lines:
        std_fill = False
        pdp_hl = True
        lines_params = {'x': x, 'feature_grids': feature_grids, 'ax': pdp_ax, 'plot_params': plot_params}
        if cluster:
            _ice_cluster_plot(ice_lines=ice_lines, n_cluster_centers=n_cluster_centers,
                              cluster_method=cluster_method, **lines_params)
        else:
            ice_plot_data = _sample_data(ice_lines=ice_lines, frac_to_plot=frac_to_plot)
            _ice_line_plot(ice_plot_data=ice_plot_data, **lines_params)

    # pdp
    std = ice_lines[feature_grids].std().values
    _pdp_std_plot(x=x, y=pdp_y, std=std, std_fill=std_fill, pdp_hl=pdp_hl,
                  ax=pdp_ax, plot_params=plot_params)
    _axes_modify(font_family, pdp_ax)

    # add the data distribution plot
    if count_ax is not None:
        if not x_quantile and feature_type == 'numeric':
            hist_data = copy.deepcopy(pdp_isolate_out.hist_data)
            _draw_pdp_distplot(hist_data=hist_data, hist_ax=count_ax, plot_params=plot_params)
        else:
            _draw_pdp_countplot(count_data=count_data, count_ax=count_ax, pdp_ax=pdp_ax,
                                feature_type=feature_type, display_columns=display_columns,
                                plot_params=plot_params)
        count_ax.set_xlabel(feature_name, fontsize=11, fontdict={'family': font_family})
    else:
        pdp_ax.set_xlabel(feature_name, fontsize=11, fontdict={'family': font_family})

    # show grid percentile info
    if show_percentile and len(percentile_info) > 0:
        percentile_pdp_ax = pdp_ax.twiny()
        percentile_pdp_ax.set_xticks(pdp_ax.get_xticks())
        percentile_pdp_ax.set_xbound(pdp_ax.get_xbound())
        percentile_pdp_ax.set_xticklabels(percentile_xticklabels, rotation=xticks_rotation)
        percentile_pdp_ax.set_xlabel('percentile info')
        _axes_modify(font_family=font_family, ax=percentile_pdp_ax, top=True)
Python
def _pdp_inter_grid(pdp_mx, inter_ax, cmap, norm, inter_fill_alpha, fontsize, plot_params):
    """Interact grid plot (heatmap)"""
    font_family = plot_params.get('font_family', 'Arial')
    im = inter_ax.imshow(pdp_mx, cmap=cmap, norm=norm, origin='lower',
                         aspect='auto', alpha=inter_fill_alpha)

    for r in range(pdp_mx.shape[0]):
        for c in range(pdp_mx.shape[1]):
            text_color = 'w'
            if pdp_mx[r, c] >= norm.vmin + (norm.vmax - norm.vmin) * 0.5:
                text_color = 'black'
            # column -> x, row -> y
            inter_ax.text(c, r, round(pdp_mx[r, c], 3), ha="center", va="center",
                          color=text_color, size=fontsize, fontdict={'family': font_family})

    # draw the white gaps
    inter_ax.set_xticks(np.arange(pdp_mx.shape[1] - 1) + 0.5, minor=True)
    inter_ax.set_yticks(np.arange(pdp_mx.shape[0] - 1) + 0.5, minor=True)
    inter_ax.grid(which="minor", color="w", linestyle='-', linewidth=1)

    # return the color mapping object for the colorbar
    return im
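The same annotated-heatmap recipe as a standalone matplotlib script with random data, for readers without the surrounding plotting helpers; the colormap and data shape are arbitrary.

Python
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np

data = np.random.default_rng(0).random((3, 4))
norm = mpl.colors.Normalize(vmin=data.min(), vmax=data.max())

fig, ax = plt.subplots()
im = ax.imshow(data, cmap='viridis', norm=norm, origin='lower', aspect='auto')
for r in range(data.shape[0]):
    for c in range(data.shape[1]):
        # Dark text on light cells, white text on dark cells.
        color = 'black' if data[r, c] >= (norm.vmin + norm.vmax) / 2 else 'w'
        ax.text(c, r, round(data[r, c], 3), ha='center', va='center', color=color)
# Minor ticks placed between cells draw the white separator grid.
ax.set_xticks(np.arange(data.shape[1] - 1) + 0.5, minor=True)
ax.set_yticks(np.arange(data.shape[0] - 1) + 0.5, minor=True)
ax.grid(which='minor', color='w', linewidth=1)
fig.colorbar(im, ax=ax)
plt.show()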
Python
def _pdp_inter_one(pdp_interact_out, feature_names, plot_type, inter_ax, x_quantile,
                   plot_params, norm, ticks=True):
    """Plot single PDP interact

    Parameters
    ----------
    norm: matplotlib colors normalize
    ticks: bool, default=True
        whether to set ticks for the plot,
        False when it is called by _pdp_inter_three
    """
    cmap = plot_params.get('cmap', 'viridis')
    inter_fill_alpha = plot_params.get('inter_fill_alpha', 0.8)
    fontsize = plot_params.get('inter_fontsize', 9)
    font_family = plot_params.get('font_family', 'Arial')

    # prepare pdp_mx
    pdp_mx_temp = copy.deepcopy(pdp_interact_out.pdp)
    for feature, feature_type, mark in zip(pdp_interact_out.features,
                                           pdp_interact_out.feature_types, ['x', 'y']):
        if feature_type in ['numeric', 'binary']:
            pdp_mx_temp[mark] = pdp_mx_temp[feature]
        else:
            # for a one-hot encoded feature, map it to a numeric representation
            pdp_mx_temp[mark] = pdp_mx_temp[feature].apply(lambda x: list(x).index(1), axis=1)
    pdp_mx_temp = pdp_mx_temp[['x', 'y', 'preds']].sort_values(by=['x', 'y'], ascending=True)

    pdp_inter = copy.deepcopy(pdp_mx_temp['preds'].values)
    n_grids_x = len(pdp_interact_out.feature_grids[0])
    n_grids_y = len(pdp_interact_out.feature_grids[1])
    # pdp_inter.reshape((n_grids_x, n_grids_y)): each row represents grids_x
    # pdp_inter.reshape((n_grids_x, n_grids_y)).T: each row represents grids_y
    pdp_mx = pdp_inter.reshape((n_grids_x, n_grids_y)).T

    # if it is called by _pdp_inter_three, norm is not None
    if norm is None:
        pdp_min, pdp_max = np.min(pdp_inter), np.max(pdp_inter)
        norm = mpl.colors.Normalize(vmin=pdp_min, vmax=pdp_max)

    inter_params = {
        'pdp_mx': pdp_mx, 'inter_ax': inter_ax, 'cmap': cmap, 'norm': norm,
        'inter_fill_alpha': inter_fill_alpha, 'fontsize': fontsize, 'plot_params': plot_params
    }
    if plot_type == 'contour':
        if x_quantile:
            # because the matrix has been transposed:
            # pdp_mx.shape[1] is x, pdp_mx.shape[0] is y
            X, Y = np.meshgrid(range(pdp_mx.shape[1]), range(pdp_mx.shape[0]))
        else:
            # for numeric, not quantile
            X, Y = np.meshgrid(pdp_interact_out.feature_grids[0], pdp_interact_out.feature_grids[1])
        im = _pdp_contour_plot(X=X, Y=Y, **inter_params)
    elif plot_type == 'grid':
        im = _pdp_inter_grid(**inter_params)
    else:
        raise ValueError("plot_type: should be 'contour' or 'grid'")

    if ticks:
        # if it is called by _pdp_inter_three, there is no need to set ticks
        _axes_modify(font_family=font_family, ax=inter_ax, grid=True)

        if pdp_interact_out.feature_types[0] != 'numeric' or x_quantile:
            inter_ax.set_xticks(range(len(pdp_interact_out.pdp_isolate_outs[0].display_columns)))
            inter_ax.set_xticklabels(pdp_interact_out.pdp_isolate_outs[0].display_columns)
        if pdp_interact_out.feature_types[1] != 'numeric' or x_quantile:
            inter_ax.set_yticks(range(len(pdp_interact_out.pdp_isolate_outs[1].display_columns)))
            inter_ax.set_yticklabels(pdp_interact_out.pdp_isolate_outs[1].display_columns)

        inter_ax.set_xlabel(feature_names[0], fontsize=12, fontdict={'family': font_family})
        inter_ax.set_ylabel(feature_names[1], fontsize=12, fontdict={'family': font_family})

        # insert colorbar
        inter_ax_divider = make_axes_locatable(inter_ax)
        cax = inter_ax_divider.append_axes("right", size="5%", pad="2%")
        if plot_type == 'grid':
            cb_num_grids = np.max([np.min([n_grids_x, n_grids_y, 8]), 8])
            boundaries = [round(v, 3) for v in np.linspace(norm.vmin, norm.vmax, cb_num_grids)]
            cb = plt.colorbar(im, cax=cax, boundaries=boundaries)
        else:
            cb = plt.colorbar(im, cax=cax, format='%.3f')
        _axes_modify(font_family=font_family, ax=cax, right=True, grid=True)
        cb.outline.set_visible(False)

    inter_ax.tick_params(which="minor", bottom=False, left=False)
    return im
Python
def _pdp_inter_three(pdp_interact_out, feature_names, plot_type, chart_grids, x_quantile, fig, plot_params):
    """Plot PDP interact with pdp isolate color bar

    Parameters
    ----------
    chart_grids: matplotlib subplot gridspec
    """
    cmap = plot_params.get('cmap', 'viridis')
    font_family = plot_params.get('font_family', 'Arial')

    pdp_x_ax = fig.add_subplot(chart_grids[1])
    pdp_y_ax = fig.add_subplot(chart_grids[2])
    inter_ax = fig.add_subplot(chart_grids[3], sharex=pdp_x_ax, sharey=pdp_y_ax)

    pdp_x = copy.deepcopy(pdp_interact_out.pdp_isolate_outs[0].pdp)
    pdp_y = copy.deepcopy(pdp_interact_out.pdp_isolate_outs[1].pdp)
    pdp_inter = copy.deepcopy(pdp_interact_out.pdp['preds'].values)
    pdp_values = np.concatenate((pdp_x, pdp_y, pdp_inter))
    pdp_min, pdp_max = np.min(pdp_values), np.max(pdp_values)

    norm = mpl.colors.Normalize(vmin=pdp_min, vmax=pdp_max)
    vmean = norm.vmin + (norm.vmax - norm.vmin) * 0.5
    feature_grids = pdp_interact_out.feature_grids

    pdp_xy_params = {'cmap': cmap, 'norm': norm, 'vmean': vmean,
                     'plot_params': plot_params, 'plot_type': plot_type}
    _pdp_xy(pdp_values=pdp_x, pdp_ax=pdp_x_ax,
            ticklabels=pdp_interact_out.pdp_isolate_outs[0].display_columns,
            feature_name=feature_names[0], y=False, **pdp_xy_params)
    _pdp_xy(pdp_values=pdp_y, pdp_ax=pdp_y_ax,
            ticklabels=pdp_interact_out.pdp_isolate_outs[1].display_columns,
            feature_name=feature_names[1], y=True, **pdp_xy_params)

    im = _pdp_inter_one(pdp_interact_out=pdp_interact_out, feature_names=feature_names,
                        plot_type=plot_type, inter_ax=inter_ax, x_quantile=x_quantile,
                        plot_params=plot_params, norm=norm, ticks=False)

    inter_ax.set_frame_on(False)
    plt.setp(inter_ax.get_xticklabels(), visible=False)
    plt.setp(inter_ax.get_yticklabels(), visible=False)
    inter_ax.tick_params(which="minor", bottom=False, left=False)
    inter_ax.tick_params(which="major", bottom=False, left=False)

    # insert colorbar
    if plot_type == 'grid':
        cax = inset_axes(inter_ax, width="100%", height="100%", loc='right',
                         bbox_to_anchor=(1.05, 0., 0.05, 1),
                         bbox_transform=inter_ax.transAxes, borderpad=0)
        cb_num_grids = np.max([np.min([len(feature_grids[0]), len(feature_grids[1]), 8]), 8])
        boundaries = [round(v, 3) for v in np.linspace(norm.vmin, norm.vmax, cb_num_grids)]
        cb = plt.colorbar(im, cax=cax, boundaries=boundaries)
    else:
        cax = inset_axes(inter_ax, width="5%", height="80%", loc='right')
        cb = plt.colorbar(im, cax=cax, format='%.3f')
    _axes_modify(font_family=font_family, ax=cax, right=True, grid=True)
    cb.outline.set_visible(False)

    return {'_pdp_x_ax': pdp_x_ax, '_pdp_y_ax': pdp_y_ax, '_pdp_inter_ax': inter_ax}
Python
def cleanObservations(self, now):
    """Clean observations for planes not seen in a while"""
    if now > self.__next_clean:
        cleaned = []
        for icao24 in self.__observations:
            if trace_parser:
                log.debug("[%s] %s -> %s : %s" % (
                    icao24,
                    self.__observations[icao24].getLoggedDate(),
                    self.__observations[icao24].getLoggedDate() + timedelta(seconds=OBSERVATION_CLEAN_INTERVAL),
                    now))
            if self.__observations[icao24].getLoggedDate() + timedelta(seconds=OBSERVATION_CLEAN_INTERVAL) < now:
                if trace_parser:
                    log.debug("%s disappeared" % (icao24))
                cleaned.append(icao24)
        for icao24 in cleaned:
            del self.__observations[icao24]
        self.__next_clean = now + timedelta(seconds=OBSERVATION_CLEAN_INTERVAL)
        self.__message_rate = float(self.__counters['messages']) / OBSERVATION_CLEAN_INTERVAL
        self.__observation_rate = float(self.__counters['observations']) / OBSERVATION_CLEAN_INTERVAL
        self.__counters.clear()
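The core pattern, collect stale keys first and delete afterwards, shown in isolation with a plain dict of timestamps; the key names and the 30-second window are made up.

Python
from datetime import datetime, timedelta

CLEAN_INTERVAL = 30  # seconds; placeholder for OBSERVATION_CLEAN_INTERVAL

def prune_stale(observations, now, max_age=timedelta(seconds=CLEAN_INTERVAL)):
    # Collect keys first, then delete, so the dict is not mutated while iterating.
    stale = [key for key, last_seen in observations.items() if last_seen + max_age < now]
    for key in stale:
        del observations[key]
    return stale

now = datetime.now()
obs = {"abc123": now - timedelta(seconds=60), "def456": now}
print(prune_stale(obs, now))  # ['abc123']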
Python
def wikidata_url():
    """URL for downloading Wikidata dump."""
    mid = ""
    if flags.arg.wikidata != "latest":
        mid = flags.arg.wikidata + "/wikidata-"
    return "https://dumps.wikimedia.org/wikidatawiki/entities/" + \
           mid + flags.arg.wikidata + "-all.json.bz2"
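To make the two URL shapes concrete, a standalone variant with the flag value passed as a parameter (the helper name is mine):

Python
def wikidata_dump_url(version):
    # Mirrors wikidata_url() with flags.arg.wikidata made an explicit argument.
    mid = "" if version == "latest" else version + "/wikidata-"
    return "https://dumps.wikimedia.org/wikidatawiki/entities/" + mid + version + "-all.json.bz2"

print(wikidata_dump_url("latest"))
# https://dumps.wikimedia.org/wikidatawiki/entities/latest-all.json.bz2
print(wikidata_dump_url("20200101"))
# https://dumps.wikimedia.org/wikidatawiki/entities/20200101/wikidata-20200101-all.json.bz2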
Python
def wikipedia_url(language=None):
    """URL for downloading Wikipedia dump."""
    if language is None:
        language = flags.arg.language
    return "https://dumps.wikimedia.org/" + language + "wiki/" + \
           flags.arg.wikipedia + "/" + language + "wiki-" + \
           flags.arg.wikipedia + "-pages-articles.xml.bz2"
Python
def vocabulary(self, language=None):
    """Resource for word embedding vocabulary. This is a text map with
    (normalized) words and counts.
    """
    if language is None:
        language = flags.arg.language
    return self.wf.resource("word-vocabulary.map",
                            dir=corpora.wikidir(language),
                            format="textmap/word")
Python
def word_embeddings(self, language=None):
    """Resource for word embeddings in word2vec embedding format."""
    if language is None:
        language = flags.arg.language
    return self.wf.resource("word-embeddings.vec",
                            dir=corpora.wikidir(language),
                            format="embeddings")
Python
def fact_lexicon(self):
    """Resource for fact vocabulary (text map with fact paths and counts)."""
    return self.wf.resource("facts.map", dir=self.fact_dir(), format="textmap/fact")
Python
def category_lexicon(self):
    """Resource for category vocabulary (text map with categories and counts)."""
    return self.wf.resource("categories.map", dir=self.fact_dir(), format="textmap/category")
Python
def fact_embeddings(self):
    """Resource for fact embeddings in word2vec embedding format."""
    return self.wf.resource("fact-embeddings.vec", dir=self.fact_dir(), format="embeddings")
Python
def category_embeddings(self):
    """Resource for category embeddings in word2vec embedding format."""
    return self.wf.resource("category-embeddings.vec", dir=self.fact_dir(), format="embeddings")
Python
def extract_fact_lexicon(self):
    """Build fact and category lexicons."""
    kb = self.wiki.knowledge_base()
    factmap = self.fact_lexicon()
    catmap = self.category_lexicon()
    with self.wf.namespace("fact-embeddings"):
        trainer = self.wf.task("fact-lexicon-extractor")
        trainer.attach_input("kb", kb)
        trainer.attach_output("factmap", factmap)
        trainer.attach_output("catmap", catmap)
    return factmap, catmap
Python
def extract_facts(self):
    """Extract facts for items in the knowledge base."""
    kb = self.wiki.knowledge_base()
    factmap = self.fact_lexicon()
    catmap = self.category_lexicon()
    output = self.facts()
    with self.wf.namespace("fact-embeddings"):
        extractor = self.wf.task("fact-extractor")
        extractor.attach_input("kb", kb)
        extractor.attach_input("factmap", factmap)
        extractor.attach_input("catmap", catmap)
        facts = self.wf.channel(extractor, format="message/frame")
        return self.wf.write(facts, output, name="fact-writer")
Python
def train_fact_embeddings(self):
    """Train fact and category embeddings."""
    facts = self.facts()
    factmap = self.fact_lexicon()
    catmap = self.category_lexicon()
    fact_embeddings = self.fact_embeddings()
    category_embeddings = self.category_embeddings()
    with self.wf.namespace("fact-embeddings"):
        trainer = self.wf.task("fact-embeddings-trainer")
        trainer.add_params({
            "batch_size": 256,
            "batches_per_update": 32,
            "embedding_dims": 256,
            "normalize": False,
            "epochs": 100000,
            "report_interval": 250,
            "learning_rate": 1.0,
            "learning_rate_decay": 0.95,
            "rampup": 120,
            "clipping": 1,
            "optimizer": "sgd",
        })
        self.wf.connect(self.wf.read(facts, name="fact-reader"), trainer)
        trainer.attach_input("factmap", factmap)
        trainer.attach_input("catmap", catmap)
        trainer.attach_output("factvecs", fact_embeddings)
        trainer.attach_output("catvecs", category_embeddings)
    return fact_embeddings, category_embeddings
Python
def fact_plausibility_model(self):
    """Resource for fact plausibility model."""
    return self.wf.resource("plausibility.flow", dir=self.fact_dir(), format="flow")
Python
def attach_input(self, name, resource):
    """Attach named input resource(s) to task."""
    if isinstance(resource, list):
        for r in resource:
            self.inputs.append(Binding(name, r))
    else:
        self.inputs.append(Binding(name, resource))
Python
def attach_output(self, name, resource):
    """Attach named output resource(s) to task."""
    if isinstance(resource, list):
        for r in resource:
            self.outputs.append(Binding(name, r))
    else:
        self.outputs.append(Binding(name, resource))
Python
def add_param(self, name, value):
    """Add configuration parameter to task."""
    if value is True:
        value = 1
    if value is False:
        value = 0
    self.params[name] = str(value)
Python
def add_params(self, params):
    """Add configuration parameters to task."""
    if params is not None:
        for name, value in params.items():
            self.add_param(name, value)
Python
def prefix(self):
    """Returns the name prefix defined in the scope by concatenating
    all nested name spaces."""
    parts = []
    s = self
    while s is not None:
        parts.append(s.name)
        s = s.prev
    return '/'.join(reversed(parts))
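A minimal stand-in for the scope chain, showing how nested name spaces concatenate; this Scope class is illustrative, not the workflow's actual class.

Python
class Scope:
    def __init__(self, name, prev=None):
        self.name = name
        self.prev = prev  # enclosing scope, or None at the top level

    def prefix(self):
        # Walk the chain of enclosing scopes and join their names.
        parts = []
        s = self
        while s is not None:
            parts.append(s.name)
            s = s.prev
        return '/'.join(reversed(parts))

inner = Scope("embeddings", prev=Scope("wiki"))
print(inner.prefix())  # wiki/embeddings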
Python
def format_of(input):
    """Get format from one or more channels or resources."""
    if isinstance(input, list):
        return input[0].format
    else:
        return input.format
Python
def task(self, type, name=None, shard=None, params=None):
    """Add a new task to the workflow."""
    if name is None:
        name = type
    if self.scope is not None:
        name = self.scope.prefix() + "/" + name
    basename = name
    index = 0
    while (name, shard) in self.task_map:
        index += 1
        name = basename + "-" + str(index)
    t = Task(type, name, shard)
    if params is not None:
        t.add_params(params)
    self.tasks.append(t)
    self.task_map[(name, shard)] = t
    return t
Python
def resource(self, file, dir=None, shards=None, ext=None, format=None):
    """Adds one or more resources to workflow. The file parameter can be a
    file name pattern with wild-cards, in which case it is expanded to a
    list of matching resources. The optional dir and ext are prepended and
    appended to the base file name. The file name can also be a sharded
    file name (@n), which is expanded to a list of resources, one for each
    shard. The general format of a file name is as follows:
    [<dir>]<file>[@<shards>][ext]"""
    # Recursively expand comma-separated list of files.
    if "," in file:
        resources = []
        for f in file.split(","):
            r = self.resource(f, dir=dir, shards=shards, ext=ext, format=format)
            if isinstance(r, list):
                resources.extend(r)
            else:
                resources.append(r)
        return resources

    # Convert format.
    if type(format) == str:
        format = Format(format)

    # Combine file name parts.
    filename = file
    if dir is not None:
        filename = os.path.join(dir, filename)
    if shards is not None:
        filename += "@" + str(shards)
    if ext is not None:
        filename += ext

    # Check if filename is a wildcard pattern.
    filenames = []
    if re.search(r"[\*\?\[\]]", filename):
        # Match file name pattern.
        filenames = glob.glob(filename)
    else:
        m = re.match(r"(.*)@(\d+)(.*)", filename)
        if m is not None:
            # Expand sharded filename.
            prefix = m.group(1)
            shards = int(m.group(2))
            suffix = m.group(3)
            for shard in range(shards):
                fn = "%s-%05d-of-%05d%s" % (prefix, shard, shards, suffix)
                filenames.append(fn)
        else:
            # Simple filename.
            filenames.append(filename)

    # Create resources.
    n = len(filenames)
    if n == 0:
        return None
    elif n == 1:
        key = (filenames[0], None, str(format))
        r = self.resource_map.get(key)
        if r is None:
            r = Resource(filenames[0], None, format)
            self.resource_map[key] = r
            self.resources.append(r)
        return r
    else:
        filenames.sort()
        resources = []
        for shard in range(n):
            key = (filenames[shard], str(Shard(shard, n)), str(format))
            r = self.resource_map.get(key)
            if r is None:
                r = Resource(filenames[shard], Shard(shard, n), format)
                self.resource_map[key] = r
                self.resources.append(r)
            resources.append(r)
        return resources
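# A self-contained sketch of the "@n" shard expansion rule used by resource()
# above: "<prefix>@<n><suffix>" expands to n file names of the form
# <prefix>-00000-of-0000n<suffix>. The helper is hypothetical, extracted here
# for illustration only.
import re

def _expand_shards(filename):
    m = re.match(r"(.*)@(\d+)(.*)", filename)
    if m is None:
        return [filename]
    prefix, shards, suffix = m.group(1), int(m.group(2)), m.group(3)
    return ["%s-%05d-of-%05d%s" % (prefix, i, shards, suffix)
            for i in range(shards)]

assert _expand_shards("items@3.rec") == [
    "items-00000-of-00003.rec",
    "items-00001-of-00003.rec",
    "items-00002-of-00003.rec",
]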
def channel(self, producer, name="output", shards=None, format=None):
    """Adds one or more channels to the workflow. The channel(s) are
    connected as sinks to the producer(s). If shards are specified, this
    creates a sharded set of channels."""
    if type(format) == str:
        format = Format(format)
    if isinstance(producer, list):
        channels = []
        for p in producer:
            if shards is not None:
                for shard in range(shards):
                    ch = Channel(format, Port(p, name, Shard(shard, shards)),
                                 None)
                    p.connect_sink(ch)
                    channels.append(ch)
                    self.channels.append(ch)
            else:
                ch = Channel(format, Port(p, name, None), None)
                p.connect_sink(ch)
                channels.append(ch)
                self.channels.append(ch)
        return channels
    elif shards is not None:
        channels = []
        for shard in range(shards):
            sink = Port(producer, name, Shard(shard, shards))
            ch = Channel(format, sink, None)
            producer.connect_sink(ch)
            channels.append(ch)
            self.channels.append(ch)
        return channels
    else:
        ch = Channel(format, Port(producer, name, None), None)
        producer.connect_sink(ch)
        self.channels.append(ch)
        return ch
def read(self, input, name=None, params=None):
    """Add readers for input resource(s). The format of the input resource is
    used for selecting an appropriate reader task for the format."""
    if isinstance(input, list):
        outputs = []
        shards = len(input)
        for shard in range(shards):
            format = input[shard].format
            if type(format) == str:
                format = Format(format)
            if format is None:
                format = Format("text")
            tasktype = readers.get(format.file)
            if tasktype is None:
                raise Exception("No reader for " + str(format))
            reader = self.task(tasktype, name=name,
                               shard=Shard(shard, shards))
            reader.add_params(params)
            reader.attach_input("input", input[shard])
            output = self.channel(reader, format=format.as_message())
            outputs.append(output)
        return outputs
    else:
        format = input.format
        if type(format) == str:
            format = Format(format)
        if format is None:
            format = Format("text")
        tasktype = readers.get(format.file)
        if tasktype is None:
            raise Exception("No reader for " + str(format))
        reader = self.task(tasktype, name=name)
        reader.add_params(params)
        reader.attach_input("input", input)
        output = self.channel(reader, format=format.as_message())
        return output
def write(self, producer, output, sharding=None, name=None, params=None):
    """Add writers for output resource(s). The format of the output resource
    is used for selecting an appropriate writer task for the format."""
    # Determine fan-in (channels) and fan-out (files).
    if not isinstance(producer, list):
        producer = [producer]
    if not isinstance(output, list):
        output = [output]
    fanin = len(producer)
    fanout = len(output)

    # Use sharding if fan-out is different from fan-in.
    if sharding is None and fanout != 1 and fanin != fanout:
        sharding = "sharder"

    # Create sharder if needed.
    if sharding is None:
        input = producer
    else:
        sharder = self.task(sharding)
        if fanin == 1:
            self.connect(producer[0], sharder)
        else:
            self.connect(producer, sharder)
        input = self.channel(sharder, shards=fanout,
                             format=producer[0].format)

    # Create writer tasks for writing to output.
    writer_tasks = []
    for shard in range(fanout):
        format = output[shard].format
        if type(format) == str:
            format = Format(format)
        if format is None:
            format = Format("text")
        tasktype = writers.get(format.file)
        if tasktype is None:
            raise Exception("No writer for " + str(format))
        if fanout == 1:
            writer = self.task(tasktype, name=name)
        else:
            writer = self.task(tasktype, name=name,
                               shard=Shard(shard, fanout))
        writer.attach_output("output", output[shard])
        writer.add_params(params)
        writer_tasks.append(writer)

    # Connect producer(s) to writer task(s).
    if isinstance(input, list) and len(input) == 1:
        input = input[0]
    if fanout == 1:
        writer_tasks = writer_tasks[0]
    self.connect(input, writer_tasks)

    return output
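# The sharding decision in write() above, extracted for clarity: a sharder
# task is only inserted when there are multiple output files and the channel
# fan-in differs from the file fan-out. Hypothetical helper for illustration.
def _needs_sharder(fanin, fanout):
    return fanout != 1 and fanin != fanout

assert _needs_sharder(fanin=1, fanout=4) is True   # spread 1 channel to 4 files
assert _needs_sharder(fanin=4, fanout=4) is False  # already aligned
assert _needs_sharder(fanin=4, fanout=1) is False  # single writer merges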
def pipe(self, command, format=None, name=None):
    """Run command and pipe output to channel."""
    reader = self.task("pipe-reader", name, params={"command": command})
    if type(format) == str:
        format = Format(format)
    if format is None:
        format = Format("pipe/text")
    output = self.channel(reader, format=format.as_message())
    return output
def collect(self, *args):
    """Return list of channels that collects the input from all the
    arguments. The arguments can be channels, resources, or lists of channels
    or resources."""
    channels = []
    for arg in args:
        if isinstance(arg, Channel):
            channels.append(arg)
        elif isinstance(arg, Resource):
            channels.append(self.read(arg))
        elif isinstance(arg, list):
            for elem in arg:
                if isinstance(elem, Channel):
                    channels.append(elem)
                elif isinstance(elem, Resource):
                    channels.append(self.read(elem))
                else:
                    raise Exception("illegal element")
        else:
            raise Exception("illegal argument")
    return channels if len(channels) > 1 else channels[0]
def parallel(self, input, threads=5, queue=None, name=None):
    """Parallelize input messages over thread worker pool."""
    workers = self.task("workers", name=name)
    workers.add_param("worker_threads", threads)
    if queue is not None:
        workers.add_param("queue_size", queue)
    self.connect(input, workers)
    return self.channel(workers, format=format_of(input))
def shuffle(self, input, shards=None):
    """Shard and sort the input messages."""
    if shards is not None:
        # Create sharder and connect input.
        sharder = self.task("sharder")
        self.connect(input, sharder)
        pipes = self.channel(sharder, shards=shards, format=format_of(input))

        # Pipe outputs from sharder to sorters.
        sorters = []
        for i in range(shards):
            sorter = self.task("sorter", shard=Shard(i, shards))
            self.connect(pipes[i], sorter)
            sorters.append(sorter)
    else:
        sorters = self.task("sorter")
        self.connect(input, sorters)

    # Return output channel from sorters.
    outputs = self.channel(sorters, format=format_of(input))
    return outputs
def reduce(self, input, output, type=None, params=None, name=None):
    """Reduce input and write reduced output."""
    if type is None:
        # No reducer (i.e. identity reducer), just write input.
        reducer = None
        reduced = input
    else:
        reducer = self.task(type, name=name)
        reducer.add_params(params)
        self.connect(input, reducer)
        reduced = self.channel(reducer,
                               shards=length_of(output),
                               format=format_of(output).as_message())

    # Write reduce output.
    self.write(reduced, output, params=params)
    return reducer
def mapreduce(self, input, output, mapper, reducer=None, params=None,
              format=None):
    """Map input files, shuffle, sort, reduce, and output to files."""
    # Determine the number of output shards.
    shards = length_of(output)

    # Mapping of input.
    mapping = self.map(input, mapper, params=params, format=format)

    # Shuffling of map output.
    shuffle = self.shuffle(mapping, shards=shards)

    # Reduction of shuffled map output.
    self.reduce(shuffle, output, reducer, params=params)
    return output
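# A hedged usage sketch of the mapreduce() helper above. The workflow
# instance wf, the resource names, and the "my-mapper"/"my-reducer" task
# types are hypothetical; illustrative only:
#
#   input = wf.resource("lines@4.rec", dir="/tmp/in", format="records/text")
#   output = wf.resource("counts@2.rec", dir="/tmp/out",
#                        format="records/frame")
#   wf.mapreduce(input, output, mapper="my-mapper", reducer="my-reducer")
#
# The shard count of the output resource determines the number of shuffle
# buckets, since shards = length_of(output) is passed to shuffle().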
def start(self):
    """Start workflow. The workflow will be run in the background, and the
    done() and wait() methods can be used to determine if the workflow has
    completed."""
    # Make sure all output directories exist.
    self.create_output_directories()

    # Create underlying job in task system.
    if self.job is not None:
        raise Exception("job already running")
    self.job = api.Job(self, self.name)

    # Start job.
    global active
    active = True
    self.job.start()
def create_output_directories(self):
    """Create output directories for workflow."""
    checked = set()
    for task in self.tasks:
        for output in task.outputs:
            directory = os.path.dirname(output.resource.name)
            if directory in checked:
                continue
            if not os.path.exists(directory):
                os.makedirs(directory)
            checked.add(directory)
def parse():
    """Parse command-line flags."""
    # Register all the C++ flags.
    flags = api.get_flags()
    for name, help, default in flags:
        if type(default) == bool:
            parser.add_argument("--" + name, help=help, default=default,
                                action="store_true")
        else:
            parser.add_argument("--" + name, help=help, type=type(default),
                                default=default, metavar="VAL")

    # Parse command line flags.
    global arg
    parser.parse_args(namespace=arg)

    # Set C++ flags.
    current = vars(arg)
    for name, help, default in flags:
        value = current[name]
        if value != default:
            api.set_flag(name, value)

    # Call all the post-processing hooks.
    for callback in hooks:
        callback(arg)
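# A self-contained sketch of the flag registration rule in parse() above:
# boolean defaults become store_true switches, everything else keeps the
# type of its default value. The flag names here are hypothetical.
import argparse

_p = argparse.ArgumentParser()
for _name, _help, _default in [("verbose", "log more", False),
                               ("threads", "worker count", 5)]:
    if type(_default) == bool:
        _p.add_argument("--" + _name, help=_help, default=_default,
                        action="store_true")
    else:
        _p.add_argument("--" + _name, help=_help, type=type(_default),
                        default=_default, metavar="VAL")

_ns = _p.parse_args(["--verbose", "--threads", "8"])
assert _ns.verbose is True and _ns.threads == 8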
def as_frame(self, frame):
    """Specify the runtime implementation of this delegate."""
    frame["runtime"] = "SoftmaxDelegate"

    # Save the action table for this delegate in the frame.
    actions = frame.store().array(self.size())
    for i in range(self.size()):
        action = self.action(i, previous_action=None)
        actions[i] = action.as_frame(frame.store())
    frame["actions"] = actions
def build(self, cascade, actions):
    """Build table of actions handled by the delegate."""
    self.table = Actions()
    for action in actions.table:
        if action.type != Action.SHIFT and \
           action.type != Action.MARK and not is_pbevoke(action):
            self.table.add(action)
    self.softmax_size = self.table.size() + 1  # +1 for CASCADE action
    self.pb_index = self.table.size()          # last action is CASCADE

    # Assume we will delegate PropBank EVOKES to PropbankEvokeDelegate.
    self.pb_action = Action(Action.CASCADE)
    self.pb_action.delegate = cascade.index_of("PropbankEvokeDelegate")
def wikipedia_dump(self, language=None):
    """Resource for wikipedia dump. This can be downloaded from wikimedia.org
    and contains a full dump of Wikipedia in a particular language. This is
    in XML format with the articles in Wiki markup format."""
    if language is None:
        language = flags.arg.language
    return self.wf.resource(corpora.wikipedia_dump(language),
                            format="xml/wikipage")
def expand(self, func, var, inputs):
    """Traverse graphs and add ops to flow."""
    if var not in self.vars:
        # Add new variable to flow.
        self.vars.append(var)
        v = self.flow.var(var.name, var.dtype.base_dtype.name, [])

        # Get data for constants and variables.
        if var.op.type in ["Const", "ConstV2"]:
            v.data = tf.contrib.util.constant_value(var)
        elif var.op.type in ["Variable", "VariableV2"]:
            if self.feed is None:
                v.data = var.eval(session=self.sess)
            else:
                v.data = self.sess.run(var, feed_dict=self.feed)

        # Get shape.
        if v.data is None:
            shape = var.get_shape()
            for d in shape.as_list():
                if d is not None:
                    v.shape.append(d)
                else:
                    v.shape.append(-1)
        else:
            for d in v.data.shape:
                v.shape.append(d)

        if var not in inputs:
            op = var.op
            if op not in self.ops:
                # Add new operation to flow function.
                self.ops.append(op)
                o = self.flow.op(op.name)
                func.add(o)
                o.type = op.type
                for input in op.inputs:
                    o.add_input(self.flow.var(input.name))
                for output in op.outputs:
                    o.add_output(self.flow.var(output.name))
                for a in op.node_def.attr:
                    o.add_attr(a, attr_str(op.get_attr(a)))

                # Traverse dependencies.
                for dep in op.inputs:
                    self.expand(func, dep, inputs)
def wikidata_import(self, input, name=None):
    """Task for converting Wikidata JSON to SLING items and properties."""
    task = self.wf.task("wikidata-importer", name=name)
    task.add_param("primary_language", flags.arg.language)
    task.add_param("only_primary_language", flags.arg.only_primary_language)
    task.add_param("only_known_languages", flags.arg.only_known_languages)
    self.wf.connect(input, task)
    items = self.wf.channel(task, name="items", format="message/frame")
    properties = self.wf.channel(task, name="properties",
                                 format="message/frame")
    return items, properties
def wikidata(self, dump=None):
    """Import Wikidata dump to frame format. It takes a Wikidata dump in JSON
    format as input and converts each item and property to a SLING frame.
    Returns the item and property output files."""
    if dump is None:
        dump = self.wikidata_dump()
    with self.wf.namespace("wikidata"):
        if flags.arg.lbzip2:
            input = self.wf.pipe("lbzip2 -d -c " + dump.name,
                                 name="wiki-decompress",
                                 format="text/json")
        else:
            input = self.wf.read(dump)
        input = self.wf.parallel(input, threads=5)
        items, properties = self.wikidata_import(input)
        items_output = self.wikidata_items()
        self.wf.write(items, items_output, name="item-writer")
        properties_output = self.wikidata_properties()
        self.wf.write(properties, properties_output, name="property-writer")
        return items_output, properties_output
def wikipedia_articles(self, language=None):
    """Resource for wikipedia articles. This is a set of record files where
    each Wikipedia article is encoded as a SLING document.

    <wid>: {
      =<wid>
      :/wp/page
      /wp/page/pageid: ...
      /wp/page/title: "..."
      lang: /lang/<lang>
      /wp/page/text: "<Wikipedia page in Wiki markup format>"
    }
    """
    if language is None:
        language = flags.arg.language
    return self.wf.resource("[email protected]", dir=corpora.wikidir(language),
                            format="records/frame")
def wikipedia_categories(self, language=None):
    """Resource for wikipedia categories. This is a set of record files where
    each Wikipedia category is encoded as a SLING document."""
    if language is None:
        language = flags.arg.language
    return self.wf.resource("[email protected]", dir=corpora.wikidir(language),
                            format="records/frame")
def wikipedia_redirects(self, language=None):
    """Resource for wikipedia redirects. This is encoded as a SLING frame
    store where each redirect is a SLING frame.

    {
      =<wid for redirect page>
      :/wp/redirect
      /wp/redirect/pageid: ...
      /wp/redirect/title: "..."
      /wp/redirect/link: <wid for target page>
    }
    """
    if language is None:
        language = flags.arg.language
    return self.wf.resource("redirects.sling", dir=corpora.wikidir(language),
                            format="store/frame")
def wikipedia_mapping(self, language=None):
    """Resource for wikipedia to wikidata mapping. This is a SLING frame
    store with one frame per Wikipedia article with information for mapping
    it to Wikidata.

    {
      =<wid>
      /w/item/qid: <qid>
      /w/item/kind: /w/item/kind/...
    }
    """
    if language is None:
        language = flags.arg.language
    return self.wf.resource("mapping.sling", dir=corpora.wikidir(language),
                            format="store/frame")
def wikipedia_category_documents(self, language=None):
    """Resource for parsed Wikipedia category documents."""
    if language is None:
        language = flags.arg.language
    return self.wf.resource("[email protected]", dir=corpora.wikidir(language),
                            format="records/document")
def wikipedia_import(self, input, name=None):
    """Task for converting Wikipedia dump to SLING articles and redirects.
    Returns article, categories, and redirect channels."""
    task = self.wf.task("wikipedia-importer", name=name)
    task.attach_input("input", input)
    articles = self.wf.channel(task, name="articles",
                               format="message/frame")
    categories = self.wf.channel(task, name="categories",
                                 format="message/frame")
    redirects = self.wf.channel(task, name="redirects",
                                format="message/frame")
    return articles, categories, redirects
def wikipedia(self, dump=None, language=None):
    """Convert Wikipedia dump to SLING articles and store them in a set of
    record files. Returns output resources for articles and redirects."""
    if language is None:
        language = flags.arg.language
    if dump is None:
        dump = self.wikipedia_dump(language)
    with self.wf.namespace(language + "-wikipedia"):
        # Import Wikipedia dump and convert to SLING format.
        articles, categories, redirects = self.wikipedia_import(dump)

        # Write articles.
        articles_output = self.wikipedia_articles(language)
        self.wf.write(articles, articles_output, name="article-writer")

        # Write categories.
        categories_output = self.wikipedia_categories(language)
        self.wf.write(categories, categories_output, name="category-writer")

        # Write redirects.
        redirects_output = self.wikipedia_redirects(language)
        self.wf.write(redirects, redirects_output, name="redirect-writer")

        return articles_output, categories_output, redirects_output
def wikimap(self, wikidata_items=None, language=None, name=None):
    """Task for building mapping from Wikipedia IDs (<wid>) to Wikidata IDs
    (<qid>). Returns file with frame store for mapping."""
    if language is None:
        language = flags.arg.language
    if wikidata_items is None:
        wikidata_items = self.wikidata_items()

    wiki_mapping = self.wf.map(wikidata_items, "wikipedia-mapping",
                               params={"language": language}, name=name)
    output = self.wikipedia_mapping(language)
    self.wf.write(wiki_mapping, output, name="mapping-writer")
    return output
def parse_wikipedia_articles(self,
                             articles=None,
                             categories=None,
                             redirects=None,
                             commons=None,
                             wikimap=None,
                             language=None):
    """Task for parsing Wikipedia articles to SLING documents and aliases.
    Returns channels for documents and aliases."""
    if language is None:
        language = flags.arg.language
    if articles is None:
        articles = self.wikipedia_articles(language)
    if categories is None:
        categories = self.wikipedia_categories(language)
    if redirects is None:
        redirects = self.wikipedia_redirects(language)
    if commons is None:
        commons = [
            self.language_defs(),
            self.template_defs(language),
            self.unit_defs(),
            self.calendar_defs(),
            self.country_defs(),
        ]
    if wikimap is None:
        wikimap = self.wikipedia_mapping(language)

    parser = self.wf.task("wikipedia-document-builder", "wikipedia-documents")
    parser.add_param("language", language)
    parser.add_param("skip_tables", True)
    self.wf.connect(self.wf.read(articles, name="article-reader"), parser)
    self.wf.connect(self.wf.read(categories, name="category-reader"), parser)
    parser.attach_input("commons", commons)
    parser.attach_input("wikimap", wikimap)
    parser.attach_input("redirects", redirects)
    documents = self.wf.channel(parser, format="message/document")
    aliases = self.wf.channel(parser, "aliases", format="message/qid:alias")
    catdocs = self.wf.channel(parser, "categories",
                              format="message/qid:alias")
    return documents, aliases, catdocs
def parse_wikipedia(self, language=None):
    """Parse Wikipedia articles and build alias table."""
    if language is None:
        language = flags.arg.language
    with self.wf.namespace(language + "-wikipedia"):
        with self.wf.namespace("mapping"):
            # Build mapping from Wikipedia IDs to Wikidata IDs.
            if not flags.arg.skip_wikipedia_mapping:
                self.wikimap(language=language)

        with self.wf.namespace("parsing"):
            # Parse Wikipedia articles to SLING documents.
            documents, aliases, catdocs = \
                self.parse_wikipedia_articles(language=language)

            # Write Wikipedia documents.
            document_output = self.wikipedia_documents(language)
            self.wf.write(documents, document_output,
                          name="document-writer",
                          params={"indexed": flags.arg.index})

            # Write Wikipedia category documents.
            category_document_output = \
                self.wikipedia_category_documents(language)
            self.wf.write(catdocs, category_document_output,
                          name="document-writer",
                          params={"indexed": flags.arg.index})

        with self.wf.namespace("aliases"):
            # Collect aliases.
            alias_output = self.wikipedia_aliases(language)
            self.wf.reduce(self.wf.shuffle(aliases, len(alias_output)),
                           alias_output,
                           "wikipedia-alias-reducer",
                           params={'language': language})

    return document_output, alias_output
def merge_wikipedia_categories(self, languages=None):
    """Merge Wikipedia categories for all languages."""
    if languages is None:
        languages = flags.arg.languages
    with self.wf.namespace("wikipedia-categories"):
        documents = []
        for language in languages:
            documents.extend(self.wikipedia_documents(language))
            documents.extend(self.wikipedia_category_documents(language))
        return self.wf.mapreduce(input=documents,
                                 output=self.wikipedia_items(),
                                 mapper="category-item-extractor",
                                 reducer="category-item-merger",
                                 format="message/frame")
def fused_items(self):
    """Resource for merged items. This is a set of record files where each
    item is represented as a frame."""
    return self.wf.resource("[email protected]", dir=corpora.wikidir(),
                            format="records/frame")
def knowledge_base(self):
    """Resource for knowledge base. This is a SLING frame store with frames
    for each Wikidata item and property plus additional schema
    information."""
    return self.wf.resource("kb.sling", dir=corpora.wikidir(),
                            format="store/frame")
def schema_defs(self):
    """Resources for schemas included in knowledge base."""
    return [
        self.language_defs(),
        self.calendar_defs(),
        self.country_defs(),
        self.unit_defs(),
        self.wikidata_defs(),
        self.wikipedia_defs()
    ]
def build_knowledge_base(self, items=None, properties=None, schemas=None):
    """Task for building knowledge base store with items, properties, and
    schemas."""
    if items is None:
        items = self.fused_items()
    if properties is None:
        properties = self.wikidata_properties()
    if schemas is None:
        schemas = self.schema_defs()

    with self.wf.namespace("wikidata"):
        # Prune information from Wikidata items.
        pruned_items = self.wf.map(items, "wikidata-pruner",
                                   params={"prune_aliases": True,
                                           "prune_wiki_links": True,
                                           "prune_category_members": True})

        # Collect property catalog.
        property_catalog = self.wf.map(properties,
                                       "wikidata-property-collector")

        # Collect frames into knowledge base store.
        parts = self.wf.collect(pruned_items, property_catalog, schemas)
        return self.wf.write(parts, self.knowledge_base(),
                             params={"snapshot": True})
def item_names(self, language=None):
    """Resource for item names in language. This is a set of record files
    with one SLING frame per item.

    <qid>: {
      alias: {
        name: "<alias>"
        lang: /lang/<lang>
        sources: ...
        count: ...
        form: ...
      }
      ...
    }
    """
    if language is None:
        language = flags.arg.language
    return self.wf.resource("[email protected]", dir=corpora.wikidir(language),
                            format="records/alias")
def name_table(self, language=None):
    """Resource for item name table. This is a repository with all the names
    and the items they are aliases for."""
    if language is None:
        language = flags.arg.language
    return self.wf.resource("name-table.repo", dir=corpora.wikidir(language),
                            format="repository")
def build_name_table(self, names=None, language=None):
    """Build name table for all items."""
    if language is None:
        language = flags.arg.language
    if names is None:
        names = self.item_names(language)

    with self.wf.namespace("name-table"):
        builder = self.wf.task("name-table-builder")
        builder.add_param("language", language)
        self.wf.connect(self.wf.read(names, name="name-reader"), builder)
        repo = self.name_table(language)
        builder.attach_output("repository", repo)
    return repo
def phrase_table(self, language=None):
    """Resource for item name phrase table. This is a repository with phrase
    fingerprints of the item names."""
    if language is None:
        language = flags.arg.language
    return self.wf.resource("phrase-table.repo",
                            dir=corpora.wikidir(language),
                            format="repository")
def build_phrase_table(self, names=None, language=None):
    """Build phrase table for all items."""
    if language is None:
        language = flags.arg.language
    if names is None:
        names = self.item_names(language)

    with self.wf.namespace("phrase-table"):
        builder = self.wf.task("phrase-table-builder")
        builder.add_param("language", language)
        builder.add_param("transfer_aliases", True)
        self.wf.connect(self.wf.read(names, name="name-reader"), builder)
        kb = self.knowledge_base()
        repo = self.phrase_table(language)
        builder.attach_input("commons", kb)
        builder.attach_output("repository", repo)
    return repo
def silver_documents(self, language=None):
    """Resource for silver-labeled documents."""
    if language is None:
        language = flags.arg.language
    return self.wf.resource("[email protected]", dir=self.workdir(language),
                            format="records/document")
def contains(needle, haystack) -> bool:
    """
    A syntactic sugar for the following:

    >>> for item in haystack:
    >>>     if needle == item:
    >>>         return True
    >>> return False

    Note that this is very much like Python's ``in`` operator; however, it is
    not quite the same, since ``in`` doesn't invoke the ``__eq__`` operator
    at every step! This function, for example, allows you to circumvent
    Python's limitations concerning
    :class:`~satella.coding.structures.ComparableEnum`

    :param needle: needle to check for
    :param haystack: haystack to check against
    :return: whether haystack contains the element
    """
    for item in haystack:
        if needle == item:
            return True
    return False
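# Usage example for contains() above; for ordinary values the result matches
# ``in``, but equality is forced through __eq__ on every element:
assert contains(3, [1, 2, 3]) is True
assert contains(5, [1, 2, 3]) is False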
def close(self) -> bool:
    """
    Check if the resource needs cleanup, and clean up this resource.

    Use like this:

    >>> class MyClose(Closeable):
    >>>     def close(self):
    >>>         if super().close():
    >>>             ...  # clean up

    :return: whether the cleanup should proceed

    Note that if the constructor was not invoked, a UserWarning is emitted
    instead of cleanup proceeding.
    """
    try:
        return not self.__finalized
    except AttributeError:
        warnings.warn('Attempted to clean up a non-initialized object',
                      UserWarning)
    finally:
        self.__finalized = True
def chain_callables(callable1: tp.Callable,
                    callable2: tp.Callable) -> tp.Callable:
    """
    Link two callables together. callable2, if it takes an argument, will
    receive callable1's result, and if it takes no arguments it will receive
    nothing.

    :param callable1: first callable to call
    :param callable2: callable to call with callable1's result
    :return: result of callable2
    """
    def inner(*args, **kwargs):
        res = callable1(*args, **kwargs)
        try:
            res = callable2(res)
        except TypeError as e:
            if 'positional arguments but' in e.args[0] \
                    and 'was given' in e.args[0] and 'takes' in e.args[0]:
                res = callable2()
            else:
                raise
        return res
    return inner
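# Usage example: the second callable receives the first one's result.
add_then_double = chain_callables(lambda x: x + 1, lambda y: y * 2)
assert add_then_double(3) == 8

# A zero-argument second callable is also accepted; the intermediate result
# is discarded (the TypeError from the extra argument is caught above).
constant = chain_callables(lambda x: x + 1, lambda: 42)
assert constant(3) == 42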
def source_to_function(src: tp.Union[tp.Callable, str]) \
        -> tp.Callable[[tp.Any], tp.Any]:
    """
    If src is callable, return it as-is.

    Otherwise, transform a string containing a Python expression with a
    variable x into a lambda. It will be treated as if it was appended to
    'lambda x: '.

    WARNING: Do not run untrusted data. Familiarize yourself with the dangers
    of passing unvalidated data to exec() or eval()!

    :param src: a callable or a Python string expression
    :return: a callable
    """
    if isinstance(src, str):
        q = dict(globals())
        exec('_precond = lambda x: ' + src, q)
        return q['_precond']
    else:
        return src
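# Usage example: compile the expression "x * 2" into a callable. Only ever
# do this with trusted input, per the warning above.
double = source_to_function('x * 2')
assert double(21) == 42
assert source_to_function(len) is len   # callables pass through unchanged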
def update_attr_if_none(obj: object, attr: str, value: tp.Any,
                        on_attribute_error: bool = True,
                        if_value_is_not_none: bool = False) -> object:
    """
    Update the object attribute if its value is None, or if reading it raises
    AttributeError (customizable via the on_attribute_error parameter).

    :param obj: object to alter
    :param attr: attribute to set
    :param value: value to set
    :param on_attribute_error: whether to proceed with setting the value on
        AttributeError while trying to read the given attribute. If False,
        AttributeError will be raised.
    :param if_value_is_not_none: update the object unconditionally, as long
        as value is not None
    :return: obj
    """
    if if_value_is_not_none:
        if value is not None:
            setattr(obj, attr, value)
    else:
        try:
            val = getattr(obj, attr)
            if val is None:
                setattr(obj, attr, value)
        except AttributeError:
            if on_attribute_error:
                setattr(obj, attr, value)
            else:
                raise
    return obj
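# Usage example (the _Config class is a hypothetical stand-in):
class _Config:
    timeout = None

_cfg = _Config()
update_attr_if_none(_cfg, 'timeout', 30)   # None -> set
assert _cfg.timeout == 30
update_attr_if_none(_cfg, 'timeout', 60)   # already non-None -> left alone
assert _cfg.timeout == 30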
def update_key_if_true(dictionary: tp.Dict, key: tp.Hashable, value: tp.Any,
                       flag: tp.Union[bool, _BlankType] = _BLANK) -> tp.Dict:
    """
    If flag is True, execute dictionary[key] = value.

    :param dictionary: dictionary to mutate
    :param key: dictionary key to use
    :param value: dictionary value to set
    :param flag: whether to execute the setting operation. If left at the
        default, the flag will be computed from the boolean value of value.
    :return: the dict itself
    """
    if flag is _BLANK:
        flag = bool(value)
    if flag:
        dictionary[key] = value
    return dictionary
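# Usage example: with the flag left at its default, truthiness of the value
# decides; an explicit flag overrides that.
_d = {}
update_key_if_true(_d, 'a', 5)             # bool(5) is True -> set
update_key_if_true(_d, 'b', 0)             # bool(0) is False -> skipped
update_key_if_true(_d, 'c', 0, flag=True)  # explicit flag wins -> set
assert _d == {'a': 5, 'c': 0}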
def call_with_arguments(function: tp.Callable,
                        arguments: tp.Dict[str, tp.Any]) -> tp.Any:
    """
    Call a function, giving it arguments via a dictionary.

    The dictionary should be a mapping of argument name to its value.

    :param function: function to call
    :param arguments: a dict of arguments: argument name => argument value.
        This dictionary will be modified!
    :return: return value of the function
    :raise TypeError: too few arguments, or some required arguments were
        missing
    :raise ValueError: too many arguments given
    """
    args = []
    kwargs = {}
    for param in signature(function).parameters.values():
        param_name = param.name
        param_kind = param.kind
        if param_name not in arguments:
            if param_kind in (Parameter.VAR_KEYWORD,
                              Parameter.VAR_POSITIONAL):
                continue
            elif param.default == Parameter.empty:
                raise TypeError('Argument %s not found' % (param_name,))
            else:
                continue
        if param_kind == Parameter.POSITIONAL_ONLY \
                or param_kind == Parameter.POSITIONAL_OR_KEYWORD:
            args.append(arguments.pop(param_name))
        elif param_kind == Parameter.VAR_POSITIONAL:
            args.extend(arguments.pop(param_name))
        elif param_kind == Parameter.KEYWORD_ONLY:
            kwargs[param_name] = arguments.pop(param_name)
        elif param_kind == Parameter.VAR_KEYWORD:
            kwargs.update(arguments.pop(param_name))
        else:
            raise TypeError('Unknown parameter type')
    if arguments:
        raise ValueError('Too many arguments provided')
    return function(*args, **kwargs)
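# Usage example: arguments are routed by name, regardless of parameter kind;
# the keyword-only parameter keeps its default since it is not supplied.
def _greet(greeting, name, *, punctuation='!'):
    return greeting + ', ' + name + punctuation

assert call_with_arguments(_greet, {'greeting': 'Hello',
                                    'name': 'world'}) == 'Hello, world!'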
def assert_preloaded(self, for_ts: int) -> bool:
    """
    Assert that every next preloaded value can report for for_ts

    :param for_ts: timestamp to report for
    :return: whether every value can report for for_ts
    """
    for i, _ in enumerate(self.series):
        if self.super_next_preloaded_values[i] is None:
            if self.next_preloaded_values[i][0] > for_ts:
                return False
        else:
            try:
                while not (self.next_preloaded_values[i][0] <= for_ts < self.super_next_preloaded_values[i][0]):
                    self.advance(i)
            except TypeError:
                # super_next_preloaded_values[i] became None - this series is exhausted
                if self.next_preloaded_values[i][0] > for_ts:
                    return False
    return True
def assert_have_timestamps(self) -> None: """ Assert that self.timestamps is not empty, or raise StopIteration if it can't be filled in """ if self.timestamps: return if all(map(_is_x_none, self.super_next_preloaded_values)): raise StopIteration('sequence exhausted') self.next() if not self.timestamps: raise StopIteration('cannot advance series anymore')
def silence_excs(*exc_types: ExceptionClassType, returns=None,
                 returns_factory: tp.Optional[NoArgCallable[tp.Any]] = None):
    """
    Silence given exception types.

    Can be either a decorator or a context manager.

    If you are using it as a decorator, you can specify what value the function
    should return by using the returns kwarg:

    >>> @silence_excs(KeyError, returns=5)
    ... def returns_5():
    ...     raise KeyError()
    >>> assert returns_5() == 5

    Or, if you prefer, you can specify a callable that produces the value to return:

    >>> @silence_excs(KeyError, returns_factory=lambda: 5)
    ... def returns_5():
    ...     raise KeyError()
    >>> assert returns_5() == 5

    :raises ValueError: you gave both returns and returns_factory. You can only pass one of them!
    """
    return rethrow_as(exc_types, None, returns=returns, returns_factory=returns_factory)
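# A minimal sketch of the context-manager form mentioned in the docstring
# above (an illustration, assuming silence_excs is in scope):
d = {}
with silence_excs(KeyError):
    d['missing']   # the KeyError is swallowed and the block simply exits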
def analyze_exception(self, e, args, kwargs) -> bool: """Return whether the exception has been logged""" if isinstance(e, self.exc_types): format_dict = {'args': args, 'kwargs': kwargs} if self.locals is not None: format_dict.update(self.locals) format_dict['e'] = e self.logger.log(self.severity, self.format_string.format(**format_dict), exc_info=e) return True return False
def catch_exception(exc_class: tp.Union[ExceptionClassType, tp.Tuple[ExceptionClassType, ...]],
                    clb: NoArgCallable[tp.Optional[T]],
                    return_instead: tp.Optional[T] = None,
                    return_value_on_no_exception: bool = False) -> tp.Union[Exception, T]:
    """
    Catch exception of given type and return it. Functionally equivalent to:

    >>> try:
    ...     v = clb()
    ... except exc_class as e:
    ...     return e
    >>> if return_instead is not None:
    ...     return return_instead
    >>> if return_value_on_no_exception:
    ...     return v
    >>> raise ValueError('Callable executed without error')

    If a different class of exception is caught, it will be propagated.

    :param exc_class: exception classes to catch
    :param clb: a zero-argument callable expected to raise the exception
    :param return_instead: what to return instead of the function's result if it didn't
        end in an exception
    :param return_value_on_no_exception: whether to return the function's result if the
        exception didn't happen
    :raises ValueError: an exception was not thrown
    """
    try:
        result = clb()
    except exc_class as e:
        return e
    if return_instead is not None:
        return return_instead
    if return_value_on_no_exception:
        return result
    raise ValueError('Callable executed without error')
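# A minimal usage sketch of catch_exception (an illustration, assuming the
# function above is in scope):
exc = catch_exception(ZeroDivisionError, lambda: 1 / 0)
assert isinstance(exc, ZeroDivisionError)

# If the callable succeeds, its result can be passed through instead:
val = catch_exception(ZeroDivisionError, lambda: 4 // 2,
                      return_value_on_no_exception=True)
assert val == 2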
def max(self) -> T:
    """
    Return maximum element. None elements will be ignored.
    """
    item = None     # start with no candidate, so a None in cell (0, 0) cannot poison the comparison
    for row in self:
        for value in row:
            if value is None:
                continue
            if item is None or value > item:
                item = value
    return item
def min(self) -> T:
    """
    Return minimum element. None elements will be ignored.
    """
    item = None     # start with no candidate, so a None in cell (0, 0) cannot poison the comparison
    for row in self:
        for value in row:
            if value is None:
                continue
            if item is None or value < item:
                item = value
    return item
def append_row(self, y: tp.Iterable[T]) -> None:
    """
    Append a row to the bottom of the matrix

    :param y: an iterable yielding values for consecutive columns
    """
    next_row = self.no_rows
    for col_no, z in enumerate(y):
        self[col_no, next_row] = z
def clear(self) -> None: """ Clear the contents of the sparse matrix """ self.rows_dict = collections.defaultdict(lambda: collections.defaultdict(lambda: None)) self.known_column_count = {} # tp.Dict[int, int] column_no => amount self.no_cols = 0 self.no_rows = 0
@classmethod
def from_iterable(cls, y: tp.Iterable[tp.Iterable[T]]):
    """
    Construct a sparse matrix from a row-first iterable of iterables,
    each inner iterable yielding the values for consecutive columns of that row.

    :param y: an iterable describing the sparse matrix
    :return: a sparse matrix object
    """
    sm = cls()      # use cls() so that subclasses construct instances of themselves
    for row_no, cols in enumerate(y):
        for col_no, value in enumerate(cols):
            sm[col_no, row_no] = value
    return sm
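# A minimal sketch combining from_iterable with max()/min() (an illustration,
# assuming the SparseMatrix class these methods belong to is in scope):
sm = SparseMatrix.from_iterable([[1, 2], [3, 4]])
assert sm.max() == 4
assert sm.min() == 1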
def shoot(self) -> 'SparseMatrix': """ Insert an empty cell between current cells. So the matrix which looked like [[1, 2], [3, 4]] will now look like [[1, None, 2], [None, None, None], [3, None, 4]] """ new_sparse = SparseMatrix() for row_no, row in enumerate(self): for col_no, value in enumerate(row): new_sparse[col_no * 2, row_no * 2] = value return new_sparse
def delete_row(self, row_no: int) -> None:
    """
    Delete a row with specified number

    :param row_no: number of the row to delete
    """
    cols = list(self.rows_dict[row_no].keys())  # copy the keys, since deletion mutates the dict
    for col_no in cols:
        del self[col_no, row_no]
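# A minimal sketch of append_row, shoot and delete_row together (an
# illustration; cell indexing as (column, row) is assumed from from_iterable):
sm = SparseMatrix.from_iterable([[1, 2], [3, 4]])
sm.append_row([5, 6])        # bottom row is now 5, 6
sm.delete_row(1)             # removes the cells holding 3 and 4
spread = SparseMatrix.from_iterable([[1, 2], [3, 4]]).shoot()
assert spread[2, 0] == 2     # original columns are now spaced two apart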
def skip(self, n: int) -> None: """ Advance the pointer by n bytes :param n: bytes to advance :raises NotEnoughBytes: not enough bytes remain in the stream! """ self.assert_has_bytes(n) self.pointer += n
def assert_has_bytes(self, n: int) -> None:
    """
    Assert that at least n bytes remain to be consumed. This does not advance the pointer.

    :param n: number of bytes that must be available
    :raises NotEnoughBytes: not enough bytes remain in the stream!
    """
    if self.length + self.init_ofs < self.pointer + n:
        raise NotEnoughBytes('Not enough bytes')
def reset(self) -> None:
    """
    Reset the internal pointer to its starting value
    """
    self.pointer = self.init_ofs
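# A self-contained sketch of the pointer arithmetic behind skip(),
# assert_has_bytes() and reset() above. MiniParser and this NotEnoughBytes
# stand-in are illustrative only; the real class holds data, an initial
# offset (init_ofs), a length and a pointer.
class NotEnoughBytes(Exception):
    pass


class MiniParser:
    def __init__(self, data: bytes, init_ofs: int = 0):
        self.data = data
        self.init_ofs = init_ofs
        self.length = len(data) - init_ofs   # bytes available from init_ofs on
        self.pointer = init_ofs

    def assert_has_bytes(self, n: int) -> None:
        if self.length + self.init_ofs < self.pointer + n:
            raise NotEnoughBytes('Not enough bytes')

    def skip(self, n: int) -> None:
        self.assert_has_bytes(n)
        self.pointer += n

    def reset(self) -> None:
        self.pointer = self.init_ofs


p = MiniParser(b'\x00\x01\x02\x03')
p.skip(2)                    # pointer advances to 2
try:
    p.skip(3)                # only 2 bytes remain, so this raises
except NotEnoughBytes:
    p.reset()                # pointer back to init_ofs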