Dataset schema: language (string, 6 classes); original_string (string, 25 to 887k characters); text (string, 25 to 887k characters).
Python
def resnet34(feat_list, pretrained_model_path):
    """Constructs a ResNet-34 model.

    Args:
        feat_list: names of the intermediate features the model should expose.
        pretrained_model_path: path to ImageNet pre-trained weights to load.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], feat_list=feat_list,
                   pretrained_model_path=pretrained_model_path)
    return model
Python
def resnet50(feat_list, pretrained_model_path):
    """Constructs a ResNet-50 model.

    Args:
        feat_list: names of the intermediate features the model should expose.
        pretrained_model_path: path to ImageNet pre-trained weights to load.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], feat_list=feat_list,
                   pretrained_model_path=pretrained_model_path)
    return model
Python
def resnet101(feat_list, pretrained_model_path):
    """Constructs a ResNet-101 model.

    Args:
        feat_list: names of the intermediate features the model should expose.
        pretrained_model_path: path to ImageNet pre-trained weights to load.
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], feat_list=feat_list,
                   pretrained_model_path=pretrained_model_path)
    return model
Python
def resnet152(feat_list, pretrained_model_path):
    """Constructs a ResNet-152 model.

    Args:
        feat_list: names of the intermediate features the model should expose.
        pretrained_model_path: path to ImageNet pre-trained weights to load.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], feat_list=feat_list,
                   pretrained_model_path=pretrained_model_path)
    return model
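A minimal usage sketch for these builder functions, assuming the project's ResNet class accepts feature names through feat_list; the names and the weight path below are illustrative placeholders, not values taken from the repository:

# Hypothetical call: expose two intermediate feature maps from a ResNet-50 backbone.
# "conv4", "conv5" and the path are placeholders for whatever this codebase supports.
backbone = resnet50(feat_list=["conv4", "conv5"],
                    pretrained_model_path="data/pretrained/resnet50.pth")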
Python
def _get_image_blob(im):
    """Converts an image into a network input.

    Arguments:
        im (ndarray): a color image in BGR order

    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (list): list of image scales (relative to im) used
            in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= np.array([[[102.9801, 115.9465, 122.7717]]])

    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])

    processed_ims = []
    im_scale_factors = []

    for target_size in cfg.SCALES:
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.TEST.COMMON.MAX_SIZE:
            im_scale = float(cfg.TEST.COMMON.MAX_SIZE) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, np.array(im_scale_factors)
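The scale selection in _get_image_blob keeps the image's short side at the target size unless that would push the long side past MAX_SIZE. A small self-contained sketch of that arithmetic, using 600/1000 as stand-ins for cfg.SCALES and cfg.TEST.COMMON.MAX_SIZE (common Faster R-CNN defaults, not necessarily this project's values):

import numpy as np

def compute_im_scale(h, w, target_size=600, max_size=1000):
    """Resize factor for one level of the image pyramid."""
    im_size_min = min(h, w)
    im_size_max = max(h, w)
    im_scale = float(target_size) / float(im_size_min)
    # Clamp so the longer side does not exceed max_size.
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    return im_scale

print(compute_im_scale(480, 640))    # 1.25  (short side scaled up to 600)
print(compute_im_scale(480, 1920))   # ~0.52 (clamped by the 1000-pixel long side)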
Python
def rcnn_im_detect(net, im, boxes, feat_list=()):
    """Detect object classes in an image given object proposals.

    Arguments:
        net (caffe.Net): Fast R-CNN network to use
        im (ndarray): color image to test (in BGR order)
        boxes (ndarray): R x 4 array of object proposals or None (for RPN)
        feat_list: a list that contains feature names you need.
            (SUPPORT: conv1-conv5, fc, and logit)

    Returns:
        scores (ndarray): R x K array of object class scores (K includes
            background as object category 0)
        boxes (ndarray): R x (4*K) array of predicted bounding boxes
        attr_scores (ndarray): R x M array of attribute class scores
    """
    feat_dict = {
        "conv1": "conv1",
        "conv2": "res2c",
        "conv3": "res3b3",
        "conv4": "res4b22",
        "conv5": "res5c",
        "fc": "pool5_flat",
        "logit": "cls_score"
    }

    blobs, im_scales = _get_blobs(im, boxes)

    # Purpose: save computation resource for duplicated ROIs.
    if cfg.DEDUP_BOXES > 0:
        v = np.array([1, 1e3, 1e6, 1e9, 1e12])
        hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
        _, index, inv_index = np.unique(hashes, return_index=True,
                                        return_inverse=True)
        blobs['rois'] = blobs['rois'][index, :]
        boxes = boxes[index, :]

    im_blob = blobs['data']
    blobs['im_info'] = np.array(
        [[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
        dtype=np.float32)

    # reshape network inputs
    net.blobs['data'].reshape(*(blobs['data'].shape))
    net.blobs['rois'].reshape(*(blobs['rois'].shape))
    if 'im_info' in net.blobs:
        net.blobs['im_info'].reshape(*(blobs['im_info'].shape))

    # do forward
    forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
    forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
    if 'im_info' in net.blobs:
        forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)

    blobs_out = net.forward(**forward_kwargs)

    feats = []
    if len(feat_list) > 0:
        for f in feat_list:
            feats.append(net.blobs[feat_dict[f]])

    # use softmax estimated probabilities
    scores = blobs_out['cls_prob']

    if cfg.TEST.COMMON.BBOX_REG:
        # Apply bounding-box regression deltas
        box_deltas = blobs_out['bbox_pred']
        pred_boxes = bbox_transform_inv(boxes, box_deltas)
        pred_boxes = clip_boxes(pred_boxes, im.shape)
    else:
        # Simply repeat the boxes, once for each class
        pred_boxes = np.tile(boxes, (1, scores.shape[1]))

    if cfg.DEDUP_BOXES > 0:
        # Map scores and predictions back to the original set of boxes
        scores = scores[inv_index, :]
        pred_boxes = pred_boxes[inv_index, :]

    if 'attr_prob' in net.blobs:
        attr_scores = blobs_out['attr_prob']
    else:
        attr_scores = None

    if 'rel_prob' in net.blobs:
        rel_scores = blobs_out['rel_prob']
    else:
        rel_scores = None

    return scores, pred_boxes, attr_scores, rel_scores, feats
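The cfg.DEDUP_BOXES block in rcnn_im_detect collapses near-identical ROIs by hashing their rounded coordinates, runs the network on the unique set, and uses the inverse index to broadcast results back to every original proposal. A standalone illustration of that trick on made-up boxes (the 1/16 scale is used here as an assumed stand-in for cfg.DEDUP_BOXES):

import numpy as np

rois = np.array([[0, 10.2, 20.1, 50.0, 60.0],
                 [0, 10.2, 20.1, 50.0, 60.0],   # exact duplicate of the first ROI
                 [0, 30.0, 40.0, 70.0, 90.0]])

dedup_scale = 1.0 / 16.0                 # stand-in for cfg.DEDUP_BOXES
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(rois * dedup_scale).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True, return_inverse=True)

unique_rois = rois[index, :]             # only 2 boxes go through the network
scores = np.array([0.9, 0.3])            # pretend per-unique-box scores
print(scores[inv_index])                 # [0.9 0.9 0.3], mapped back to all 3 proposals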
Python
def RelaTransform(Array):
    '''Transform the number of relationship categories from two to four.'''
    ArrayTran = Array.clone()
    for i in range(Array.shape[0]):
        shape = torch.nonzero(Array[i]).max().item() + 1
        Label = range(shape)
        Label_perm = list(itertools.permutations(Label, 2))
        for Label_pair in Label_perm:
            temp = ArrayTran[i, Label_pair[0], Label_pair[1]]
            if temp != 1 and temp != 2:
                if check_con(Array[i], Label_pair[0], Label_pair[1], 1):
                    ArrayTran[i, Label_pair[0], Label_pair[1]] = 4
                    ArrayTran[i, Label_pair[1], Label_pair[0]] = 5
    return ArrayTran
Python
def crf_single_img(decode_list, ConArray, x_s, x, obj_num_img):
    '''
    x: The unary function of all relationships in an image.
       shape: [num_relationship, 5]
    '''
    ra = 0.5
    if x.shape[0] == 0:
        return x

    Q = x.detach()
    Q = F.softmax(Q, dim=1)
    _, MaxRel = torch.max(x, dim=1)

    E_p = torch.zeros_like(Q)
    for r_idx in range(x.size(0)):
        # r_idx: idx of relationship
        src1, des1 = decode_list[r_idx]
        for c_idx in range(x.size(1)):
            # c_idx: idx of relationship class
            # This block adds the pairwise term according to symmetry between the two
            # directions of a pair. Integer division keeps the paired class index an int.
            if r_idx % 2 == 0:
                if c_idx == 2:
                    # For symmetry of no relationship
                    E_p[r_idx, c_idx] += 2.5 * ra * Q[r_idx + 1, c_idx]
                else:
                    # For symmetry of father-child and far father - far child
                    E_p[r_idx, c_idx] += 2.5 * ra * Q[r_idx + 1, abs(c_idx // 3 * 4 - (1 - c_idx % 2))]
            else:
                if c_idx == 2:
                    # For symmetry of no relationship
                    E_p[r_idx, c_idx] += 2.5 * ra * Q[r_idx - 1, c_idx]
                else:
                    # For symmetry of father-child and far father - far child
                    E_p[r_idx, c_idx] += 2.5 * ra * Q[r_idx - 1, abs(c_idx // 3 * 4 - (1 - c_idx % 2))]

            if c_idx != 2:
                for rr_idx in range(x.size(0)):
                    if rr_idx != r_idx:
                        src2, des2 = decode_list[rr_idx]
                        if c_idx == 0:
                            # Penalty for redundant edges
                            if ConArray[src1, src2, 0] and ConArray[des2, des1, 0]:
                                E_p[r_idx, c_idx] -= 1.2 * ra * Q[rr_idx, 0]
                            # Penalty for circles
                            if ConArray[des1, src2, 0] and ConArray[des2, src1, 0]:
                                E_p[r_idx, c_idx] -= 1 * ra * Q[rr_idx, 0]
                        if c_idx == 1:
                            # Penalty for redundant edges
                            if ConArray[src1, src2, 1] and ConArray[des2, des1, 1]:
                                E_p[r_idx, c_idx] -= 1.2 * ra * Q[rr_idx, 1]
                            # Penalty for circles
                            if ConArray[des1, src2, 1] and ConArray[des2, src1, 1]:
                                E_p[r_idx, c_idx] -= 1 * ra * Q[rr_idx, 1]
                        if c_idx == 3:
                            # Award for near-far match
                            if ConArray[src1, src2, 0] and ConArray[des2, des1, 0]:
                                E_p[r_idx, c_idx] += 4 * ra * Q[rr_idx, 0]
                        if c_idx == 4:
                            # Award for near-far match
                            if ConArray[src1, src2, 1] and ConArray[des2, des1, 1]:
                                E_p[r_idx, c_idx] += 4 * ra * Q[rr_idx, 1]
    return x_s + E_p
Python
def forward(self, data_batch):
    """Applies network layers and ops on the input image(s).

    Args:
        data_batch: [image tensor of shape [batch,3,300,300], im_info,
            gt_boxes, num_boxes, rel_mat]

    Return:
        Depending on phase:
        test:
            Variable(tensor) of output class label predictions,
            confidence score, and corresponding location predictions for
            each object detected. Shape: [batch,topk,7]
        train:
            list of concat outputs from:
                1: confidence layers, Shape: [batch*num_priors,num_classes]
                2: localization layers, Shape: [batch,num_priors*4]
                3: priorbox layers, Shape: [2,num_priors*4]
    """
    x = data_batch[0]
    im_info = data_batch[1]
    gt_boxes = data_batch[2]
    num_boxes = data_batch[3]
    rel_mat = data_batch[4]

    if self.training:
        self.iter_counter += 1

    sources = []
    base_feat, x = self.FeatExt(x)
    base_feat = self.L2Norm(base_feat)
    sources.append(base_feat)

    for m in self.extra_conv:
        x = m(x)
        sources.append(x)

    loc, conf = self._get_obj_det_result(sources)
    SSD_loss_cls, SSD_loss_bbox = 0, 0
    if self.training:
        predictions = (loc, conf, self.priors.type_as(loc))
        SSD_loss_bbox, SSD_loss_cls = self.criterion(predictions, gt_boxes, num_boxes)
    conf = self.softmax(conf)

    # generate object RoIs.
    obj_rois, obj_num = torch.Tensor([]).type_as(loc), torch.Tensor([]).type_as(num_boxes)
    # online data: detected boxes (a membership test is needed here; comparing the
    # config string against 'all' alone would not cover the 'online' setting)
    if not self.training or cfg.TRAIN.VMRN.TRAINING_DATA in ('all', 'online'):
        obj_rois, obj_num = self._object_detection(self.priors.type_as(loc), conf, loc,
                                                   self.batch_size, im_info.data)
    # offline data: ground-truth boxes
    if self.training and cfg.TRAIN.VMRN.TRAINING_DATA in ('all', 'offline'):
        for i in range(self.batch_size):
            img_ind = (i * torch.ones(num_boxes[i].item(), 1)).type_as(gt_boxes)
            obj_rois = torch.cat([obj_rois,
                                  torch.cat([img_ind, (gt_boxes[i][:num_boxes[i]])], 1)])
        obj_num = torch.cat([obj_num, num_boxes])

    obj_labels = torch.Tensor([]).type_as(gt_boxes).long()
    if obj_rois.size(0) > 0:
        obj_labels = obj_rois[:, 5]
        obj_rois = obj_rois[:, :5]

    VMRN_rel_loss_cls, reg_loss = 0, 0
    if (obj_num > 1).sum().item() > 0:
        rel_cls_score, rel_cls_prob, reg_loss = self._get_rel_det_result(base_feat, obj_rois,
                                                                         obj_num, im_info)
        if self.training:
            obj_pair_rel_label = self._generate_rel_labels(obj_rois, gt_boxes, obj_num,
                                                           rel_mat, rel_cls_prob.size(0))
            VMRN_rel_loss_cls = self._rel_det_loss_comp(
                obj_pair_rel_label.type_as(gt_boxes).long(), rel_cls_score)
        else:
            rel_cls_prob = self._rel_cls_prob_post_process(rel_cls_prob)
    else:
        rel_cls_prob = torch.Tensor([]).type_as(conf)

    rel_result = None
    if not self.training:
        if obj_rois.numel() > 0:
            pred_boxes = obj_rois.data[:, 1:5]
            pred_boxes[:, 0::2] /= im_info[0][3].item()
            pred_boxes[:, 1::2] /= im_info[0][2].item()
            rel_result = (pred_boxes, obj_labels, rel_cls_prob.data)
        else:
            rel_result = (obj_rois.data, obj_labels, rel_cls_prob.data)

    return loc, conf, rel_result, SSD_loss_bbox, SSD_loss_cls, VMRN_rel_loss_cls, reg_loss
Python
def _get_image_blob(roidb):
    """Builds an input blob from the images in the roidb at the specified scales."""
    # remember: cv2.imread loads the picture in BGR order
    im = cv2.imread(roidb['image'])
    im = np.rot90(im, roidb['rotated'])

    def check_and_modify_image(im, roidb):
        # For some images, the size of PIL.Image.open does not match that of cv2.imread.
        # For now, this is just an expedient rather than a perfect solution.
        w_r = roidb["width"]
        h_r = roidb["height"]
        if w_r == im.shape[0] and h_r == im.shape[1] and w_r != h_r:
            warnings.warn("The size of PIL.Image.open does not match that of cv2.imread. "
                          "Rotating the image by 90 degrees clockwise. Image: " + roidb["image"])
            im = np.rot90(im, 3)
        assert w_r == im.shape[1] and h_r == im.shape[0]
        return im

    im = check_and_modify_image(im, roidb)

    if len(im.shape) == 2:
        im = im[:, :, np.newaxis]
        im = np.concatenate((im, im, im), axis=2)
    # BGR to RGB
    if cfg.PRETRAIN_TYPE == "pytorch":
        im = im[:, :, ::-1]
    im = im.astype(np.float32, copy=False)
    return im
Python
def _log_sum_exp(self, x):
    """Utility function for computing log_sum_exp in a numerically stable way.

    This will be used to determine the unaveraged confidence loss across
    all examples in a batch.

    Args:
        x (Variable(tensor)): conf_preds from conf layers
    """
    x_max = x.data.max()
    return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max
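For reference, a minimal standalone check of the max-shift trick used in this loss; it compares the shifted computation against torch.logsumexp on values large enough to overflow a naive exp:

import torch

x = torch.tensor([[1000.0, 1000.5,  999.0],
                  [ 998.0, 1001.0,  997.5]])

x_max = x.max()   # scalar shift, as in the function above
stable = torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max
naive = torch.log(torch.sum(torch.exp(x), 1, keepdim=True))

print(stable.squeeze())              # finite, roughly [1001.1041, 1001.0770]
print(naive.squeeze())               # [inf, inf] because exp overflowed
print(torch.logsumexp(x, dim=1))     # matches the stable version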
Python
def image_path_from_index(self, index):
    """Construct an image path from the image's "index" identifier."""
    image_path = os.path.join(self._data_path, 'Images',
                              index + 'r' + self._image_ext)
    assert os.path.exists(image_path), \
        'Path does not exist: {}'.format(image_path)
    return image_path
Python
def _load_image_set_index(self):
    """Load the indexes listed in this dataset's image set file."""
    # Example path to image set file:
    # self._devkit_path + /Cornell/ImageSets/test.txt
    if isinstance(self._image_set, list):
        image_index = []
        for file in self._image_set:
            image_set_file = os.path.join(self._data_path, 'ImageSets',
                                          self._split, file + '.txt')
            assert os.path.exists(image_set_file), \
                'Path does not exist: {}'.format(image_set_file)
            with open(image_set_file) as f:
                image_index += [x.strip() for x in f.readlines()]
    else:
        image_set_file = os.path.join(self._data_path, 'ImageSets',
                                      self._split, self._image_set + '.txt')
        assert os.path.exists(image_set_file), \
            'Path does not exist: {}'.format(image_set_file)
        with open(image_set_file) as f:
            image_index = [x.strip() for x in f.readlines()]
    return image_index
Python
def _get_default_path(self):
    """Return the default path where the Cornell dataset is expected to be installed."""
    return os.path.join(cfg.DATA_DIR, 'Cornell')
Python
def _load_annotation(self, index):
    """Load grasp rectangles and object bounding box info from the Cornell text files."""
    pos_filename = os.path.join(self._data_path, 'Annotations', index + 'cpos.txt')
    neg_filename = os.path.join(self._data_path, 'Annotations', index + 'cneg.txt')
    grasps = np.loadtxt(pos_filename)
    non_grasps = np.loadtxt(neg_filename)

    # Each grasp rectangle is stored as four consecutive (x, y) lines.
    # Integer division keeps num_grasps usable as an array dimension.
    num_grasps = grasps.shape[0] // 4
    boxes = np.zeros((num_grasps, 8), dtype=np.float32)

    # Load object bounding boxes into a data frame.
    for id in range(num_grasps):
        # First Line Number
        fl = 4 * id
        # check label
        grasp = np.array([grasps[fl][0], grasps[fl][1],
                          grasps[fl + 1][0], grasps[fl + 1][1],
                          grasps[fl + 2][0], grasps[fl + 2][1],
                          grasps[fl + 3][0], grasps[fl + 3][1]])
        checked = ((np.isnan(grasp) > 0).sum() == 0)
        if checked:
            # zero based coordinates
            boxes[id, :] = grasp - 1

    keep = boxes.sum(1) > 0
    obj_boxes = np.expand_dims(self._image_bbox[index], axis=0)

    return {'grasps': boxes[keep],
            'boxes': obj_boxes,
            'rotated': 0}
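The Cornell annotation format stores one grasp rectangle as four consecutive (x, y) vertex lines, and rectangles containing NaN vertices are dropped. A self-contained sketch of that parsing step on an in-memory array (the sample coordinates are invented):

import numpy as np

# Two grasp rectangles, 4 vertices each; the second contains a NaN vertex.
grasps = np.array([[10.0, 20.0], [30.0, 20.0], [30.0, 40.0], [10.0, 40.0],
                   [np.nan, 5.0], [15.0, 5.0], [15.0, 25.0], [5.0, 25.0]])

num_grasps = grasps.shape[0] // 4
boxes = np.zeros((num_grasps, 8), dtype=np.float32)
for i in range(num_grasps):
    rect = grasps[4 * i: 4 * i + 4].reshape(-1)   # (x1, y1, ..., x4, y4)
    if not np.isnan(rect).any():
        boxes[i] = rect - 1                        # zero-based coordinates

keep = boxes.sum(1) > 0
print(boxes[keep])   # only the first, NaN-free rectangle survives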
Python
def _log_sum_exp(self, x):
    """Utility function for computing log_sum_exp in a numerically stable way.

    This will be used to determine the unaveraged confidence loss across
    all examples in a batch.

    Args:
        x (Variable(tensor)): conf_preds from conf layers
    """
    x_max, _ = x.data.max(dim=1, keepdim=True)
    return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max
Python
def prepare_roidb(imdb):
    """Enrich the imdb's roidb by adding some derived quantities that
    are useful for training. This function precomputes the maximum
    overlap, taken over ground-truth boxes, between each ROI and
    each ground-truth box. The class with maximum overlap is also
    recorded.
    """
    roidb = imdb.roidb
    if not (imdb.name.startswith('coco') or isinstance(imdb, joint_od)):
        widths = imdb.widths
        heights = imdb.heights

    for i in range(len(imdb.image_index)):
        roidb[i]['img_id'] = imdb.image_id_at(i)
        roidb[i]['image'] = imdb.image_path_at(i)
        if not (imdb.name.startswith('coco') or isinstance(imdb, joint_od)):
            roidb[i]['width'] = widths[i]
            roidb[i]['height'] = heights[i]

        # TODO: There may be replicated img_id for different images. Deal with them!
        if roidb[i]['img_id'].startswith("coco"):
            roidb[i]['img_id'] = roidb[i]['img_id'].split("_")[1]
        elif roidb[i]['img_id'].startswith("vg"):
            roidb[i]['img_id'] = roidb[i]['img_id'].split("_")[1]

        # need gt_overlaps as a dense array for argmax
        if 'gt_overlaps' in roidb[i]:
            gt_overlaps = roidb[i]['gt_overlaps'].toarray()
            # max overlap with gt over classes (columns)
            max_overlaps = gt_overlaps.max(axis=1)
            # gt class that had the max overlap
            max_classes = gt_overlaps.argmax(axis=1)
            roidb[i]['max_classes'] = max_classes
            roidb[i]['max_overlaps'] = max_overlaps
            # sanity checks
            # max overlap of 0 => class should be zero (background)
            zero_inds = np.where(max_overlaps == 0)[0]
            assert all(max_classes[zero_inds] == 0)
            # max overlap > 0 => class should not be zero (must be a fg class)
            nonzero_inds = np.where(max_overlaps > 0)[0]
            assert all(max_classes[nonzero_inds] != 0)
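The overlap bookkeeping in prepare_roidb reduces to a row-wise max/argmax over the ROI-by-class overlap matrix. A small dense illustration with invented values, mirroring the sanity checks above:

import numpy as np

gt_overlaps = np.array([[0.0, 0.8, 0.1],    # ROI 0 overlaps class 1 most
                        [0.0, 0.0, 0.0],    # ROI 1 is background
                        [0.0, 0.2, 0.6]])   # ROI 2 overlaps class 2 most

max_overlaps = gt_overlaps.max(axis=1)      # [0.8, 0.0, 0.6]
max_classes = gt_overlaps.argmax(axis=1)    # [1, 0, 2]

# max overlap of 0 => background class; max overlap > 0 => a foreground class
assert all(max_classes[np.where(max_overlaps == 0)[0]] == 0)
assert all(max_classes[np.where(max_overlaps > 0)[0]] != 0)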
Python
def prepare(self, obj):
    """Downgrade the relevance of international organizations in search results."""
    return super(OrgIndex, self).prepare(obj)
Python
def prepare_population(self, obj):
    """Returns the sum of all member nation populations."""
    members = obj.members.all()
    if not members:
        return None
    return sum([member.population for member in members])
Python
def search(request):
    """View function for searching all site content.

    The form class takes care of querying, filtering, and ordering.
    """
    form = FactSearchForm(request.GET)
    sq = form.search()
    sort = form.cleaned_data['sort']  # is_valid called by search method
    facets = sq.facet_counts()
    return render(request, "search/result_list.html",
                  RequestContext(request, {"results": sq,
                                           'form': form,
                                           'sort': sort,
                                           'facets': facets}))
Python
def query_update(context, field, value=''):
    """Replaces the given HTTP GET query parameter with the provided value."""
    params = context['request'].GET.copy()
    params[field] = value
    return params.urlencode()
Python
def filter_data(txt):
    """
    Remove unwanted lines from txt.

    :param txt: input text to be processed
    :return: new text with unwanted lines removed
    """
    lc = 0
    new_txt = ''
    # Process each line, delimited by a newline
    for line in txt.split('\n'):
        if len(line) > 0:
            lc += 1
            if line[0] == '<':
                # ignore lines beginning with "<"
                continue
            if line[0] == '(':
                # ignore lines beginning with "("
                continue
            new_txt += line + '\n'  # add the line to the output text
    return new_txt
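A quick usage check of the filter. It is called here as a plain function; elsewhere in this collection it is invoked as BuildTrainingDataFiles.filter_data, so in the original project it may be a static method on that class:

# Lines starting with "<" or "(" are dropped; everything else is kept.
sample = "<doc id=1>\n(parenthetical note)\nThis line is kept.\nSo is this one.\n"
print(filter_data(sample))
# This line is kept.
# So is this one.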
Python
def start_building(base_input_dir, base_output_dir):
    """
    Pre-process all the input files to produce a new directory.

    :param base_input_dir: location of files to be processed.
    :param base_output_dir: location of the output files
    :return: nothing.
    """
    # Check that the input directory exists
    if not os.path.isdir(base_input_dir):
        print('***error***, the input directory does not exist:', base_input_dir)
        return

    # Create the output directory if it does not exist
    if not os.path.exists(base_output_dir):
        os.makedirs(base_output_dir)

    # Process all the files in each top level directory. There should be one directory for each
    # language to be processed.
    for sub_dir in os.listdir(base_input_dir):
        sub_dir_full_name = os.path.join(base_input_dir, sub_dir)  # the language sub-directory
        print('Processing directory:', sub_dir_full_name)
        language_txt = ''

        # Process each file within the sub-directory
        for file_name in os.listdir(sub_dir_full_name):
            full_file_name = os.path.join(sub_dir_full_name, file_name)
            with open(full_file_name, 'r') as fd:
                try:
                    txt = fd.read()
                except UnicodeDecodeError:
                    print('Error on Unicode Decode. File will be ignored:', full_file_name)
                else:
                    filt_txt = BuildTrainingDataFiles.filter_data(txt)
                    language_txt += filt_txt

        # Write out a complete text file for this language
        language_file_name = os.path.join(base_output_dir, 'lang-' + sub_dir + '.txt')
        with open(language_file_name, 'w') as fh:
            fh.write(language_txt)
Python
def sentence_to_word_list(sentence: str):
    """
    Return a list of the words in the sentence. Strip punctuation from the
    beginning and end of each word.

    :param sentence: a string representation of a sentence of a language
    :return: a list of the words in the sentence
    """
    sentence = sentence.lower()       # convert to lower case
    sentence = sentence.rstrip('\n')  # remove the end of line from the sentence
    words = sentence.split(' ')       # split the sentence into words on blank spaces
    new_words = [word.strip('.,()[]-!:?;\\"') for word in words]  # remove punctuation
    return new_words
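For example, a sentence with punctuation and mixed case reduces to plain lower-case tokens:

print(sentence_to_word_list('The quick, brown fox (allegedly) jumped!\n'))
# ['the', 'quick', 'brown', 'fox', 'allegedly', 'jumped']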
Python
def train(self, training_data_dir: str, max_words_per_lang=0, report_freq=0):
    """
    Train the language identification model.

    The language identification model is a dictionary of word probabilities, one dictionary
    per language. The training data directory contains one file per language. This corpus is
    used to first count occurrences of words in the language and then compute probabilities
    of words from the counts.

    :param training_data_dir: a directory of training files, one file per language
    :param max_words_per_lang: the maximum number of words from a language to use for
        training. A value of 0 uses all of the data. This can be used to limit the training
        data to speed up development testing.
    :param report_freq: controls the reporting output during training, 0 means no reporting.
    :return: returns nothing.
    """
    # Check for the existence of the training directory
    if not os.path.isdir(training_data_dir):
        print('training directory does not exist:', training_data_dir)
        return

    # save parameters for other function calls
    self.init_data_dir = training_data_dir

    # The training directory has a file for each language. Each file contains the corpus of
    # the language. Process each file, building a word count dictionary for each language.
    for ix, file in enumerate(os.listdir(training_data_dir)):
        lang_name = file.split('-')[1].split('.')[0]  # the language's name (string representation)
        full_file_name = os.path.join(training_data_dir, file)
        self.lang_sentence_count[lang_name] = 0
        self.lang_total_word_count[lang_name] = 0  # total word count for this language
        self.lang_word_count[lang_name] = {}       # per-word counts for this language
        with open(full_file_name, 'r') as fh:
            for sentence in fh:
                self.lang_sentence_count[lang_name] += 1
                # Convert the sentence to a list of words and count each word's occurrence
                words = self.sentence_to_word_list(sentence)
                for word in words:
                    self.lang_total_word_count[lang_name] += 1
                    if word in self.lang_word_count[lang_name]:
                        self.lang_word_count[lang_name][word] += 1  # another occurrence in this language
                    else:
                        self.lang_word_count[lang_name][word] = 1   # initial occurrence in this language
                # Check for early training termination on this language
                if max_words_per_lang > 0:
                    if len(self.lang_word_count[lang_name].keys()) > max_words_per_lang:
                        break
                if report_freq > 0 and self.lang_sentence_count[lang_name] % report_freq == 0:
                    print('language:', lang_name,
                          'words processed:', self.lang_total_word_count[lang_name],
                          'sentences processed:', self.lang_sentence_count[lang_name])
        print('final stats:', lang_name,
              'unique words:', len(self.lang_word_count[lang_name].keys()),
              'sentences:', self.lang_sentence_count[lang_name])

    # Compute the probability of each word of a language by dividing the count of the word
    # in the language's corpus by the total number of words in the corpus.
    for lang in self.lang_word_count.keys():
        self.lang_word_prob[lang] = {}  # word probabilities for each language
        sum_counts = sum(self.lang_word_count[lang].values())
        for word in self.lang_word_count[lang]:
            self.lang_word_prob[lang][word] = self.lang_word_count[lang][word] / sum_counts

    # Mark the object as having completed training
    self.training_complete = True

    # Compute the out of vocabulary probability by finding the minimum word probability over
    # all languages. This is used during testing to assign a probability to a word that is not
    # found in the vocabulary (and hence cannot be estimated).
    self.out_of_vocab_prob = self.find_minimum_word_prob()
Python
def sentence_log_prob(self, sentence: str) -> (str, float):
    """
    Find the language which maximizes the sentence probability.

    :param sentence: a string representing a sentence
    :return: the language and the log probability of the sentence
    """
    words = self.sentence_to_word_list(sentence)  # a list of the words in the sentence
    # Create a list of (language name, log prob) pairs
    pl = [(l, self.sentence_log_prob_from_language(words, l)) for l in self.lang_word_prob.keys()]
    probs = [p[1] for p in pl]     # a list of log probabilities only
    max_prob = max(probs)          # the maximum log probability
    max_i = probs.index(max_prob)  # the index of the maximum
    return pl[max_i]
Python
def sentence_log_prob_from_language(self, word_list: [str], lang: str):
    """
    Compute the log probability of a list of words for a given language 'lang'. The log
    probabilities are summed rather than the probabilities multiplied in order to prevent
    underflow.

    :param word_list: input word list
    :param lang: language to compute the probability relative to
    :return: the log probability of the word list relative to the specified language
    """
    log_prob_sum = 0.0
    for word in word_list:
        if word in self.lang_word_prob[lang]:
            log_prob = math.log(self.lang_word_prob[lang][word])
        else:
            log_prob = math.log(self.out_of_vocab_prob)
        log_prob_sum += log_prob
    return log_prob_sum
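Taken together, these methods implement a unigram language classifier: each language is scored by summing log word probabilities, with a floor probability for unseen words. A self-contained toy version of that scoring with made-up two-language probabilities:

import math

lang_word_prob = {
    'english': {'the': 0.5, 'cat': 0.25, 'sat': 0.25},
    'french':  {'le': 0.5, 'chat': 0.3, 'assis': 0.2},
}
out_of_vocab_prob = 1e-6   # floor for words never seen in training

def score(words, lang):
    return sum(math.log(lang_word_prob[lang].get(w, out_of_vocab_prob)) for w in words)

words = ['the', 'cat', 'sat']
best = max(lang_word_prob, key=lambda lang: score(words, lang))
print(best)   # english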
Python
def find_minimum_word_prob(self):
    """
    Find the minimum word probability over all vocabularies. This function can be used to
    compute the out_of_vocab_prob from the training data.

    :return: the minimum probability of all words.
    """
    if not self.training_complete:
        print('Training is not complete. find_minimum_word_prob stopping.')
        return self.out_of_vocab_prob
    min_probs = [min(self.lang_word_prob[lang].values()) for lang in self.lang_word_prob.keys()]
    return min(min_probs)
Python
def print_most_prob_words(self):
    """
    Print the most probable word of each language. This is useful for testing the
    'train' function.
    """
    if not self.training_complete:
        print('Training is not complete. print_most_prob_words stopping.')
        return
    print('Most probable word of each language:')
    for lang in self.lang_word_count.keys():
        max_prob = 0.0
        max_prob_word = 'None???'
        for word in self.lang_word_prob[lang]:
            if self.lang_word_prob[lang][word] > max_prob:
                max_prob = self.lang_word_prob[lang][word]
                max_prob_word = word
        print('language:', lang, 'word:', max_prob_word, 'prob:', '{0:.4f}'.format(max_prob))
Python
def _download_rsi_(symbol):
    """
    Downloads the Relative Strength Index (RSI) time series of the given ticker symbol.
    The default time frame for comparing up periods to down periods is 14, as in 14
    trading days.

    :param symbol:
    :return: {
        "Symbol": "ABC",
        "RSI": {
            "2018-07-02": "",
            "2018-06-29": ""
        }
    }
    """
    data = {"Symbol": "", "RSI": OrderedDict()}
    r = requests.get(
        "https://www.alphavantage.co/query?function=RSI&symbol=" + symbol +
        "&interval=daily&time_period=14&series_type=close" +
        "&apikey=" + _api_key_)
    if r.status_code < 400:
        _data = r.json()
        if "Meta Data" in _data:
            data["Symbol"] = _data["Meta Data"]["1: Symbol"]
            for date, rsi in _data["Technical Analysis: RSI"].items():
                # the RSI entry for the current date carries a date and a time; skip it
                if len(date.split(" ")) == 1:
                    data["RSI"][date] = rsi["RSI"]
    return data
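Given the return shape documented above, pulling out the most recent RSI reading only needs the fact that ISO dates sort lexicographically; a sketch on made-up data:

from collections import OrderedDict

data = {"Symbol": "ABC",
        "RSI": OrderedDict([("2018-06-29", "55.1036"), ("2018-07-02", "61.2442")])}

latest_date = max(data["RSI"])                  # ISO dates sort lexicographically
print(latest_date, data["RSI"][latest_date])    # 2018-07-02 61.2442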
Python
def from_json(symbol, json_data):
    """
    Initializes a time series from the given json data.

    :param symbol: the ticker symbol this time series belongs to
    :param json_data: {
        "2018-06-18": { },
        "2018-06-19": { }
    }
    :return: the populated TimeSeries
    """
    time_series = TimeSeries()
    time_series.ticker = symbol
    for date_time, data in json_data.items():
        time_series._datetimeStamps_ += [date_time]
        time_series._intervals_[date_time] = IntervalData.from_json(symbol, date_time, data)
    time_series._datetimeStamps_ = sorted(time_series._datetimeStamps_)
    return time_series
Python
def _load_(tickers=list()):
    """
    Loads the locally cached ticker symbols found from our search parameters.

    :param tickers: any already loaded or downloaded ticker symbols
    :return: the list of locally cached ticker symbols
    """
    print("-- Loading ticker symbols that meet our search criteria")
    for f in os.listdir(_screener_path_):
        if f.endswith(".json"):
            with open(os.path.join(_screener_path_, f)) as f_json:
                data = json.load(f_json)
                if "data" not in data:
                    tickers += data
                else:
                    for d in data["data"]:
                        if not any(d["ticker"] == t["ticker"] for t in tickers):
                            tickers += [d]
    # tickers = _download_(tickers)
    tickers = sorted(tickers, key=lambda x: x["ticker"])
    _save_(tickers)
    return tickers
Python
def _save_(tickers):
    """
    Caches our list of ticker symbols locally.

    :param tickers:
    :return:
    """
    with open(_screener_json_, 'w') as screener_json:
        json.dump(tickers, screener_json, indent=4, sort_keys=True)
Python
def _build_model_(inputs, neurons=512, activation_function="tanh", dropout=0.6, loss="mse", optimizer="adam"): """ define the LSTM model. Load the network weights from a previous run if available. https://www.kaggle.com/pablocastilla/predict-stock-prices-with-lstm https://dashee87.github.io/deep%20learning/python/predicting-cryptocurrency-prices-with-deep-learning/ https://medium.com/@siavash_37715/how-to-predict-bitcoin-and-ethereum-price-with-rnn-lstm-in-keras-a6d8ee8a5109 :param inputs: :param neurons: :param activation_function: :param dropout: :param loss: :param optimizer: :return: """ print("-- Building LSTM model") # load the previously saved checkpoint with the best accuracy, if one exists accuracy = 0 filename = "" for f in os.listdir(_oracle_path_): if f.endswith('.hdf5'): acc = float(os.path.splitext(f)[0].split('-')[2]) if acc > accuracy: accuracy = acc filename = f if filename != "": print("checkpoint file: " + filename) model = load_model(os.path.join(_oracle_path_, filename)) else: model = Sequential() model.add(LSTM(neurons, input_shape=(inputs.shape[1], inputs.shape[2]), return_sequences=True, activation=activation_function)) model.add(Dropout(dropout)) model.add(LSTM(neurons, return_sequences=True, activation=activation_function)) model.add(Dropout(dropout)) model.add(LSTM(neurons, return_sequences=True, activation=activation_function)) model.add(Dropout(dropout)) ''' model.add(Dense(units=inputs.shape[2])) ''' # Output time steps = (Input time steps - Kernel size) / Strides + 1 model.add(Conv1D(filters=inputs.shape[2], # the dimensionality of the output space kernel_size=inputs.shape[1], # the length of the 1D convolution window activation=activation_function)) # https://medium.com/@huangkh19951228/predicting-cryptocurrency-price-with-tensorflow-and-keras-e1674b0dc58a # https://cdn-images-1.medium.com/max/800/1*I4OU7P938Otu95YAR6yMIw.png model.add(LeakyReLU()) model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy']) model.summary() return model
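The Conv1D layer in _build_model_ relies on the output-length rule quoted in its comment. A tiny, framework-free sketch of that arithmetic (my own helper, not part of the code above): with kernel_size equal to the number of input time steps and stride 1, the convolution collapses the sequence to a single output step, which is why the layer can act as the final projection.

def conv1d_output_steps(input_steps, kernel_size, strides=1):
    # Output time steps = (Input time steps - Kernel size) / Strides + 1 (no padding)
    return (input_steps - kernel_size) // strides + 1

# e.g. a window of 30 time steps convolved with kernel_size=30 leaves a single step
print(conv1d_output_steps(30, 30))   # 1
print(conv1d_output_steps(30, 5))    # 26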
Python
def to_vector(self): """ gives a normalized vector representation of this interval :return: [open, high, low, close, volume] """ return [self.open / 25.00, self.high / 25.00, self.low / 25.00, self.close / 25.00, self.volume / 1000000000.0] # self.rsi / 100]
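A small sketch of what the fixed-scale normalization in to_vector does to a sample bar; the numbers are invented, while the divisors 25.00 and 1e9 are the ones hard-coded above.

# hypothetical OHLCV bar
o, h, l, c, v = 12.50, 13.00, 12.25, 12.80, 250_000_000

vector = [o / 25.00, h / 25.00, l / 25.00, c / 25.00, v / 1_000_000_000.0]
print(vector)   # [0.5, 0.52, 0.49, 0.512, 0.25]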
Python
def to_plot(self): """ gives the ohlc for this interval to be plotted on a candlestick chart :return: [date, open, high, low, close] """ return [mdates.date2num(self.datetimeStamp), self.open, self.high, self.low, self.close]
Python
def from_json(symbol, datetime_stamp, json_data): """ initializes an interval from the given json data :param symbol: the ticker symbol this interval data belongs to :param datetime_stamp: the datetime that starts this interval :param json_data: { "1. open": "", "2. high": "", "3. low": "", "4. close": "", "5. volume": "", "RSI": "" } :return: an initialized instance of IntervalData """ interval_data = IntervalData() interval_data.ticker = symbol interval_data.datetimeStamp = datetime.strptime(datetime_stamp, "%Y-%m-%d") interval_data.open = float(json_data["1. open"]) interval_data.high = float(json_data["2. high"]) interval_data.low = float(json_data["3. low"]) interval_data.close = float(json_data["4. close"]) interval_data.volume = int(json_data["5. volume"]) interval_data.rsi = float(json_data["RSI"]) return interval_data
Python
def from_data(cls, filename, sep=",", labeled=True): """Creates a DataSet from a data file. :param labeled: if the file's last column contains the labels, defaults to True :type labeled: bool :param filename: The filename :type filename: str :param sep: attributes separator, defaults to "," :type sep: str, optional :return: A DataSet object :rtype: DataSet """ data = np.genfromtxt(filename, delimiter=sep) # returns a numpy matrix from which the data can be extracted # print(data) if labeled: # if the file contains labels X = data[:, 0:-1] # the features are every column except the last one Y = data[:, -1] # the last column holds the labels else: # if there are no labels X = data # all extracted values are feature data Y = None # and there are no labels return cls(X, Y)
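A self-contained sketch of the same np.genfromtxt split used by from_data, reading an in-memory CSV instead of a file so it can run standalone.

import io
import numpy as np

csv = io.StringIO("5.1,3.5,0\n4.9,3.0,0\n6.3,3.3,1\n")
data = np.genfromtxt(csv, delimiter=",")

X = data[:, 0:-1]   # every column except the last one -> features
Y = data[:, -1]     # last column -> labels
print(X.shape, Y)   # (3, 2) [0. 0. 1.]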
Python
def from_dataframe(cls, df, ylabel=None): """Creates a DataSet in array form from a pandas dataframe. :param df: pandas dataframe :type df: Dataframe :param ylabel: name of the column to use as the label, defaults to None :type ylabel: str, optional :return: A DataSet object :rtype: DataSet """ if ylabel and ylabel in df.columns: X = df.loc[:, df.columns != ylabel].to_numpy() y = df.loc[:, ylabel].to_numpy() xnames = list(df.columns) xnames.remove(ylabel) yname = ylabel else: X = df.to_numpy() y = None xnames = list(df.columns) yname = None return cls(X, y, xnames, yname)
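And the equivalent pandas path taken by from_dataframe, sketched with a throwaway frame; the column names here are invented.

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "label": [0, 1]})

ylabel = "label"
X = df.loc[:, df.columns != ylabel].to_numpy()
y = df.loc[:, ylabel].to_numpy()
xnames = [c for c in df.columns if c != ylabel]

print(X.shape, y, xnames)   # (2, 2) [0 1] ['a', 'b']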
Python
def toDataframe(self): """ Converts the dataset into a pandas DataFrame""" import pandas as pd if self.Y is None: dataset = pd.DataFrame(self.X.copy(), columns=self._xnames[:]) else: dataset = pd.DataFrame(np.hstack((self.X, self.Y.reshape(len(self.Y), 1))), columns=np.hstack((self._xnames, self._yname))) return dataset
Python
def namespace_to_data_class(args: Namespace, tuple_type, additional=None) -> Any: """ Automagically determine the arguments for a given dataclass and return an instantiated object. Parameters ---------- args Parsed command-line arguments to pull field values from. tuple_type The dataclass to instantiate. additional Optional mapping of extra field values that override the above. Returns ------- An instance of ``tuple_type``. """ data = dict() for field_name, field_obj in tuple_type.__dataclass_fields__.items(): if type(field_obj.default_factory) != _MISSING_TYPE: data[field_name] = field_obj.default_factory() else: data[field_name] = getattr(args, field_name, None) if additional: for key, value in additional.items(): data[key] = value return tuple_type(**data)
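A minimal sketch of the mapping performed by namespace_to_data_class, using a hypothetical dataclass and Namespace. Note that the default factory is called to produce the default value rather than stored as-is.

from argparse import Namespace
from dataclasses import dataclass, field, fields, MISSING

@dataclass
class TrainConfig:               # hypothetical dataclass for illustration
    lr: float = 0.001
    tags: list = field(default_factory=list)

args = Namespace(lr=0.01, unrelated="ignored")

data = {}
for f in fields(TrainConfig):
    if f.default_factory is not MISSING:     # same intent as the _MISSING_TYPE check above
        data[f.name] = f.default_factory()   # call the factory to get the default value
    else:
        data[f.name] = getattr(args, f.name, None)

print(TrainConfig(**data))   # TrainConfig(lr=0.01, tags=[])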
Python
async def handshake( self, wsuri: WebSocketURI, origin: Optional[Origin] = None, available_extensions: Optional[Sequence[ClientExtensionFactory]] = None, available_subprotocols: Optional[Sequence[Subprotocol]] = None, extra_headers: Optional[HeadersLike] = None, ) -> None: """ Unchanged from base client protocol except HTTP req-resp handled by auth flow """ request_headers = Headers() request_headers["Host"] = build_host(wsuri.host, wsuri.port, wsuri.secure) if wsuri.user_info: request_headers["Authorization"] = build_authorization_basic( *wsuri.user_info ) self.auth = Auth() if origin is not None: request_headers["Origin"] = origin key = build_request(request_headers) if available_extensions is not None: extensions_header = build_extension( [ (extension_factory.name, extension_factory.get_request_params()) for extension_factory in available_extensions ] ) request_headers["Sec-WebSocket-Extensions"] = extensions_header if available_subprotocols is not None: protocol_header = build_subprotocol(available_subprotocols) request_headers["Sec-WebSocket-Protocol"] = protocol_header extra_headers = extra_headers or self.extra_headers if extra_headers is not None: request_headers.update(extra_headers) request_headers.setdefault("User-Agent", USER_AGENT) request = (wsuri, request_headers) try: status_code, response_headers = await self.http_handling_auth(request) except BaseException as err: raise NegotiationError("Auth flow failed") from err if status_code in (301, 302, 303, 307, 308): if "Location" not in response_headers: raise InvalidHeader("Location") raise RedirectHandshake(response_headers["Location"]) elif status_code != 101: raise InvalidStatusCode(status_code, response_headers) check_response(response_headers, key) self.extensions = self.process_extensions( response_headers, available_extensions ) self.subprotocol = self.process_subprotocol( response_headers, available_subprotocols ) self.logger.debug("Handshake succeeded") self.connection_open()
Python
async def http_handling_auth( self, request: Request ) -> Response: """Create auth flow generator and execute HTTP requests""" requires_response_body = self.auth.requires_response_body auth_flow = self.auth.async_auth_flow(request) interface = HTTPInterface(self) try: request = await auth_flow.__anext__() while True: response = await interface.handle_async_request(request) # We dont want the auth flow to continue in the event of # a redirect status_code = response[0] if status_code in (301, 302, 303, 307, 308): return response[:2] if requires_response_body: content = await interface.receive_body() response = (*response, content) try: try: next_request = await auth_flow.asend(response) except StopAsyncIteration: return response[:2] request = next_request except Exception as err: raise err await interface.start_next_cycle() finally: interface.teardown() await auth_flow.aclose()
Python
async def async_auth_flow(self, request: Request) -> AsyncGenerator[Request, Response]: """ Execute the authentication flow asynchronously. By default, this defers to `.auth_flow()`. You should override this method when the authentication scheme does I/O and/or uses concurrency primitives. """ flow = self.auth_flow(request) request = next(flow) while True: response = yield request try: request = flow.send(response) except StopIteration: break
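To make the generator protocol that async_auth_flow wraps more concrete, here is a minimal synchronous sketch of an auth flow in the same style. The Request/Response objects are stand-in dicts, not the real types used above.

def auth_flow(request):
    # first attempt without credentials
    response = yield request
    if response["status_code"] == 401:
        # retry once with an (invented) token attached
        retried = dict(request, headers={**request["headers"], "Authorization": "Bearer TOKEN"})
        yield retried

# driving the generator the way http_handling_auth does
flow = auth_flow({"headers": {}})
req = next(flow)                              # first request to send
try:
    req = flow.send({"status_code": 401})     # feed the response back in
    print("retrying with", req["headers"])
except StopIteration:
    print("flow finished")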
Python
async def handle_async_request(self, request: Request) -> Response: """Send request and receive header portion of response""" wsuri: WebSocketURI = request[0] headers: Headers = request[1] target = wsuri.resource_name raw_request = self.prep_request(target, headers) # send the request, then receive the response headers await self.send_request(raw_request) event = await self.receive_headers() # check HTTP version returned from server, must be 1.1 if event.http_version != b"1.1": msg = f"Invalid HTTP protocol: HTTP {event.http_version.decode()}" raise h11.RemoteProtocolError(msg) status_code = event.status_code raw_header = event.headers # decode and construct response headers response_headers = Headers() for pair in raw_header: header = pair[0].decode() val = pair[1].decode() response_headers[header] = val # get the ssl socket if this is a TLS connection. Can be helpful in some auth flows ssl_socket: Union[ssl.SSLSocket, None] = None transport = self.transport if transport is not None: ssl_socket = transport.get_extra_info("ssl_object") return status_code, response_headers, request, ssl_socket
Python
def prep_request(self, target: str, headers: Headers) -> bytes: """Update state and prep request to be sent over wire""" logger.debug("Starting req-resp - %s", self.cycle_state) event = h11.Request( method="GET", target=target, headers=[(header, val) for header, val in headers.raw_items()] ) return self.state.send(event)
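For context on what prep_request hands back, a small sketch of serializing a GET request with h11 directly (assuming h11 is installed; the host and target below are invented):

import h11

conn = h11.Connection(our_role=h11.CLIENT)
event = h11.Request(
    method="GET",
    target="/chat",
    headers=[("Host", "example.com"), ("Upgrade", "websocket")],
)
wire_bytes = conn.send(event)        # raw bytes ready to write to the socket
print(wire_bytes.splitlines()[0])    # b'GET /chat HTTP/1.1'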
Python
async def receive_headers(self) -> h11.Response: """Read only response headers and leave body buffered""" while True: event = await self.receive_event() if isinstance(event, (h11.Response, h11.InformationalResponse)): logger.debug("Received headers - %s", self.cycle_state) return event
Python
async def receive_body(self) -> bytes: """ Receive the response body. This will be called if we are going to do another request-response cycle or the response body is required by the auth flow. """ content = b'' while True: event = await self.receive_event() if isinstance(event, h11.Data): content += bytes(event.data) elif isinstance(event, h11.EndOfMessage) or event is h11.PAUSED: # PAUSED is a sentinel compared by identity, like NEED_DATA in receive_event logger.debug("Received body - %s", self.cycle_state) self.stream_consumed = True return content
Python
async def receive_event(self) -> bytes: """Receives chunk of data from reader""" reader = self.reader if reader is not None: while True: event = self.state.next_event() if event is h11.NEED_DATA: try: data = await self.reader.readline() except ValueError as err: raise err if data == b"" and self.state.their_state == h11.SEND_RESPONSE: msg = "Server disconnected without sending a response." raise h11.RemoteProtocolError(msg) self.state.receive_data(data) else: return event
Python
async def start_next_cycle(self) -> None: """Reset internal state. Consume response body if cycle not complete""" if (self.state.our_state is h11.DONE and self.state.their_state is h11.DONE): self.state.start_next_cycle() elif not self.stream_consumed: # the body wasn't consumed yet, so drain it before the cycle can be reset await self.receive_body() await self.start_next_cycle() else: msg = f"Incorrect state - {self.cycle_state}" raise h11.LocalProtocolError(msg)
Python
def teardown(self) -> None: """Release all references to protocol and remove state""" self.transport_wr = None self.reader_wr = None self.state = None self.stream_consumed = False
Python
def create_operator(self, mesh, particles, repr_format=constants.StateRepresentationFormatCoinPosition): """Build the interaction operator for a quantum walk. Raises ------- NotImplementedError This method must not be called from this class, because the successor classes should implement it. """ raise NotImplementedError
Python
def generate(self, edges, perc_mode=constants.PercolationsGenerationModeBroadcast): """Generate mesh percolations. Parameters ---------- edges : int Number of edges of a mesh. perc_mode : int, optional Indicate how the percolations will be generated. Default value is :py:const:`sparkquantum.constants.PercolationsGenerationModeBroadcast`. Raises ------- NotImplementedError This method must not be called from this class, because the successor classes should implement it. """ raise NotImplementedError
Python
def sparsity(self): """Calculate the sparsity of this matrix. Returns ------- float The sparsity of this matrix. """ nelem = self._nelem if nelem is None: self._logger.warning( "this matrix will be considered as dense as it has not had its number of elements defined") nelem = self._size return 1.0 - nelem / self._size
Python
def dump(self, mode, glue=' ', path=None, codec=None, filename=None): """Dump this object's RDD to disk in a unique file or in many part-* files. Notes ----- Depending on the chosen dumping mode, this method calls the :py:func:`pyspark.RDD.collect` method. This is not suitable for large working sets, as all data may not fit into driver's main memory. Parameters ---------- mode : int Storage mode used to dump this state. glue : str, optional The glue string that connects each component of each element in the RDD. Default value is ' '. codec : str, optional Codec name used to compress the dumped data. Default value is None. filename : str, optional The full path with file name used when the dumping mode is in a single file. Default value is None. Raises ------ NotImplementedError If the coordinate format is not :py:const:`sparkquantum.constants.MatrixCoordinateDefault`. ValueError If the chosen dumping mode is not valid. """ if self._coord_format != constants.MatrixCoordinateDefault: self._logger.error("invalid coordinate format") raise NotImplementedError("invalid coordinate format") rdd = self.clear().data.map( lambda m: glue.join((str(m[0]), str(m[1]), str(m[2]))) ) if mode == constants.DumpingModeUniqueFile: data = rdd.collect() with open(filename, 'a') as f: for d in data: f.write(d + "\n") elif mode == constants.DumpingModePartFiles: rdd.saveAsTextFile(path, codec) else: self._logger.error("invalid dumping mode") raise ValueError("invalid dumping mode")
Python
def ndarray(self): """Create a Numpy array containing this object's RDD data. Notes ----- This method calls the :py:func:`pyspark.RDD.collect` method. This is not suitable for large working sets, as all data may not fit into main memory. Returns ------- :py:class:`numpy.ndarray` The Numpy array. Raises ------ NotImplementedError If this object's coordinate format is not :py:const:`sparkquantum.constants.MatrixCoordinateDefault`.. """ if self._coord_format != constants.MatrixCoordinateDefault: self._logger.error("invalid coordinate format") raise NotImplementedError("invalid coordinate format") data = self.clear().data.collect() result = np.zeros(self._shape, dtype=self._dtype) for e in data: result[e[0], e[1]] = e[2] return result
Python
def clear(self): """Remove possible zero entries of this object. Notes ----- Due to the immutability of RDD, a new RDD instance is created. Returns ------- :py:class:`sparkquantum.math.matrix.Matrix` A new matrix object. Raises ------ NotImplementedError If this object's coordinate format is not :py:const:`sparkquantum.constants.MatrixCoordinateDefault`.. """ if self._coord_format != constants.MatrixCoordinateDefault: self._logger.error("invalid coordinate format") raise NotImplementedError("invalid coordinate format") zero = self._dtype() rdd = self._data.filter( lambda m: m[2] is not None and m[2] != zero ) return Matrix(rdd, self._shape, dtype=self._dtype, coord_format=self._coord_format, nelem=self._nelem)
Python
def copy(self): """Make a copy of this object. Returns ------- :py:class:`sparkquantum.math.matrix.Matrix` A new matrix object. """ rdd = self._data.map( lambda m: m ) return Matrix(rdd, self._shape, dtype=self._dtype, coord_format=self._coord_format, nelem=self._nelem)
Python
def to_coordinate(self, coord_format): """Change the coordinate format of this object. Notes ----- Due to the immutability of RDD, a new RDD instance is created in the desired coordinate format. Parameters ---------- coord_format : int The new coordinate format for this object. Returns ------- :py:class:`sparkquantum.math.matrix.Matrix` A new matrix object with the RDD in the desired coordinate format. """ if self._coord_format == coord_format: return self rdd = self._data if self._coord_format != constants.MatrixCoordinateDefault: if self._coord_format == constants.MatrixCoordinateMultiplier: rdd = rdd.map( lambda m: (m[1][0], m[0], m[1][1]) ) elif self._coord_format == constants.MatrixCoordinateMultiplicand: rdd = rdd.map( lambda m: (m[0], m[1][0], m[1][1]) ) elif self._coord_format == constants.MatrixCoordinateIndexed: rdd = rdd.map( lambda m: (m[0][0], m[0][1], m[1]) ) else: raise ValueError("invalid coordinate format") if coord_format != constants.MatrixCoordinateDefault: if coord_format == constants.MatrixCoordinateMultiplier: rdd = rdd.map( lambda m: (m[1], (m[0], m[2])) ) elif coord_format == constants.MatrixCoordinateMultiplicand: rdd = rdd.map( lambda m: (m[0], (m[1], m[2])) ) elif coord_format == constants.MatrixCoordinateIndexed: rdd = rdd.map( lambda m: ((m[0], m[1]), m[2]) ) else: raise ValueError("invalid coordinate format") return Matrix(rdd, self._shape, dtype=self._dtype, coord_format=coord_format, nelem=self._nelem)
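The coordinate conversions in to_coordinate are plain tuple reshufflings. A Spark-free sketch over a list of (i, j, value) triples shows the round trip for the multiplier format:

triples = [(0, 1, 2.0), (1, 0, 3.0)]                     # default format: (i, j, value)

multiplier = [(j, (i, v)) for (i, j, v) in triples]      # keyed by column for multiplication
back = [(i, j, v) for (j, (i, v)) in multiplier]         # and back to the default format

print(multiplier)        # [(1, (0, 2.0)), (0, (1, 3.0))]
print(back == triples)   # True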
Python
def norm(self): """Calculate the norm of this matrix. Returns ------- float The norm of this matrix. """ if self._coord_format != constants.MatrixCoordinateDefault: self._logger.error("invalid coordinate format") raise NotImplementedError("invalid coordinate format") if self._dtype == complex: def __map(m): return m[2].real ** 2 + m[2].imag ** 2 else: def __map(m): return m[2] ** 2 n = self._data.map( __map ).reduce( lambda a, b: a + b ) return math.sqrt(n)
Python
def is_unitary(self): """Check if this matrix is unitary by calculating its norm. Notes ----- This method uses the 'sparkquantum.math.roundPrecision' configuration to round the calculated norm. Returns ------- bool True if the norm of this matrix is 1.0, False otherwise. """ if self._coord_format != constants.MatrixCoordinateDefault: self._logger.error("invalid coordinate format") raise NotImplementedError("invalid coordinate format") round_precision = int( conf.get( self._sc, 'sparkquantum.math.roundPrecision')) return round(self.norm(), round_precision) == 1.0
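The norm and is_unitary methods above reduce to the 2-norm of the non-zero entries. A plain-Python sketch with a couple of invented complex amplitudes:

import math

entries = [(0, 0, (1 + 1j) / 2), (1, 0, (1 - 1j) / 2)]   # invented amplitudes

n = math.sqrt(sum(v.real ** 2 + v.imag ** 2 for (_, _, v) in entries))
print(n)                      # 1.0
print(round(n, 10) == 1.0)    # True, so this column vector is normalized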
Python
def diagonal(size, value): """Create a diagonal matrix with its elements being the desired value. Parameters ---------- size : int The size of the diagonal. value: int, float or complex The value of each element of the diagonal matrix. Returns ------- :py:class:`sparkquantum.math.matrix.Matrix` The resulting matrix. Raises ------ TypeError If `size` is not an int or `value` is not a scalar (number). """ if not isinstance(size, int): raise TypeError("int expected, not {}".format(type(size))) if not mathutil.is_scalar(value): raise TypeError( "int, float or complex expected, not {}".format(type(value))) sc = SparkContext.getOrCreate() shape = (size, size) dtype = type(value) nelem = shape[0] if value == dtype(): rdd = sc.emptyRDD() else: num_partitions = util.get_num_partitions( sc, util.get_size_of_type(dtype) * nelem ) rdd = sc.range(size, numSlices=num_partitions).map( lambda m: (m, m, value) ) return Matrix(rdd, shape, dtype=dtype, nelem=nelem)
Python
def zeros(shape, dtype=float): """Create a matrix full of zeros. Notes ----- As all matrix-like objects are treated as sparse, an empty RDD is used. Parameters ---------- shape : tuple The shape of the matrix. dtype : type, optional The Python type of all values in this object. Default value is float. Returns ------- :py:class:`sparkquantum.math.matrix.Matrix` The resulting matrix. Raises ------ TypeError If `shape` is not a valid shape. """ if not mathutil.is_shape(shape, ndim=2): raise ValueError("invalid shape") sc = SparkContext.getOrCreate() nelem = 0 rdd = sc.emptyRDD() return Matrix(rdd, shape, dtype=dtype, nelem=nelem)
Python
def ones(shape, dtype=float): """Create a matrix full of ones. Parameters ---------- shape : tuple The shape of the matrix. dtype : type, optional The Python type of all values in this object. Default value is float. Returns ------- :py:class:`sparkquantum.math.matrix.Matrix` The resulting matrix. Raises ------ TypeError If `shape` is not a valid shape. """ if not mathutil.is_shape(shape, ndim=2): raise ValueError("invalid shape") sc = SparkContext.getOrCreate() value = dtype() + 1 nelem = shape[0] * shape[1] num_partitions = util.get_num_partitions( sc, util.get_size_of_type(dtype) * nelem ) rdd = sc.range( shape[0], numSlices=num_partitions ).cartesian( sc.range(shape[1], numSlices=num_partitions) ).map( lambda m: (m[0], m[1], value) ) return Matrix(rdd, shape, dtype=dtype, nelem=nelem)
Python
def center(self, dim=None, coord=False): """Get the center site number or coordinate of a dimension or of the entire grid. Parameters ---------- dim : int, optional The chosen dimension to get the center site. Default value is None. coord : bool, optional Indicate to return the center site in coordinates. Default value is False. Returns ------- int or tuple of int The center site of a dimension of this grid or of the entire grid, whether in coordinates or not. """ if coord: return 0 if dim is not None else (0, ) return super().center(dim=dim, coord=coord)
Python
def to_site(self, coord): """Get the correspondent site number from coordinates. Parameters ---------- coord : tuple of int The coordinates. Returns ------- int The correspondent site number. Raises ------ ValueError If the coordinates are invalid or are out of the grid boundaries. """ if len(coord) != self._ndim: self._logger.error("invalid coordinates") raise ValueError("invalid coordinates") if not self.has_coordinate(coord): self._logger.error("coordinates out of grid boundaries") raise ValueError("coordinates out of grid boundaries") return coord[0] + super().center()
Python
def axis(self): """Get the ranges corresponding to coordinates of this grid. Returns ------- tuple of range The ranges corresponding to coordinates of this grid. """ return (range(-super().center(), super().center() + 1), )
Python
def center(self, dim=None, coord=False): """Get the center site number or coordinate of a dimension or of the entire grid. Parameters ---------- dim : int, optional The chosen dimension to get the center site. Default value is None. coord : bool, optional Indicate to return the center site in coordinates. Default value is False. Returns ------- int or tuple of int The center site of a dimension of this grid or of the entire grid, whether in coordinates or not. """ if dim is not None: if dim < 0 or dim >= self._ndim: self._logger.error("invalid dimension") raise ValueError("invalid dimension") return int((self._shape[dim] - 1) / 2) if coord: return tuple([self.center(d) for d in range(self._ndim)]) else: def __center(ndim): if ndim == 1: return self.center(dim=ndim - 1) else: accsites = 1 for d in range(ndim - 1): accsites *= self._shape[d] return accsites * \ self.center(dim=ndim - 1) + __center(ndim - 1) return __center(self._ndim)
Python
def has_coordinate(self, coord, dim=None): """Indicate whether a coordinate of a specific dimension or all coordinates are inside this grid. Parameters ---------- coord : int or tuple of int A coordinate or all coordinates to be checked. dim : int, optional The chosen dimension. Default value is None. Returns ------- bool True if this grid comprises the coordinates, False otherwise. """ r = self.axis() if dim is not None: return coord >= r[dim].start and coord < r[dim].stop for d in range(self._ndim): if coord[d] < r[d].start or coord[d] >= r[d].stop: return False return True
Python
def to_site(self, coord): """Get the correspondent site number from coordinates. Parameters ---------- coord : tuple of int The coordinates. Returns ------- int The correspondent site number. Raises ------ ValueError If the coordinates are invalid or are out of the grid boundaries. """ if len(coord) != self._ndim: self._logger.error("invalid coordinates") raise ValueError("invalid coordinates") for d in range(self._ndim): if coord[d] < 0 or coord[d] >= self._shape[d]: self._logger.error("coordinates out of grid boundaries") raise ValueError("coordinates out of grid boundaries") def __to_site(ndim): if ndim == 1: return coord[ndim - 1] else: accsites = 1 for d in range(ndim - 1): accsites *= self._shape[d] return coord[ndim - 1] * accsites + __to_site(ndim - 1) return __to_site(self._ndim)
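The recursion in to_site linearizes coordinates with the first dimension varying fastest. An iterative sketch of the same arithmetic for a hypothetical (4, 3) grid, checked against a brute-force enumeration:

shape = (4, 3)

def to_site(coord, shape):
    site, acc = 0, 1
    for d in range(len(shape)):          # coord[0] varies fastest, as in __to_site
        site += coord[d] * acc
        acc *= shape[d]
    return site

# enumerate the sites in the same order and confirm they agree
expected = {c: i for i, c in enumerate((x, y) for y in range(shape[1]) for x in range(shape[0]))}
assert all(to_site(c, shape) == s for c, s in expected.items())
print(to_site((3, 2), shape))   # 11, the last site of a 4x3 grid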
Python
def axis(self): """Get the ranges corresponding to coordinates of this grid. Returns ------- tuple of range The ranges corresponding to coordinates of this grid. """ return tuple([range(self._shape[d]) for d in range(self._ndim)])
Python
def repartition(self, num_partitions, shuffle=False): """Change the number of partitions of this object's RDD. Parameters ---------- num_partitions : int The target number of partitions of the RDD. shuffle: bool Indicate that Spark must force a shuffle operation. Returns ------- :py:class:`sparkquantum.base.Base` A reference to this object. """ if num_partitions > self._data.getNumPartitions(): self._data = self._data.repartition(num_partitions) elif num_partitions < self._data.getNumPartitions(): self._data = self._data.coalesce(num_partitions, shuffle) return self
Python
def partition_by(self, num_partitions=None, partition_func=None): """Set a partitioner with the chosen number of partitions for this object's RDD. Notes ----- When `partition_func` is None, the default partition function is used (i.e., portable_hash). Parameters ---------- num_partitions : int, optional The chosen number of partitions for the RDD. Default value is the original number of partitions of the RDD. partition_func: function, optional The chosen partition function. Default value is None. Returns ------- :py:class:`sparkquantum.base.Base` A reference to this object. """ if num_partitions is None: np = self._data.getNumPartitions() else: np = num_partitions if partition_func is None: self._data = self._data.partitionBy(np) else: self._data = self._data.partitionBy( np, partitionFunc=partition_func ) return self
Python
def unpersist(self): """Unpersist this object's RDD. Returns ------- :py:class:`sparkquantum.base.Base` A reference to this object. """ if self._data is not None: if self._data.is_cached: self._data.unpersist() self._logger.info( "RDD {} was unpersisted".format( self._data.id())) else: self._logger.info( "RDD {} has already been unpersisted".format( self._data.id())) else: self._logger.warning( "there is no data to be unpersisted") return self
Python
def materialize(self, storage_level=StorageLevel.MEMORY_AND_DISK): """Materialize this object's RDD considering the chosen storage level. This method calls persist and right after counts how many elements there are in the RDD to force its persistence. Parameters ---------- storage_level : :py:class:`pyspark.StorageLevel`, optional The desired storage level when materializing the RDD. Default value is :py:const:`pyspark.StorageLevel.MEMORY_AND_DISK`. Returns ------- :py:class:`sparkquantum.base.Base` A reference to this object. """ self.persist(storage_level=storage_level) self._nelem = self._data.count() self._logger.info("RDD {} was materialized".format(self._data.id())) return self
Python
def checkpoint(self): """Checkpoint this object's RDD. Notes ----- If it is intended to use this method in an application, it is necessary to define the checkpoint dir using the :py:class:`pyspark.SparkContext` object. Returns ------- :py:class:`sparkquantum.base.Base` A reference to this object. """ if self._data.isCheckpointed(): self._logger.info("RDD already checkpointed") return self if not self._data.is_cached: self._logger.warning( "it is recommended to cache the RDD before checkpointing it") self._data.checkpoint() self._logger.info( "RDD {} was checkpointed in {}".format( self._data.id(), self._data.getCheckpointFile())) return self
Python
def _create_evolution_operators(self):
    """Build the evolution operators for the walk.

    This method builds a list with n operators, where n is the number of particles of the system.
    In a multiparticle quantum walk, each operator is built by applying a tensor product between
    the evolution operator and ``n-1`` identity matrices as follows:

        ``W1 = U1 (X) I2 (X) ... (X) In
        Wi = I1 (X) ... (X) Ii-1 (X) Ui (X) Ii+1 (X) ... In
        Wn = I1 (X) ... (X) In-1 (X) Un``

    Raises
    ------
    ValueError
        If the chosen 'sparkquantum.dtqw.evolutionOperator.kroneckerMode' configuration is not valid.

    """
    self._logger.info("building evolution operators...")

    self._create_coin_operators()

    if self._shift_operator is None:
        self._logger.info(
            "no shift operator has been set. A new one will be built")
        self._create_shift_operator()

    self._destroy_evolution_operators()

    particles = len(self._particles)

    for i, particle in enumerate(self._particles):
        name = particle.name if particle.name is not None else 'unidentified'

        self._logger.info(
            "building evolution operator for particle {} ({})...".format(i + 1, name))

        time = datetime.now()

        eo = self._shift_operator.multiply(self._coin_operators[i])

        dtype = self._coin_operators[i].dtype
        nelem = eo.nelem * eo.shape[0] ** (particles - 1)

        num_partitions = max(
            util.get_num_partitions(
                self._sc,
                util.get_size_of_type(dtype) * nelem
            ),
            eo.data.getNumPartitions()
        )

        shape = eo.shape

        if particles > 1:
            shape_tmp = shape

            if i == 0:
                # The first particle's evolution operator consists in applying the tensor product
                # between the evolution operator and the other particles' corresponding identity matrices
                #
                # W1 = U1 (X) I2 (X) ... (X) In
                rdd_shape = (
                    shape_tmp[0] ** (particles - 1 - i),
                    shape_tmp[1] ** (particles - 1 - i)
                )

                def __map(m):
                    for i in range(rdd_shape[0]):
                        yield m[0] * rdd_shape[0] + i, m[1] * rdd_shape[1] + i, m[2]

                rdd = eo.data.flatMap(
                    __map
                )
            else:
                # For the other particles, each one has its operator built by applying the tensor
                # product between its previous particles' identity matrices and its evolution operator
                #
                # Wi = I1 (X) ... (X) Ii-1 (X) Ui ...
                rdd_shape = (
                    shape_tmp[0] ** i,
                    shape_tmp[1] ** i
                )

                def __map(m):
                    for i in range(rdd_shape[0]):
                        yield i * shape_tmp[0] + m[0], i * shape_tmp[1] + m[1], m[2]

                rdd = eo.data.flatMap(
                    __map
                )

                # Then, the tensor product is applied with the following particles' identity matrices
                #
                # ... (X) Ii+1 (X) ... In
                #
                # For the last particle there are no following identities, so the operator is simply
                #
                # Wn = I1 (X) ... (X) In-1 (X) Un
                if i < particles - 1:
                    rdd_shape = (
                        shape_tmp[0] ** (particles - 1 - i),
                        shape_tmp[1] ** (particles - 1 - i)
                    )

                    def __map(m):
                        for i in range(rdd_shape[0]):
                            yield m[0] * rdd_shape[0] + i, m[1] * rdd_shape[1] + i, m[2]

                    rdd = rdd.flatMap(
                        __map
                    )

            shape = (rdd_shape[0] * shape_tmp[0], rdd_shape[1] * shape_tmp[1])

            eo = Operator(rdd, shape, dtype=dtype, nelem=nelem)

        eo = eo.to_coordinate(
            constants.MatrixCoordinateMultiplier
        ).partition_by(
            num_partitions=num_partitions
        ).persist(self._storage_level)

        if self._checkpoint_operators:
            eo = eo.checkpoint()

        eo = eo.materialize(self._storage_level)

        self._evolution_operators.append(eo)

        time = (datetime.now() - time).total_seconds()

        self._logger.info(
            "evolution operator for particle {} ({}) was built in {}s".format(i + 1, name, time))

        self._profile_operator(
            'evolutionOperatorParticle{}'.format(i + 1), eo, time)

    self._shift_operator.unpersist()
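# Illustrative sketch only: the dense (NumPy) analogue of the tensor-product
# structure described in the docstring above, for a toy single-particle
# operator U and 3 particles. The distributed code builds the same structure
# lazily over RDD coordinate entries instead of materializing dense matrices.
import numpy as np

U = np.array([[0, 1], [1, 0]])     # toy single-particle evolution operator
I = np.eye(2)
particles = 3

def evolution_operator(U, I, particle, particles):
    """W_p = I (X) ... (X) U (X) ... (X) I, with U at position `particle`."""
    factors = [U if p == particle else I for p in range(particles)]
    W = factors[0]
    for f in factors[1:]:
        W = np.kron(W, f)
    return W

W0 = evolution_operator(U, I, 0, particles)   # U (X) I (X) I
W2 = evolution_operator(U, I, 2, particles)   # I (X) I (X) U
assert W0.shape == (2 ** particles, 2 ** particles)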
Python
def _destroy_operators(self):
    """Release all operators from memory and/or disk."""
    self._logger.info("destroying operators...")

    self._destroy_coin_operators()
    self._destroy_shift_operator()
    self._destroy_interaction_operator()
    self._destroy_evolution_operators()

    self._logger.info("operators have been destroyed")
Python
def destroy(self):
    """Clear the current state and all operators of this quantum walk.

    Returns
    -------
    :py:class:`sparkquantum.dtqw.dtqw.DiscreteTimeQuantumWalk`
        A reference to this object.

    """
    if self._inistate is not None:
        self._inistate.unpersist()

    self._destroy_state()
    self._destroy_operators()

    return self
Python
def reset(self):
    """Reset this quantum walk.

    Returns
    -------
    :py:class:`sparkquantum.dtqw.dtqw.DiscreteTimeQuantumWalk`
        A reference to this object.

    """
    if self._inistate is not None:
        self._inistate.unpersist()

    self._destroy_state()

    self._curstep = 0

    return self
Python
def step(self, checkpoint_state=False):
    """Perform a step of this quantum walk.

    Parameters
    ----------
    checkpoint_state : bool, optional
        Indicate whether the state must be checkpointed. Default value is False.

    Returns
    -------
    :py:class:`sparkquantum.dtqw.state.State`
        The state of the system after performing the step.

    Raises
    ------
    NotImplementedError
        If this quantum walk has not been setup.

    """
    if self._curstate is None:
        self._logger.error("this quantum walk has not been setup")
        raise NotImplementedError("this quantum walk has not been setup")

    step = self._curstep + 1
    result = self._curstate

    # When there is a non-permanent percolations generator (e.g., random),
    # the evolution operators must be rebuilt in each step of the walk
    if (self._mesh.percolation is not None and
            not is_permanent(self._mesh.percolation)):
        self._destroy_shift_operator()
        self._create_evolution_operators()

    time = datetime.now()

    if self._interaction_operator is not None:
        result = self._interaction_operator.multiply(
            result.to_coordinate(
                constants.MatrixCoordinateMultiplicand
            ).partition_by(
                self._interaction_operator.data.getNumPartitions()
            )
        )

    for eo in self._evolution_operators:
        result = eo.multiply(
            result.to_coordinate(
                constants.MatrixCoordinateMultiplicand
            ).partition_by(
                eo.data.getNumPartitions()
            )
        )

    result = result.persist(self._storage_level)

    if checkpoint_state:
        result = result.checkpoint()

    result = result.materialize(self._storage_level)

    self._curstate.unpersist()

    time = (datetime.now() - time).total_seconds()

    self._logger.info(
        "system state after step {} was computed in {}s".format(step, time))

    self._profile_state('systemState{}'.format(step), result, time)

    self._profiler.log_rdd(app_id=self._sc.applicationId)

    self._curstep += 1

    return result
Python
def walk(self, steps, checkpoint_frequency=None):
    """Perform the quantum walk.

    Parameters
    ----------
    steps : int
        The number of steps of the quantum walk.
    checkpoint_frequency : int, optional
        The frequency at which states will be checkpointed. Must be a positive value.
        When it is set to None, zero or a negative value, no checkpointing occurs.
        Default value is None.

    Returns
    -------
    :py:class:`sparkquantum.dtqw.state.State`
        The final state of the system after performing the walk.

    Raises
    ------
    ValueError
        If the final state of the system is not unitary.

    """
    self.reset()
    self.setup()

    self._logger.info(
        "starting a {} for {} steps...".format(self, steps))

    time = datetime.now()

    step = self._curstep

    while step < steps:
        if (checkpoint_frequency is not None and
                checkpoint_frequency > 0 and
                (step + 1) % checkpoint_frequency == 0):
            checkpoint = True
        else:
            checkpoint = False

        self._curstate = self.step(checkpoint_state=checkpoint)

        step = self._curstep

    time = (datetime.now() - time).total_seconds()

    self._logger.info("walk was done in {}s".format(time))

    self._logger.info("checking if the final state is unitary...")

    if not self._curstate.is_unitary():
        self._logger.error("the final state is not unitary")
        raise ValueError("the final state is not unitary")

    return self._curstate
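# A small, self-contained check of the checkpoint_frequency rule used in
# walk() above: a (1-based) step s is checkpointed iff the frequency is a
# positive integer and s is a multiple of it. Checkpointing every k-th step
# truncates the RDD lineage so the DAG does not grow without bound.
def should_checkpoint(step, checkpoint_frequency):
    return (checkpoint_frequency is not None and
            checkpoint_frequency > 0 and
            step % checkpoint_frequency == 0)

assert [s for s in range(1, 11) if should_checkpoint(s, 3)] == [3, 6, 9]
assert not any(should_checkpoint(s, None) for s in range(1, 11))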
Python
def generate(self, edges, perc_mode=constants.PercolationsGenerationModeBroadcast):
    """Generate mesh percolations for the predefined set of edges.

    Parameters
    ----------
    edges : int
        Number of edges of the mesh.
    perc_mode : int, optional
        Indicate how the percolations will be generated.
        Default value is :py:const:`sparkquantum.constants.PercolationsGenerationModeBroadcast`.

    Returns
    -------
    :py:class:`pyspark.RDD` or :py:class:`pyspark.Broadcast`
        The :py:class:`pyspark.RDD` or :py:class:`pyspark.Broadcast` dict which keys are
        the numbered edges that are broken, depending on the chosen
        'sparkquantum.dtqw.mesh.percolation.generationMode' configuration.

    Raises
    ------
    ValueError
        If some edge of this object is out of the bounds of the number of edges of the mesh or
        if the chosen 'sparkquantum.dtqw.mesh.percolation.generationMode' configuration is not valid.

    """
    if max(self._edges) >= edges:
        self._logger.error(
            "this mesh supports edges from {} to {}".format(0, edges - 1))
        raise ValueError(
            "this mesh supports edges from {} to {}".format(0, edges - 1))

    if isinstance(self._edges, range):
        rdd = self._sc.range(
            self._edges.start, self._edges.stop, self._edges.step
        )
    else:
        rdd = self._sc.parallelize(
            self._edges
        )

    rdd = rdd.map(
        lambda m: (m, True)
    )

    if perc_mode == constants.PercolationsGenerationModeRDD:
        return rdd
    elif perc_mode == constants.PercolationsGenerationModeBroadcast:
        return util.broadcast(self._sc, rdd.collectAsMap())
    else:
        self._logger.error("invalid percolations generation mode")
        raise ValueError("invalid percolations generation mode")
Python
def create_operator(self, repr_format=constants.StateRepresentationFormatCoinPosition,
                    perc_mode=constants.PercolationsGenerationModeBroadcast):
    """Build the shift operator for a quantum walk.

    Parameters
    ----------
    repr_format : int, optional
        Indicate how the quantum system is represented.
        Default value is :py:const:`sparkquantum.constants.StateRepresentationFormatCoinPosition`.
    perc_mode : int, optional
        Indicate how the percolations will be generated.
        Default value is :py:const:`sparkquantum.constants.PercolationsGenerationModeBroadcast`.

    Returns
    -------
    :py:class:`sparkquantum.dtqw.operator.Operator`
        The created operator using this mesh.

    Raises
    ------
    ValueError
        If `repr_format` or `perc_mode` is not valid.

    """
    cspace = 2 ** self._ndim
    pspace = self._sites

    shape = (cspace * pspace, cspace * pspace)

    nelem = shape[0]

    if self._percolation is not None:
        percolations = self._percolation.generate(self._edges, perc_mode=perc_mode)

        if perc_mode == constants.PercolationsGenerationModeRDD:
            if repr_format == constants.StateRepresentationFormatCoinPosition:
                def __map(e):
                    """e = (edge, (edge, broken or not))"""
                    for i in range(cspace):
                        l = (-1) ** i

                        # Finding the correspondent x coordinate of the
                        # vertex from the edge number
                        x = (e[1][0] - i - l) % pspace

                        if e[1][1]:
                            l = 0

                        yield (i + l) * pspace + (x + l) % pspace, (1 - i) * pspace + x, 1
            elif repr_format == constants.StateRepresentationFormatPositionCoin:
                def __map(e):
                    """e = (edge, (edge, broken or not))"""
                    for i in range(cspace):
                        l = (-1) ** i

                        # Finding the correspondent x coordinate of the
                        # vertex from the edge number
                        x = (e[1][0] - i - l) % pspace

                        if e[1][1]:
                            l = 0

                        yield ((x + l) % pspace) * cspace + i + l, x * cspace + 1 - i, 1
            else:
                percolations.unpersist()

                self._logger.error("invalid representation format")
                raise ValueError("invalid representation format")

            rdd = self._sc.range(
                self._edges
            ).map(
                lambda m: (m, m)
            ).leftOuterJoin(
                percolations
            ).flatMap(
                __map
            )
        elif perc_mode == constants.PercolationsGenerationModeBroadcast:
            if repr_format == constants.StateRepresentationFormatCoinPosition:
                def __map(e):
                    for i in range(cspace):
                        l = (-1) ** i

                        # Finding the correspondent x coordinate of the
                        # vertex from the edge number
                        x = (e - i - l) % pspace

                        if e in percolations.value:
                            l = 0

                        yield (i + l) * pspace + (x + l) % pspace, (1 - i) * pspace + x, 1
            elif repr_format == constants.StateRepresentationFormatPositionCoin:
                def __map(e):
                    for i in range(cspace):
                        l = (-1) ** i

                        # Finding the correspondent x coordinate of the
                        # vertex from the edge number
                        x = (e - i - l) % pspace

                        if e in percolations.value:
                            l = 0

                        yield ((x + l) % pspace) * cspace + i + l, x * cspace + 1 - i, 1
            else:
                percolations.unpersist()

                self._logger.error("invalid representation format")
                raise ValueError("invalid representation format")

            rdd = self._sc.range(
                self._edges
            ).flatMap(
                __map
            )
        else:
            percolations.unpersist()

            self._logger.error("invalid percolations generation mode")
            raise ValueError("invalid percolations generation mode")
    else:
        if repr_format == constants.StateRepresentationFormatCoinPosition:
            def __map(x):
                for i in range(cspace):
                    l = (-1) ** i

                    yield i * pspace + (x + l) % pspace, i * pspace + x, 1
        elif repr_format == constants.StateRepresentationFormatPositionCoin:
            def __map(x):
                for i in range(cspace):
                    l = (-1) ** i

                    yield ((x + l) % pspace) * cspace + i, x * cspace + i, 1
        else:
            self._logger.error("invalid representation format")
            raise ValueError("invalid representation format")

        rdd = self._sc.range(
            pspace
        ).flatMap(
            __map
        )

    return Operator(rdd, shape, dtype=int, nelem=nelem)
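# Illustrative dense sketch of the percolation-free branch above, for a 1-D
# cycle in the coin-position representation: entry (i*pspace + (x+l) % pspace,
# i*pspace + x) is 1, where l = (-1)**i is the direction of coin side i.
# This is only a toy NumPy illustration, not the distributed construction.
import numpy as np

cspace, pspace = 2, 5                      # coin space and number of sites
S = np.zeros((cspace * pspace, cspace * pspace), dtype=int)

for x in range(pspace):
    for i in range(cspace):
        l = (-1) ** i
        S[i * pspace + (x + l) % pspace, i * pspace + x] = 1

# The shift operator of a cycle is a permutation matrix (hence unitary).
assert (S.sum(axis=0) == 1).all() and (S.sum(axis=1) == 1).all()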
Python
def to_coordinate(self, coord_format):
    """Change the coordinate format of this object.

    Notes
    -----
    Due to the immutability of RDD, a new RDD instance is created
    in the desired coordinate format. Thus, a new instance of this class
    is returned with this RDD.

    Parameters
    ----------
    coord_format : int
        The new coordinate format of this object.

    Returns
    -------
    :py:class:`sparkquantum.dtqw.operator.Operator`
        A new operator object with the RDD in the desired coordinate format.

    """
    return Operator.from_matrix(super().to_coordinate(coord_format))
Python
def multiply(self, other):
    """Multiply this operator with another one or with a system state.

    Parameters
    ----------
    other : :py:class:`sparkquantum.dtqw.operator.Operator` or :py:class:`sparkquantum.dtqw.state.State`
        An operator if multiplying another operator, state otherwise.

    Returns
    -------
    :py:class:`sparkquantum.dtqw.operator.Operator` or :py:class:`sparkquantum.dtqw.state.State`
        :py:class:`sparkquantum.dtqw.operator.Operator` if multiplying another operator,
        :py:class:`sparkquantum.dtqw.state.State` otherwise.

    Raises
    ------
    TypeError
        If `other` is not a :py:class:`sparkquantum.dtqw.operator.Operator` nor
        :py:class:`sparkquantum.dtqw.state.State`.
    ValueError
        If this matrix's and `other`'s shapes are incompatible for multiplication.

    """
    if is_operator(other):
        return Operator.from_matrix(super().multiply(other))
    elif is_state(other):
        return State.from_matrix(super().multiply(other),
                                 other.mesh, other.particles, other.repr_format)
    else:
        self._logger.error(
            "'State' or 'Operator' instance expected, not '{}'".format(type(other)))
        raise TypeError(
            "'State' or 'Operator' instance expected, not '{}'".format(type(other)))
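# Illustrative (non-Spark) sketch of how a sparse operator-state product can be
# computed from coordinate triples, which is the general idea behind multiply():
# join operator entries (i, k, v) with state entries (k, j, w) on k and sum
# v * w per output coordinate. This is not the library's internal
# implementation, only the underlying algorithm.
from collections import defaultdict

def coo_multiply(operator_entries, state_entries):
    by_k = defaultdict(list)
    for k, j, w in state_entries:
        by_k[k].append((j, w))

    result = defaultdict(int)
    for i, k, v in operator_entries:
        for j, w in by_k.get(k, []):
            result[(i, j)] += v * w
    return dict(result)

op = [(0, 1, 1), (1, 0, 1)]          # 2x2 swap operator in (row, col, value) form
state = [(0, 0, 0.6), (1, 0, 0.8)]   # column vector in (row, col, value) form
assert coo_multiply(op, state) == {(0, 0): 0.8, (1, 0): 0.6}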
Python
def clear(self):
    """Remove possible zero entries of this object.

    Notes
    -----
    Due to the immutability of RDD, a new RDD instance is created.

    Returns
    -------
    :py:class:`sparkquantum.dtqw.operator.Operator`
        A new operator object.

    Raises
    ------
    NotImplementedError
        If this object's coordinate format is not :py:const:`sparkquantum.constants.MatrixCoordinateDefault`.

    """
    return Operator.from_matrix(super().clear())
Python
def has_site(self, site):
    """Indicate whether this mesh comprises a site.

    Parameters
    ----------
    site : int
        Site number.

    Raises
    ------
    NotImplementedError
        This method must not be called from this class, because the successor classes should implement it.

    """
    raise NotImplementedError
Python
def create_operator(self, cspace,
                    repr_format=constants.StateRepresentationFormatCoinPosition,
                    perc_mode=constants.PercolationsGenerationModeBroadcast):
    """Build the shift operator for a quantum walk.

    Parameters
    ----------
    cspace : int
        The size of the coin space.
    repr_format : int, optional
        Indicate how the quantum system is represented.
        Default value is :py:const:`sparkquantum.constants.StateRepresentationFormatCoinPosition`.
    perc_mode : int, optional
        Indicate how the percolations will be generated.
        Default value is :py:const:`sparkquantum.constants.PercolationsGenerationModeBroadcast`.

    Raises
    ------
    NotImplementedError
        This method must not be called from this class, because the successor classes should implement it.

    """
    raise NotImplementedError
Python
def ndarray(self):
    """Create a Numpy array containing this object's RDD data.

    Notes
    -----
    This method calls the :py:func:`pyspark.RDD.collect` method.
    This is not suitable for large working sets, as all data may not fit into main memory.

    Returns
    -------
    :py:class:`numpy.ndarray`
        The Numpy array.

    """
    ndim = len(self._domain)

    if ndim == 1 and self._shape[1] == 2:
        # One-dimensional grids with just one random variable
        shape = (max(self._domain[0]) - min(self._domain[0]) + 1, 1)
    elif ndim == 1 and self._shape[1] == 3:
        # One-dimensional grids with two random variables
        shape = (max(self._domain[0]) - min(self._domain[0]) + 1,
                 max(self._domain[0]) - min(self._domain[0]) + 1)
    elif ndim == 2 and self._shape[1] == 3:
        # Two-dimensional grids with one random variable
        shape = (max(self._domain[0]) - min(self._domain[0]) + 1,
                 max(self._domain[1]) - min(self._domain[1]) + 1)
    else:
        self._logger.error(
            "incompatible domain dimension and number of variables to create a Numpy array")
        raise NotImplementedError(
            "incompatible domain dimension and number of variables to create a Numpy array")

    data = self._data.collect()

    result = np.zeros(shape, dtype=self._dtype)

    for e in data:
        result[e[0:-1]] = e[-1]

    return result
Python
def sum(self):
    """Sum the probabilities of this probability distribution.

    Returns
    -------
    float
        The sum of the probabilities.

    """
    # The zero value of this distribution's dtype (e.g., 0.0 for float),
    # used to filter out zero-valued entries before reducing
    zero = self._dtype()

    return self.data.filter(
        lambda m: m[-1] != zero
    ).map(
        lambda m: m[-1]
    ).reduce(
        lambda a, b: a + b
    )
Python
def norm(self):
    """Calculate the norm of this probability distribution.

    Returns
    -------
    float
        The norm of this probability distribution.

    """
    # The zero value of this distribution's dtype, used to skip zero entries
    zero = self._dtype()

    n = self.data.filter(
        lambda m: m[-1] != zero
    ).map(
        lambda m: m[-1] ** 2
    ).reduce(
        lambda a, b: a + b
    )

    return math.sqrt(n)
Python
def max(self):
    """Find the maximum value of this probability distribution.

    Returns
    -------
    float
        The maximum value of this probability distribution.

    """
    return self.data.map(
        lambda m: m[-1]
    ).max()
Python
def min(self):
    """Find the minimum value of this probability distribution.

    Returns
    -------
    float
        The minimum value of this probability distribution.

    """
    return self.data.map(
        lambda m: m[-1]
    ).min()
Python
def dump(self, mode, glue=' ', path=None, codec=None, filename=None,
         format=constants.StateDumpingFormatIndex):
    """Dump this object's RDD to disk in a unique file or in many part-* files.

    Notes
    -----
    Depending on the chosen dumping mode, this method calls the
    :py:func:`pyspark.RDD.collect` method. This is not suitable for large working sets,
    as all data may not fit into driver's main memory.

    Parameters
    ----------
    mode : int
        Storage mode used to dump this state.
    glue : str, optional
        The glue string that connects each component of each element in the RDD.
        Default value is ' '.
    path : str, optional
        The path used when the dumping mode is in many part-* files.
        Default value is None.
    codec : str, optional
        Codec name used to compress the dumped data.
        Default value is None.
    filename : str, optional
        The full path with file name used when the dumping mode is in a single file.
        Default value is None.
    format : int, optional
        Printing format used to dump this state.
        Default value is :py:const:`sparkquantum.constants.StateDumpingFormatIndex`.

    Raises
    ------
    NotImplementedError
        If the dimension of the mesh is not valid.
    ValueError
        If this state's coordinate format is not :py:const:`sparkquantum.constants.MatrixCoordinateDefault` or
        if the chosen dumping mode or dumping format is not valid.

    """
    if self._coord_format != constants.MatrixCoordinateDefault:
        self._logger.error("invalid coordinate format")
        raise ValueError("invalid coordinate format")

    rdd = self.clear().data

    if format == constants.StateDumpingFormatIndex:
        rdd = rdd.map(
            lambda m: glue.join((str(m[0]), str(m[1]), str(m[2])))
        )
    elif format == constants.StateDumpingFormatCoordinate:
        repr_format = self._repr_format

        ndim = self._mesh.ndim
        csubspace = 2
        cspace = csubspace ** ndim
        psubspace = self._mesh.shape
        pspace = self._mesh.sites
        particles = self._particles

        cpspace = cspace * pspace

        if ndim == 1:
            mesh_offset = min(self._mesh.axis()[0])

            if repr_format == constants.StateRepresentationFormatCoinPosition:
                def __map(m):
                    ix = []

                    for p in range(particles):
                        # Coin
                        ix.append(
                            str(int(m[0] / (cpspace ** (particles - 1 - p) * psubspace[0])) % csubspace))
                        # Position
                        ix.append(
                            str(int(m[0] / (cpspace ** (particles - 1 - p))) % psubspace[0] + mesh_offset))

                    ix.append(str(m[2]))

                    return glue.join(ix)
            elif repr_format == constants.StateRepresentationFormatPositionCoin:
                def __map(m):
                    xi = []

                    for p in range(particles):
                        # Position
                        xi.append(
                            str(int(m[0] / (cpspace ** (particles - 1 - p) * csubspace)) % psubspace[0] + mesh_offset))
                        # Coin
                        xi.append(
                            str(int(m[0] / (cpspace ** (particles - 1 - p))) % csubspace))

                    xi.append(str(m[2]))

                    return glue.join(xi)
            else:
                self._logger.error("invalid representation format")
                raise ValueError("invalid representation format")
        elif ndim == 2:
            axis = self._mesh.axis()
            mesh_offset = (min(axis[0]), min(axis[1]))

            if repr_format == constants.StateRepresentationFormatCoinPosition:
                def __map(m):
                    ijxy = []

                    for p in range(particles):
                        # Coin
                        ijxy.append(
                            str(int(m[0] / (cpspace ** (particles - 1 - p) * csubspace * psubspace[0] * psubspace[1])) % csubspace))
                        ijxy.append(
                            str(int(m[0] / (cpspace ** (particles - 1 - p) * psubspace[0] * psubspace[1])) % csubspace))
                        # Position
                        ijxy.append(
                            str(int(m[0] / (cpspace ** (particles - 1 - p) * psubspace[1])) % psubspace[0] + mesh_offset[0]))
                        ijxy.append(
                            str(int(m[0] / (cpspace ** (particles - 1 - p))) % psubspace[1] + mesh_offset[1]))

                    ijxy.append(str(m[2]))

                    return glue.join(ijxy)
            elif repr_format == constants.StateRepresentationFormatPositionCoin:
                def __map(m):
                    xyij = []

                    for p in range(particles):
                        # Position
                        xyij.append(
                            str(int(m[0] / (cpspace ** (particles - 1 - p) * cspace * psubspace[1])) % psubspace[0] + mesh_offset[0]))
                        xyij.append(
                            str(int(m[0] / (cpspace ** (particles - 1 - p) * cspace)) % psubspace[1] + mesh_offset[1]))
                        # Coin
                        xyij.append(
                            str(int(m[0] / (cpspace ** (particles - 1 - p) * csubspace)) % csubspace))
                        xyij.append(
                            str(int(m[0] / (cpspace ** (particles - 1 - p))) % csubspace))

                    xyij.append(str(m[2]))

                    return glue.join(xyij)
            else:
                self._logger.error("invalid representation format")
                raise ValueError("invalid representation format")
        else:
            self._logger.error("mesh dimension not implemented")
            raise NotImplementedError("mesh dimension not implemented")

        rdd = rdd.map(__map)
    else:
        self._logger.error("invalid dumping format")
        raise ValueError("invalid dumping format")

    if mode == constants.DumpingModeUniqueFile:
        data = rdd.collect()

        with open(filename, 'a') as f:
            for d in data:
                f.write(d + "\n")
    elif mode == constants.DumpingModePartFiles:
        rdd.saveAsTextFile(path, codec)
    else:
        self._logger.error("invalid dumping mode")
        raise ValueError("invalid dumping mode")
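# Self-contained sketch of the index arithmetic used in the 1-D coin-position
# branch of dump() above: a composite state index is decoded into one
# (coin, position) pair per particle, most-significant particle first. The
# function below is only an illustration of that arithmetic.
def decode_1d_coin_position(index, sites, particles, coin=2):
    cpspace = coin * sites
    pairs = []
    for p in range(particles):
        block = cpspace ** (particles - 1 - p)
        pairs.append(((index // (block * sites)) % coin,   # coin value
                      (index // block) % sites))           # position
    return pairs

# Two particles on 5 sites: particle 1 -> (coin 1, site 3), particle 2 -> (coin 0, site 4)
index = (1 * 5 + 3) * (2 * 5) + (0 * 5 + 4)
assert decode_1d_coin_position(index, sites=5, particles=2) == [(1, 3), (0, 4)]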
Python
def to_coordinate(self, coord_format):
    """Change the coordinate format of this object.

    Notes
    -----
    Due to the immutability of RDD, a new RDD instance is created
    in the desired coordinate format. Thus, a new instance of this class
    is returned with this RDD.

    Parameters
    ----------
    coord_format : int
        The new coordinate format of this object.

    Returns
    -------
    :py:class:`sparkquantum.dtqw.state.State`
        A new state object with the RDD in the desired coordinate format.

    """
    return State.from_matrix(super().to_coordinate(coord_format),
                             self._mesh, self._particles, self._repr_format)
Python
def measure(self, state, particle=None, storage_level=StorageLevel.MEMORY_AND_DISK):
    """Perform the measurement of the system state.

    Raises
    ------
    NotImplementedError
        This method must not be called from this class, because the successor classes should implement it.

    """
    raise NotImplementedError
Python
def generate(self, edges, perc_mode=constants.PercolationsGenerationModeBroadcast):
    """Generate mesh percolations based on its probability to have a broken link.

    Parameters
    ----------
    edges : int
        Number of edges of the mesh.
    perc_mode : int, optional
        Indicate how the percolations will be generated.
        Default value is :py:const:`sparkquantum.constants.PercolationsGenerationModeBroadcast`.

    Returns
    -------
    :py:class:`pyspark.RDD` or :py:class:`pyspark.Broadcast`
        The :py:class:`pyspark.RDD` or :py:class:`pyspark.Broadcast` dict which keys are
        the numbered edges that are broken, depending on the chosen
        'sparkquantum.dtqw.mesh.percolation.generationMode' configuration.

    Raises
    ------
    ValueError
        If the chosen 'sparkquantum.dtqw.mesh.percolation.generationMode' configuration is not valid.

    """
    prob = self._probability

    rdd = self._sc.range(
        edges
    ).map(
        lambda m: (m, random.random() < prob)
    ).filter(
        lambda m: m[1] is True
    )

    if perc_mode == constants.PercolationsGenerationModeRDD:
        return rdd
    elif perc_mode == constants.PercolationsGenerationModeBroadcast:
        return util.broadcast(self._sc, rdd.collectAsMap())
    else:
        self._logger.error("invalid percolations generation mode")
        raise ValueError("invalid percolations generation mode")
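# Local (non-Spark) sketch of the sampling rule used above: each edge is broken
# independently with probability `prob`, and only broken edges are kept. The
# helper below is illustrative only.
import random

def sample_broken_edges(edges, prob, seed=None):
    rng = random.Random(seed)
    return {e: True for e in range(edges) if rng.random() < prob}

broken = sample_broken_edges(edges=1000, prob=0.1, seed=42)
assert all(0 <= e < 1000 for e in broken)
# For prob=0.1, roughly 10% of the edges are expected to be broken.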
Python
def create_operator(self, repr_format=constants.StateRepresentationFormatCoinPosition,
                    perc_mode=constants.PercolationsGenerationModeBroadcast):
    """Build the shift operator for a quantum walk.

    Parameters
    ----------
    repr_format : int, optional
        Indicate how the quantum system is represented.
        Default value is :py:const:`sparkquantum.constants.StateRepresentationFormatCoinPosition`.
    perc_mode : int, optional
        Indicate how the percolations will be generated.
        Default value is :py:const:`sparkquantum.constants.PercolationsGenerationModeBroadcast`.

    Returns
    -------
    :py:class:`sparkquantum.dtqw.operator.Operator`
        The created operator using this mesh.

    Raises
    ------
    ValueError
        If `repr_format` or `perc_mode` is not valid.

    """
    cspace = 2 ** self._ndim
    pspace = self._sites

    shape = (cspace * pspace, cspace * pspace)

    nelem = shape[0]

    if self._percolation:
        percolation = self._percolation.generate(self._edges, perc_mode=perc_mode)

        if perc_mode == constants.PercolationsGenerationModeRDD:
            if repr_format == constants.StateRepresentationFormatCoinPosition:
                def __map(e):
                    """e = (edge, (edge, broken or not))"""
                    for i in range(cspace):
                        l = (-1) ** i

                        # Finding the correspondent x coordinate of the
                        # vertex from the edge number
                        x = (e[1][0] - i - l) % pspace

                        if e[1][1]:
                            bl = 0
                        else:
                            if x + l >= pspace or x + l < 0:
                                bl = 0
                            else:
                                bl = l

                        yield (i + bl) * pspace + x + bl, (1 - i) * pspace + x, 1
            elif repr_format == constants.StateRepresentationFormatPositionCoin:
                def __map(e):
                    """e = (edge, (edge, broken or not))"""
                    for i in range(cspace):
                        l = (-1) ** i

                        # Finding the correspondent x coordinate of the
                        # vertex from the edge number
                        x = (e[1][0] - i - l) % pspace

                        if e[1][1]:
                            bl = 0
                        else:
                            if x + l >= pspace or x + l < 0:
                                bl = 0
                            else:
                                bl = l

                        yield (x + bl) * cspace + i + bl, x * cspace + 1 - i, 1
            else:
                self._logger.error("invalid representation format")
                raise ValueError("invalid representation format")

            rdd = self._sc.range(
                self._edges
            ).map(
                lambda m: (m, m)
            ).leftOuterJoin(
                percolation
            ).flatMap(
                __map
            )
        elif perc_mode == constants.PercolationsGenerationModeBroadcast:
            if repr_format == constants.StateRepresentationFormatCoinPosition:
                def __map(e):
                    for i in range(cspace):
                        l = (-1) ** i

                        # Finding the correspondent x coordinate of the
                        # vertex from the edge number
                        x = (e - i - l) % pspace

                        if e in percolation.value:
                            bl = 0
                        else:
                            if x + l >= pspace or x + l < 0:
                                bl = 0
                            else:
                                bl = l

                        yield (i + bl) * pspace + x + bl, (1 - i) * pspace + x, 1
            elif repr_format == constants.StateRepresentationFormatPositionCoin:
                def __map(e):
                    for i in range(cspace):
                        l = (-1) ** i

                        # Finding the correspondent x coordinate of the
                        # vertex from the edge number
                        x = (e - i - l) % pspace

                        if e in percolation.value:
                            bl = 0
                        else:
                            if x + l >= pspace or x + l < 0:
                                bl = 0
                            else:
                                bl = l

                        yield (x + bl) * cspace + i + bl, x * cspace + 1 - i, 1
            else:
                percolation.unpersist()

                self._logger.error("invalid representation format")
                raise ValueError("invalid representation format")

            rdd = self._sc.range(
                self._edges
            ).flatMap(
                __map
            )
        else:
            self._logger.error("invalid percolations generation mode")
            raise ValueError("invalid percolations generation mode")
    else:
        if repr_format == constants.StateRepresentationFormatCoinPosition:
            def __map(x):
                for i in range(cspace):
                    l = (-1) ** i

                    if x + l >= pspace or x + l < 0:
                        bl = 0
                    else:
                        bl = l

                    yield (i + bl) * pspace + x + bl, (1 - i) * pspace + x, 1
        elif repr_format == constants.StateRepresentationFormatPositionCoin:
            def __map(x):
                for i in range(cspace):
                    l = (-1) ** i

                    if x + l >= pspace or x + l < 0:
                        bl = 0
                    else:
                        bl = l

                    yield (x + bl) * cspace + i + bl, x * cspace + (1 - i), 1
        else:
            self._logger.error("invalid representation format")
            raise ValueError("invalid representation format")

        rdd = self._sc.range(
            pspace
        ).flatMap(
            __map
        )

    return Operator(rdd, shape, dtype=int, nelem=nelem)
Python
def find_bucketing_step(pipeline: Pipeline, identifier: str = "bucketingprocess"):
    """
    Finds a specific step in a sklearn Pipeline that has a 'name' attribute equalling 'identifier'.

    This is useful to extract certain steps from a pipeline, e.g. a BucketingProcess.

    Args:
        pipeline (sklearn.pipeline.Pipeline): sklearn pipeline
        identifier (str): the attribute used to find the pipeline step

    Returns:
        index (int): position of bucketing step in pipeline.steps
    """
    # Find the bucketing pipeline step
    bucket_pipes = [s for s in pipeline.steps if getattr(s[1], "name", "") == identifier]

    # Raise error if missing
    if len(bucket_pipes) == 0:
        msg = """
        Did not find a bucketing pipeline step. Identify the bucketing pipeline step
        using skorecard.pipeline.make_bucketing_pipeline or skorecard.pipeline.make_prebucketing_pipeline.
        Note that the pipeline should always have a skorecard.pipeline.make_prebucketing_pipeline defined.
        If you do not need prebucketing simply leave it empty.

        Example:

        ```python
        from sklearn.pipeline import make_pipeline
        from skorecard.pipeline import make_bucketing_pipeline, make_prebucketing_pipeline

        pipeline = make_pipeline(
            make_prebucketing_pipeline(),
            make_bucketing_pipeline(
                OptimalBucketer(variables=num_cols, max_n_bins=10, min_bin_size=0.05),
                OptimalBucketer(variables=cat_cols, variables_type="categorical", max_n_bins=10, min_bin_size=0.05),
            )
        )
        ```
        """
        raise AssertionError(msg)

    if len(bucket_pipes) > 1:
        msg = """
        You need to identify only one bucketing step, using skorecard.pipeline.make_bucketing_pipeline
        and skorecard.pipeline.make_prebucketing_pipeline only once.

        Example:

        ```python
        from skorecard.pipeline import make_bucketing_pipeline
        bucket_pipeline = make_bucketing_pipeline(
            OptimalBucketer(variables=num_cols, max_n_bins=10, min_bin_size=0.05),
            OptimalBucketer(variables=cat_cols, variables_type="categorical", max_n_bins=10, min_bin_size=0.05),
        )
        ```
        """
        raise AssertionError(msg)

    index_bucket_pipeline = pipeline.steps.index(bucket_pipes[0])

    return index_bucket_pipeline
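# Minimal usage sketch with plain sklearn objects: any step whose estimator
# exposes a matching `name` attribute is found by its position in `steps`.
# `DummyBucketer` below is an illustrative stand-in, not a skorecard class.
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

class DummyBucketer(BaseEstimator, TransformerMixin):
    name = "bucketingprocess"  # attribute checked by find_bucketing_step

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        return X

pipeline = Pipeline([
    ("scale", StandardScaler()),
    ("bucket", DummyBucketer()),
])

assert find_bucketing_step(pipeline) == 1  # second step in pipeline.steps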