content (string, lengths 35 to 762k)
sha1 (string, length 40)
id (int64, 0 to 3.66M)
import os


def get_exploration_components_from_dir(dir_path):
    """Gets the (yaml, assets) from the contents of an exploration data dir.

    Args:
        dir_path: str. A full path to the exploration root directory.

    Returns:
        *. A 2-tuple, the first element of which is a yaml string, and the
        second element of which is a list of (filepath, content) 2-tuples.
        The filepath does not include the assets/ prefix.

    Raises:
        Exception: if the following condition doesn't hold: "There is exactly
            one file not in assets/, and this file has a .yaml suffix".
    """
    yaml_content = None
    assets_list = []

    dir_path_array = dir_path.split('/')
    while dir_path_array[-1] == '':
        dir_path_array = dir_path_array[:-1]
    dir_path_length = len(dir_path_array)

    for root, dirs, files in os.walk(dir_path):
        for directory in dirs:
            if root == dir_path and directory != 'assets':
                raise Exception(
                    'The only directory in %s should be assets/' % dir_path)

        for filename in files:
            filepath = os.path.join(root, filename)
            if root == dir_path:
                if filepath.endswith('.DS_Store'):
                    # These files are added automatically by Mac OS X systems.
                    # We ignore them.
                    continue
                if yaml_content is not None:
                    raise Exception('More than one non-asset file specified '
                                    'for %s' % dir_path)
                elif not filepath.endswith('.yaml'):
                    raise Exception('Found invalid non-asset file %s. There '
                                    'should only be a single non-asset file, '
                                    'and it should have a .yaml suffix.'
                                    % filepath)
                else:
                    yaml_content = get_file_contents(filepath)
            else:
                filepath_array = filepath.split('/')
                # The additional offset is to remove the 'assets/' prefix.
                filename = '/'.join(filepath_array[dir_path_length + 1:])
                assets_list.append((filename, get_file_contents(
                    filepath, raw_bytes=True)))

    if yaml_content is None:
        raise Exception('No yaml file specified for %s' % dir_path)

    return yaml_content, assets_list
945690e496846e9cf6c0443ff8cc57ff18d5d056
11,700
import time
from functools import wraps


def time_this_function(func):
    """Time the function. Use as a decorator.

    Examples
    --------
    ::

        @time_this_function
        def func(x):
            return x

        a = func(1)

    Parameters
    ----------
    func: Callable
        function

    Returns
    -------
    result
        function results
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        print(func.__name__, "time", end - start)
        return result

    return wrapper
6ee2d12dc2301e1c3efe2ca02548297aa83d316f
11,701
import numpy as np
import matplotlib.pyplot as plt


def power_plot(data, sfreq, toffset, log_scale, zscale, title):
    """Plot the computed power of the iq data."""
    print("power")
    t_axis = np.arange(0, len(data)) / sfreq + toffset
    if log_scale:
        lrxpwr = 10 * np.log10(data + 1e-12)
    else:
        lrxpwr = data
    zscale_low, zscale_high = zscale
    if zscale_low == 0 and zscale_high == 0:
        if log_scale:
            zscale_low = np.min(lrxpwr[np.where(lrxpwr.real != -np.Inf)])
            zscale_high = np.max(lrxpwr) + 3.0
        else:
            zscale_low = np.min(lrxpwr)
            zscale_high = np.max(lrxpwr)

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(t_axis, lrxpwr.real)
    ax.grid(True)
    ax.axis([toffset, t_axis[len(t_axis) - 1], zscale_low, zscale_high])
    ax.set_xlabel("time (seconds)")
    if log_scale:
        ax.set_ylabel("power (dB)")
    else:
        ax.set_ylabel("power")
    ax.set_title(title)
    return fig
06d7fab09c027ec2dbf7a11fa78f3b6fcd97e2d7
11,702
import torchvision
import torch
from torchvision import transforms


def load_dataset(dataset):
    """
    Loads a dataset and returns train, val and test partitions.
    """
    dataset_to_class = {
        'mnist': torchvision.datasets.MNIST,
        'cifar10': torchvision.datasets.CIFAR10,
        'fa-mnist': torchvision.datasets.FashionMNIST
    }
    assert dataset in dataset_to_class.keys()
    transform = transforms.Compose([transforms.ToTensor()])
    train_dataset = dataset_to_class[dataset](
        root='./data', train=True, download=True, transform=transform)
    train_split, val_split = torch.utils.data.random_split(
        train_dataset, lengths=[len(train_dataset) - 10000, 10000])
    test_split = dataset_to_class[dataset](
        root='./data', train=False, download=True, transform=transform)
    return train_split, val_split, test_split
e17dcec84603a742cb6eec0fa18ad40af2454461
11,703
def compareTo(s1, s2):
    """Compares two strings to check if they are the same length and whether
    one is longer than the other"""
    move_slice1 = 0
    move_slice2 = 1
    if s1[move_slice1:move_slice2] == '' and s2[move_slice1:move_slice2] == '':
        return 0  # return 0 if same length
    elif s1[move_slice1:move_slice2] == '' and s2[move_slice1:move_slice2] != '':
        return len(s2) * -1  # return negative number if s2 > s1
    elif s1[move_slice1:move_slice2] != '' and s2[move_slice1:move_slice2] == '':
        return len(s1)  # return positive number if s1 > s2
    else:
        # with each new call, the next object in the string is checked if empty or not
        move_slice1 += 1
        move_slice2 += 1
        return compareTo(s1[1:], s2[1:])
4700360d10561227a6d4995c66953993dce1cea3
11,704
def is_unique(x):
    # A set cannot contain any duplicate, so we just check that the length of
    # the list is the same as the length of the corresponding set
    """Check that the given list x has no duplicate

    Returns:
        boolean: tells if there are only unique values or not

    Args:
        x (list): elements to be compared
    """
    return len(x) == len(set(x))
12b4513a71fc1b423366de3f48dd9e21db79e73a
11,705
def str2format(fmt, ignore_types=None):
    """Convert a string to a list of formats."""
    ignore_types = ignore_types if ignore_types else ()
    token_to_format = {
        "s": "",
        "S": "",
        "d": "g",
        "f": "f",
        "e": "e",
    }
    base_fmt = "{{:{}}}"
    out = []
    for i, token in enumerate(fmt.split(",")):
        n = token[:-1]
        if i in ignore_types:
            out.append(base_fmt.format(n.split(".")[0]))
        elif token[-1].lower() == "s":
            out.append(base_fmt.format("{}.{}".format(n, n)))
        else:
            out.append(base_fmt.format(">{}{}".format(n, token_to_format[token[-1]])))
    return out
9cbe719abe6b37a0adcd52af250dfe768f850ffa
11,706
from pathlib import Path

import pandas as pd


def _read_concordance(filename: Path, Sample_IDs: pd.Index) -> pd.DataFrame:
    """Create a flag of known replicates that show low concordance.

    Given a set of samples that are known to be from the same Subject. Flag
    samples that show low concordance with one or more replicates.

    Returns:
        pd.Series:
            - Sample_ID (pd.Index)
            - is_discordant_replicate (bool): True if replicates show a
              concordance below the supplied threshold. Otherwise False.
    """
    df = sample_concordance.read(filename)
    return (
        df.melt(
            id_vars=["is_discordant_replicate"],
            value_vars=["Sample_ID1", "Sample_ID2"],
            var_name="To_Drop",
            value_name="Sample_ID",
        )
        .drop("To_Drop", axis=1)
        .groupby("Sample_ID")
        .max()  # Flag a sample as True if it is True for any comparison.
        .astype("boolean")
        .reindex(Sample_IDs)
    )
2c7049ff5b521927ffbf863471fc23e073bce531
11,707
def load_image(name):
    """ Get and cache an enaml Image for the given icon name. """
    path = icon_path(name)
    global _IMAGE_CACHE
    if path not in _IMAGE_CACHE:
        with open(path, 'rb') as f:
            data = f.read()
        _IMAGE_CACHE[path] = Image(data=data)
    return _IMAGE_CACHE[path]
6ce56c1a9d4d9e80d25a19aca239f43ebd119840
11,708
def add_shipment_comment(
    tracking_id: str,
    body: CreateComment = Body(...),
    client: VBR_Api = Depends(vbr_admin_client),
):
    """Add a Comment to a Shipment.

    Requires: **VBR_WRITE_PUBLIC**"""
    tracking_id = sanitize_identifier_string(tracking_id)
    shipment = client.get_shipment_by_tracking_id(tracking_id)
    data_event = client.create_and_link(comment=body.comment, link_target=shipment)[0]
    return Comment(comment=data_event.comment, timestamp=data_event.event_ts)
d115d230bbf47a8f1cf625f0ab66e855f382244c
11,709
import os def train_faster_rcnn_alternating(base_model_file_name, debug_output=False): """ 4-Step Alternating Training scheme from the Faster R-CNN paper: # Create initial network, only rpn, without detection network # --> train only the rpn (and conv3_1 and up for VGG16) # buffer region proposals from rpn # Create full network, initialize conv layers with imagenet, use buffered proposals # --> train only detection network (and conv3_1 and up for VGG16) # Keep conv weights from detection network and fix them # --> train only rpn # buffer region proposals from rpn # Keep conv and rpn weights from step 3 and fix them # --> train only detection network """ # Learning parameters rpn_lr_factor = globalvars['rpn_lr_factor'] rpn_lr_per_sample_scaled = [x * rpn_lr_factor for x in cfg["CNTK"].RPN_LR_PER_SAMPLE] frcn_lr_factor = globalvars['frcn_lr_factor'] frcn_lr_per_sample_scaled = [x * frcn_lr_factor for x in cfg["CNTK"].FRCN_LR_PER_SAMPLE] l2_reg_weight = cfg["CNTK"].L2_REG_WEIGHT mm_schedule = momentum_schedule(globalvars['momentum_per_mb']) rpn_epochs = globalvars['rpn_epochs'] frcn_epochs = globalvars['frcn_epochs'] print("Using base model: {}".format(cfg["CNTK"].BASE_MODEL)) print("rpn_lr_per_sample: {}".format(rpn_lr_per_sample_scaled)) print("frcn_lr_per_sample: {}".format(frcn_lr_per_sample_scaled)) if debug_output: print("Storing graphs and models to %s." % globalvars['output_path']) # Input variables denoting features, labeled ground truth rois (as 5-tuples per roi) and image dimensions image_input = input_variable((num_channels, image_height, image_width), dynamic_axes=[Axis.default_batch_axis()], name=feature_node_name) feat_norm = image_input - normalization_const roi_input = input_variable((cfg["CNTK"].INPUT_ROIS_PER_IMAGE, 5), dynamic_axes=[Axis.default_batch_axis()]) scaled_gt_boxes = alias(roi_input, name='roi_input') dims_input = input_variable((6), dynamic_axes=[Axis.default_batch_axis()]) dims_node = alias(dims_input, name='dims_input') rpn_rois_input = input_variable((cfg["TRAIN"].RPN_POST_NMS_TOP_N, 4), dynamic_axes=[Axis.default_batch_axis()]) rpn_rois_buf = alias(rpn_rois_input, name='rpn_rois') # base image classification model (e.g. VGG16 or AlexNet) base_model = load_model(base_model_file_name) print("stage 1a - rpn") if True: # Create initial network, only rpn, without detection network # initial weights train? # conv: base_model only conv3_1 and up # rpn: init new yes # frcn: - - # conv layers conv_layers = clone_conv_layers(base_model) conv_out = conv_layers(feat_norm) # RPN and losses rpn_rois, rpn_losses = create_rpn(conv_out, scaled_gt_boxes, dims_node, proposal_layer_param_string=cfg["CNTK"].PROPOSAL_LAYER_PARAMS) stage1_rpn_network = combine([rpn_rois, rpn_losses]) # train if debug_output: plot(stage1_rpn_network, os.path.join(globalvars['output_path'], "graph_frcn_train_stage1a_rpn." + cfg["CNTK"].GRAPH_TYPE)) train_model(image_input, roi_input, dims_input, rpn_losses, rpn_losses, rpn_lr_per_sample_scaled, mm_schedule, l2_reg_weight, epochs_to_train=rpn_epochs) print("stage 1a - buffering rpn proposals") buffered_proposals_s1 = compute_rpn_proposals(stage1_rpn_network, image_input, roi_input, dims_input) print("stage 1b - frcn") if True: # Create full network, initialize conv layers with imagenet, fix rpn weights # initial weights train? 
# conv: base_model only conv3_1 and up # rpn: stage1a rpn model no --> use buffered proposals # frcn: base_model + new yes # conv_layers conv_layers = clone_conv_layers(base_model) conv_out = conv_layers(feat_norm) # use buffered proposals in target layer rois, label_targets, bbox_targets, bbox_inside_weights = \ create_proposal_target_layer(rpn_rois_buf, scaled_gt_boxes, num_classes=globalvars['num_classes']) # Fast RCNN and losses fc_layers = clone_model(base_model, [pool_node_name], [last_hidden_node_name], CloneMethod.clone) cls_score, bbox_pred = create_fast_rcnn_predictor(conv_out, rois, fc_layers) detection_losses = create_detection_losses(cls_score, label_targets, rois, bbox_pred, bbox_targets, bbox_inside_weights) pred_error = classification_error(cls_score, label_targets, axis=1, name="pred_error") stage1_frcn_network = combine([rois, cls_score, bbox_pred, detection_losses, pred_error]) # train if debug_output: plot(stage1_frcn_network, os.path.join(globalvars['output_path'], "graph_frcn_train_stage1b_frcn." + cfg["CNTK"].GRAPH_TYPE)) train_model(image_input, roi_input, dims_input, detection_losses, pred_error, frcn_lr_per_sample_scaled, mm_schedule, l2_reg_weight, epochs_to_train=frcn_epochs, rpn_rois_input=rpn_rois_input, buffered_rpn_proposals=buffered_proposals_s1) buffered_proposals_s1 = None print("stage 2a - rpn") if True: # Keep conv weights from detection network and fix them # initial weights train? # conv: stage1b frcn model no # rpn: stage1a rpn model yes # frcn: - - # conv_layers conv_layers = clone_model(stage1_frcn_network, [feature_node_name], [last_conv_node_name], CloneMethod.freeze) conv_out = conv_layers(image_input) # RPN and losses rpn = clone_model(stage1_rpn_network, [last_conv_node_name, "roi_input", "dims_input"], ["rpn_rois", "rpn_losses"], CloneMethod.clone) rpn_net = rpn(conv_out, dims_node, scaled_gt_boxes) rpn_rois = rpn_net.outputs[0] rpn_losses = rpn_net.outputs[1] stage2_rpn_network = combine([rpn_rois, rpn_losses]) # train if debug_output: plot(stage2_rpn_network, os.path.join(globalvars['output_path'], "graph_frcn_train_stage2a_rpn." + cfg["CNTK"].GRAPH_TYPE)) train_model(image_input, roi_input, dims_input, rpn_losses, rpn_losses, rpn_lr_per_sample_scaled, mm_schedule, l2_reg_weight, epochs_to_train=rpn_epochs) print("stage 2a - buffering rpn proposals") buffered_proposals_s2 = compute_rpn_proposals(stage2_rpn_network, image_input, roi_input, dims_input) print("stage 2b - frcn") if True: # Keep conv and rpn weights from step 3 and fix them # initial weights train? # conv: stage2a rpn model no # rpn: stage2a rpn model no --> use buffered proposals # frcn: stage1b frcn model yes - # conv_layers conv_layers = clone_model(stage2_rpn_network, [feature_node_name], [last_conv_node_name], CloneMethod.freeze) conv_out = conv_layers(image_input) # Fast RCNN and losses frcn = clone_model(stage1_frcn_network, [last_conv_node_name, "rpn_rois", "roi_input"], ["cls_score", "bbox_regr", "rpn_target_rois", "detection_losses", "pred_error"], CloneMethod.clone) stage2_frcn_network = frcn(conv_out, rpn_rois_buf, scaled_gt_boxes) detection_losses = stage2_frcn_network.outputs[3] pred_error = stage2_frcn_network.outputs[4] # train if debug_output: plot(stage2_frcn_network, os.path.join(globalvars['output_path'], "graph_frcn_train_stage2b_frcn." 
+ cfg["CNTK"].GRAPH_TYPE)) train_model(image_input, roi_input, dims_input, detection_losses, pred_error, frcn_lr_per_sample_scaled, mm_schedule, l2_reg_weight, epochs_to_train=frcn_epochs, rpn_rois_input=rpn_rois_input, buffered_rpn_proposals=buffered_proposals_s2) buffered_proposals_s2 = None return create_eval_model(stage2_frcn_network, image_input, dims_input, rpn_model=stage2_rpn_network)
cb0b255198f60da9415282f047396c8b501a43aa
11,710
def _mp2_energy(output_str):
    """ Reads the MP2 energy from the output file string.
        Returns the energy in Hartrees.

        :param output_str: string of the program's output file
        :type output_str: str
        :rtype: float
    """
    ene = ar.energy.read(
        output_str,
        app.one_of_these([
            app.escape('Total MP2 energy'),
            app.escape('MP2 energy')
        ]))
    return ene
febd9f4c5759cb6150ff16bda2e9050199c48c5f
11,711
def fastlcs(a, b, Dmax=None):
    """
    Return the length of the longest common subsequence, or 0 if the maximum
    number of differences Dmax cannot be respected.

    Implementation: see the excellent paper "An O(ND) Difference Algorithm and
    Its Variations" by EUGENE W. MYERS, 1986.

    NOTE:
        let D be the minimal number of insertions or deletions that transform A into B
        let L be the length of a longest common subsequence
        we always have D = M + N - 2 * L
    """
    N, M = len(a), len(b)
    if N + M == 0:
        return 0  # very special case...
    if Dmax is None:
        Dmax = N + M  # worst case
    else:
        Dmax = min(Dmax, M + N)  # a larger value does not make sense!
    assert Dmax >= 0, "SOFTWARE ERROR: Dmax must be a positive integer"

    sesLength = None
    W = [0] * (Dmax * 2 + 2)  # for i in -Dmax..Dmax, V[i] == W[i+Dmax]
    for D in range(0, Dmax + 1):
        for k in range(-D, +D + 1, 2):
            if k == -D or (k != D and W[k - 1 + Dmax] < W[k + 1 + Dmax]):
                # k == -D or (k != D and V[k-1] < V[k+1])
                x = W[k + 1 + Dmax]      # x = V[k+1]
            else:
                x = W[k - 1 + Dmax] + 1  # x = V[k-1]+1
            y = x - k
            while x < N and y < M and a[x] == b[y]:  # follow any snake
                x += 1
                y += 1
            W[k + Dmax] = x  # V[k] = x, farthest reaching point with D edits
            if x >= N and y >= M:
                sesLength = D
                L = (M + N - D) // 2
                assert D == M + N - L - L, ("INTERNAL SOFTWARE ERROR", M, N, D)
                return L
    return 0
d8a88c7ffaae892e48a292b7a045da9f5dc58173
11,712
from typing import Mapping
from typing import Optional
from typing import Text


def FeaturesExtractor(  # pylint: disable=invalid-name
    eval_config: config_pb2.EvalConfig,
    tensor_representations: Optional[Mapping[
        Text, schema_pb2.TensorRepresentation]] = None) -> extractor.Extractor:
  """Creates an extractor for extracting features.

  The extractor acts as follows depending on the existence of certain keys
  within the incoming extracts:

    1) Extracts contains tfma.ARROW_RECORD_BATCH_KEY

    The features stored in the RecordBatch will be extracted and added to the
    output extract under the key tfma.FEATURES_KEY and the raw serialized
    inputs will be added under the tfma.INPUT_KEY. Any extracts that already
    exist will be merged with the values from the RecordBatch with the
    RecordBatch values taking precedence when duplicate keys are detected. The
    tfma.ARROW_RECORD_BATCH_KEY key will be removed from the output extracts.

    2) Extracts contains tfma.FEATURES_KEY (but not tfma.ARROW_RECORD_BATCH_KEY)

    The operation will be a no-op and the incoming extracts will be passed as
    is to the output.

    3) Extracts contains neither tfma.FEATURES_KEY | tfma.ARROW_RECORD_BATCH_KEY

    An exception will be raised.

  Args:
    eval_config: Eval config.
    tensor_representations: Optional tensor representations to use when parsing
      the data. If tensor_representations are not passed or a representation is
      not found for a given feature name a default representation will be used
      where possible, otherwise an exception will be raised.

  Returns:
    Extractor for extracting features.
  """
  del eval_config
  # pylint: disable=no-value-for-parameter
  return extractor.Extractor(
      stage_name=_FEATURES_EXTRACTOR_STAGE_NAME,
      ptransform=_ExtractFeatures(tensor_representations or {}))
86e58783fca3ebb23de1e6b7ac9cdd4030e99c38
11,713
def assert__(engine, obj, condition, message=u'Assertion failed'):
    """:yaql:assert

    Evaluates condition against object. If it evaluates to true returns the
    object, otherwise throws an exception with provided message.

    :signature: obj.assert(condition, message => "Assertion failed")
    :arg obj: object to evaluate condition on
    :argType obj: any
    :arg condition: lambda function to be evaluated on obj. If result of
        function evaluates to false then throws exception message
    :argType condition: lambda
    :arg message: message to throw if condition returns false
    :argType message: string
    :returnType: obj type or message

    .. code::

        yaql> 12.assert($ < 2)
        Execution exception: Assertion failed
        yaql> 12.assert($ < 20)
        12
        yaql> [].assert($, "Failed assertion")
        Execution exception: Failed assertion
    """
    if utils.is_iterator(obj):
        obj = utils.memorize(obj, engine)
    if not condition(obj):
        raise AssertionError(message)
    return obj
c29e073bf6673ce0c89ed339c27f2287d6952991
11,714
def create_review(request, item_id, template_name="reviewclone/create_review.html"):
    """
    Current user can create a new review.

    Find the item with `item_id` then make sure the current user does not
    already have a review. If a review is found `review_exist` will be True.
    If the current user's review count is less than `REVIEWCLONE_REVIEW_MIN`,
    `random_item` will be 1 item the user has not reviewed yet.
    """
    review_exist = False
    random_item = None
    item = get_object_or_404(Item, pk=item_id)
    if Review.objects.filter(item=item, user=request.user).count() > 0:
        review_exist = True
    if request.POST:
        form = ReviewForm(request.POST)
        if form.is_valid() and review_exist == False:
            form.instance.user = request.user
            form.instance.item = item
            form.save()
            if form.cleaned_data.get('post_review_message') == True:
                request.facebook.graph.put_wall_post(
                    # TODO: Change to template
                    'I just gave \"%s\" %s Stars on reviewclone.com.' % (
                        item.name, form.instance.amount),
                    # TODO: Add attachment
                )
                messages.add_message(request, messages.INFO,
                                     'Your review was posted to your Facebook wall.')
            messages.add_message(request, messages.INFO, 'You reviewed %s.' % item)
            return HttpResponseRedirect(reverse('after_review', args=[form.instance.pk]))
    else:
        user_reviews = Review.objects.filter(user=request.user)
        if user_reviews.count() < settings.REVIEWCLONE_REVIEW_MIN:
            random_item = Item.objects.all().exclude(
                pk__in=user_reviews.values_list('item__pk')
            ).order_by('?')[0]
        form = ReviewForm()
    return render_to_response(
        template_name,
        {
            'item': item,
            'review_exist': review_exist,
            'form': form,
            'random': random_item,
        },
        context_instance=RequestContext(request)
    )
fe1806017cbce5e95be183be48c9e35a63a10e26
11,715
from typing import Iterable
from typing import List


def build_level_codes(incoming_column_name: str, levels: Iterable) -> List[str]:
    """
    Pick level names for a set of levels.

    :param incoming_column_name:
    :param levels:
    :return:
    """
    levels = [str(lev) for lev in levels]
    levels = [incoming_column_name + "_lev_" + clean_string(lev) for lev in levels]
    if len(set(levels)) != len(levels):
        levels = [levels[i] + "_" + str(i) for i in range(len(levels))]
    return levels
994ccc0673bd27dcce30709a97372c29d75a8e67
11,716
def get_all(isamAppliance, check_mode=False, force=False):
    """
    Get all rsyslog objects
    """
    return isamAppliance.invoke_get("Get all rsyslog objects",
                                    "/core/rsp_rsyslog_objs")
55ff144577a9ef25b555ca3a37db65bfdb0f0af4
11,717
def genomic_dup1_37_loc():
    """Create test fixture GRCh37 duplication subject"""
    return {
        "_id": "ga4gh:VSL.CXcLL6RUPkro3dLXN0miGEzlzPYiqw2q",
        "sequence_id": "ga4gh:SQ.VNBualIltAyi2AI_uXcKU7M9XUOuA7MS",
        "interval": {
            "type": "SequenceInterval",
            "start": {"value": 49568693, "type": "Number"},
            "end": {"value": 49568695, "type": "Number"},
        },
        "type": "SequenceLocation",
    }
470af80795c649bc0f4dd29393d1093c45c9f0da
11,718
def parse(f, _bytes):
    """
    Parse function will take a parser combinator and parse some set of bytes
    """
    if type(_bytes) == Parser:
        return f(_bytes)
    else:
        s = Parser(_bytes, 0)
        return f(s)
7f824c46477a384ce97f66806813f4b42412d6d8
11,719
def spiral_tm(wg_width=0.5, length=2):
    """ sample of component cutback """
    c = spiral_inner_io_euler(wg_width=wg_width, length=length, dx=10, dy=10, N=5)
    cc = add_gratings_and_loop_back(
        component=c,
        grating_coupler=pp.c.grating_coupler_elliptical_tm,
        bend_factory=pp.c.bend_circular,
    )
    return cc
e7fcec9e61984d8f89558d7479cc942db389ba3a
11,720
import time
import sys


def get_url_until_success(url):
    """Continuously tries to open a url until it succeeds or times out."""
    time_spent = 0
    while (time_spent < RECEIVE_TIMEOUT):
        try:
            helps = urllib2.urlopen(url)
            break
        except:
            time.sleep(RETRY_INTERVAL)
            time_spent += RETRY_INTERVAL
    if (time_spent >= RECEIVE_TIMEOUT):
        print >> sys.stderr, 'Timeout attempting to hit url: %s' % (url)
        sys.exit(1)
    return helps.read()
04c47d158393c1f8b809240b84d2aecc6fcbf8de
11,721
from typing import List
from typing import Tuple


def _chunk(fst: pynini.Fst) -> List[Tuple[str, str]]:
    """Chunks a string transducer into tuples.

    This function is given a string transducer of the form:

        il1 il2 il3 il4 il5 il6
        ol1 eps eps ol2 eps ol3

    And returns the list:

        [(il1 il2 il3, ol1), (il4 il5, ol2), (il6, ol3)]

    It thus recovers the "many-to-one" alignment.

    Args:
        fst: a string transducer containing the alignment.

    Returns:
        A list of string, char tuples.
    """
    # Input epsilon-normalization and removal forces a sensible alignment.
    fst = pynini.epsnormalize(fst).rmepsilon()
    assert (
        fst.properties(pynini.STRING, True) == pynini.STRING
    ), "FST is not a string automaton"
    alignment: List[Tuple[str, str]] = []
    state = 0
    arc = fst.arcs(state).value()
    assert arc.ilabel, f"Input label leaving state {state} contains epsilon"
    ilabels = bytearray([arc.ilabel])
    assert arc.olabel, f"Output label leaving state {state} contains epsilon"
    olabel = arc.olabel
    for state in range(1, fst.num_states() - 1):
        arc = fst.arcs(state).value()
        assert (
            arc.ilabel
        ), f"Input label leaving state {state} contains epsilon"
        # A non-epsilon olabel signals a new chunk.
        if arc.olabel:
            alignment.append((ilabels.decode("utf8"), chr(olabel)))
            ilabels.clear()
            olabel = arc.olabel
        ilabels.append(arc.ilabel)
    assert (
        ilabels
    ), f"Input label leaving penultimate state {state} contains epsilon"
    alignment.append((ilabels.decode("utf8"), chr(olabel)))
    return alignment
fa50e13062267e8929df5f538ab9a924822bc265
11,722
def auth_token_required(func):
    """Your auth here"""
    return func
e65b94d40c914c57ff8d894409b664cf97aa790d
11,723
from decimal import Decimal


def base_convert_money(amount, currency_from, currency_to):
    """
    Convert 'amount' from 'currency_from' to 'currency_to'
    """
    source = get_rate_source()

    # Get rate for currency_from.
    if source.base_currency != currency_from:
        rate_from = get_rate(currency_from)
    else:
        # If currency from is the same as base currency its rate is 1.
        rate_from = Decimal(1)

    # Get rate for currency_to.
    rate_to = get_rate(currency_to)

    if isinstance(amount, float):
        amount = Decimal(amount).quantize(Decimal('.000001'))

    # After finishing the operation, quantize down final amount to two points.
    return ((amount / rate_from) * rate_to).quantize(Decimal("1.00"))
5417ba7a9d757bafc835df8f55a1d4e6de72cb2f
11,724
import confluent_kafka as ck
from elasticsearch import Elasticsearch
import sys


def init_dask_workers(worker, config, obj_dict=None):
    """
    Initialize for all dask workers

    :param worker: Dask worker
    :type worker: object
    :param config: Configuration which contains source and sink details
    :type config: dict
    :param obj_dict: Objects that are required to be present on every dask worker
    :type obj_dict: dict
    :return: worker: Dask worker
    :rtype: object
    """
    if obj_dict is not None:
        for key in obj_dict.keys():
            worker.data[key] = obj_dict[key]
    sink = config["sink"]
    if sink == SINK_KAFKA:
        producer_conf = config["kafka_conf"]["producer_conf"]
        print("Producer conf: " + str(producer_conf))
        producer = ck.Producer(producer_conf)
        worker.data["sink"] = producer
    elif sink == SINK_ES:
        es_conf = config["elasticsearch_conf"]
        if "username" in es_conf and "password" in es_conf:
            es_client = Elasticsearch(
                [
                    es_conf["url"].format(
                        es_conf["username"], es_conf["password"], es_conf["port"]
                    )
                ],
                use_ssl=True,
                verify_certs=True,
                ca_certs=es_conf["ca_file"],
            )
        else:
            es_client = Elasticsearch(
                [{"host": config["elasticsearch_conf"]["url"]}],
                port=config["elasticsearch_conf"]["port"],
            )
        worker.data["sink"] = es_client
    elif sink == SINK_FS:
        print(
            "Streaming process will write the output to location '{}'".format(
                config["output_dir"]
            )
        )
    else:
        print(
            "No valid sink provided in the configuration file. "
            "Please provide kafka/elasticsearch/filesystem"
        )
        sys.exit(-1)
    print("Successfully initialized dask worker " + str(worker))
    return worker
34fb468e5a5f2acf3ea4c18065c7c0efde750a04
11,725
from datetime import date


async def contestant() -> dict:
    """Create a mock contestant object."""
    return {
        "id": "290e70d5-0933-4af0-bb53-1d705ba7eb95",
        "first_name": "Cont E.",
        "last_name": "Stant",
        "birth_date": date(1970, 1, 1).isoformat(),
        "gender": "M",
        "ageclass": "G 12 år",
        "region": "Oslo Skikrets",
        "club": "Lyn Ski",
        "team": "Team Kollen",
        "email": "[email protected]",
        "event_id": "ref_to_event",
        "bib": 1,
    }
261fd560107489b58c645efb1bb9c19a396e0dce
11,726
import requests
import http


def GetApitoolsTransport(timeout='unset',
                         enable_resource_quota=True,
                         response_encoding=None,
                         ca_certs=None,
                         allow_account_impersonation=True,
                         use_google_auth=None,
                         response_handler=None,
                         redact_request_body_reason=None):
  """Get a transport client for use with apitools.

  Args:
    timeout: double, The timeout in seconds to pass to httplib2. This is the
        socket level timeout. If timeout is None, timeout is infinite. If
        default argument 'unset' is given, a sensible default is selected.
    enable_resource_quota: bool, By default, we are going to tell APIs to use
        the quota of the project being operated on. For some APIs we want to
        use gcloud's quota, so you can explicitly disable that behavior by
        passing False here.
    response_encoding: str, the encoding to use to decode the response.
    ca_certs: str, absolute filename of a ca_certs file that overrides the
        default
    allow_account_impersonation: bool, True to allow use of impersonated
        service account credentials for calls made with this client. If False,
        the active user credentials will always be used.
    use_google_auth: bool, True if the calling command indicates to use
        google-auth library for authentication. If False, authentication will
        fallback to using the oauth2client library.
    response_handler: requests.ResponseHandler, handler that gets executed
        before any other response handling.
    redact_request_body_reason: str, the reason why the request body must be
        redacted if --log-http is used. If None, the body is not redacted.

  Returns:
    1. A httplib2.Http-like object backed by httplib2 or requests.
  """
  if base.UseRequests():
    if response_handler:
      if not isinstance(response_handler, core_requests.ResponseHandler):
        raise ValueError('response_handler should be of type ResponseHandler.')
      if (properties.VALUES.core.log_http.GetBool() and
          properties.VALUES.core.log_http_streaming_body.GetBool()):
        # We want to print the actual body instead of printing the placeholder.
        # To achieve this, we need to set streaming_response_body as False.
        # Note that the body will be empty if the response_handler has already
        # consumed the stream.
        streaming_response_body = False
      else:
        streaming_response_body = response_handler.use_stream
    else:
      streaming_response_body = False
    session = requests.GetSession(
        timeout=timeout,
        enable_resource_quota=enable_resource_quota,
        ca_certs=ca_certs,
        allow_account_impersonation=allow_account_impersonation,
        streaming_response_body=streaming_response_body,
        redact_request_body_reason=redact_request_body_reason)
    return core_requests.GetApitoolsRequests(session, response_handler,
                                             response_encoding)

  return http.Http(timeout=timeout,
                   enable_resource_quota=enable_resource_quota,
                   response_encoding=response_encoding,
                   ca_certs=ca_certs,
                   allow_account_impersonation=allow_account_impersonation,
                   use_google_auth=use_google_auth)
21fc8d521703580a51c753811b7f0d401f68bba5
11,727
def user_requested_anomaly7():
    """
    Checks if the user requested an anomaly, and returns True/False accordingly.
    """
    digit = 0
    res = False
    if is_nonzero_file7(summon_filename):
        lines = []
        with open(get_full_path(summon_filename)) as f:
            lines = f.readlines()
        if len(lines) > 0:
            try:
                digit = int(lines[0])
                if digit > 0:
                    res = True
            except Exception as e:
                res = False
                append_logs("ERROR:" + str(e), name4logs, "always")
        else:
            res = False
    else:
        res = False

    # Disable summoning of anomalies after the requested number of anomalies were added
    if res:
        with open(get_full_path(summon_filename), "w") as f:
            if digit > 0:
                f.write(str(digit - 1))
            else:
                f.write("0")

    return res
bed54831c00deb6c11ce81c731fe37f35ef070b7
11,728
# 'choices' is assumed to come from the standard library 'random' module.
from random import choices


def mat_to_r(_line, _mat_object: MatlabObject, _r_object: RObject = RObject()):
    """Move variables from Matlab to R

    Parameters
    ----------
    _line : str, Iterable[str]
        If str, one of the following:
            1. '#! m[at[lab]] -> <vars>'
            2. '<vars>'
        where <vars> is a comma separated list of Matlab variable names
        If Iterable[str]: [<var1>, <var2>, ...] where <varX> is the name of a
        Matlab variable. All variables must be str, int, float.
    _mat_object : MatlabObject
        The Matlab environment where the variables are stored
    _r_object : optional[RObject]
        The R environment to load the variables into
        Default: new RObject()

    Returns
    -------
    MatlabObject
        A Matlab environment with the given variables loaded

    Raises
    ------
    RuntimeError:
        If _mat_object or _r_object is not alive
    ValueError
        If _line is not the right format
    NameError
        If a requested variable is not in the given Matlab environment
    """
    ## input validation
    if not _mat_object.isalive:
        # can't do anything
        raise RuntimeError('Matlab connection was killed before things could be brought back to Python.')

    if type(_line) is str and ('#!' in _line or '%!' in _line):
        # _line = '#! <lang> -> <vars>'
        if not '->' in _line:
            raise ValueError('Misformatted line: "' + _line + '"')
        _to_load = _line.split('->')[1].replace(' ', '').split(',')
    elif type(_line) is str:
        # _line = '<vars>'
        _to_load = _line.replace(' ', '').split(',')
    elif hasattr(_line, '__iter__') and all([type(i) is str for i in _line]):
        # _line = [<var i>, ...]
        _to_load = list(_line)
    else:
        raise ValueError('Unrecognized _line')

    if not _r_object.isalive:
        # can't do anything
        raise RuntimeError('R connection was killed before things could be sent to it.')

    if _to_load[0] == '':
        # null case
        return _r_object

    # check the variables
    _who = _mat_object.who
    for i in _to_load:
        if i not in _who:
            raise NameError(str(i) + ' not in Matlab environment')

    # bundle them
    _random_name = ''.join(choices('abcdefghijklmnopqrstuvwxyz', k=10))
    _mat_object.sendline(_random_name + ' = tempname')
    _temp_file = _mat_object.before.split('\r\n\r\n')[2].strip()[1:-1]

    # get them
    _mat_object.sendlines([
        'save ' + _temp_file + '.mat ' + ' '.join(_to_load),
        'clear ' + _random_name
    ])

    # load them
    _r_object.sendlines(
        [
            'library("R.matlab")',
            _random_name + ' <- readMat("' + _temp_file + '.mat")'
        ] + [
            _current + ' <- ' + _random_name + '$' + _current
            for _current in _to_load
        ] + [
            'rm(' + _random_name + ')'
        ]
    )

    return _r_object
22f78c06bcf47a71596563debf6115c954d89e21
11,729
import os


def read_keys():
    """ read aws credentials from file, then stick into global variables... """
    with open('%s/.aws/credentials' % os.getenv('HOME'), 'rt') as infile:
        for line in infile:
            if 'aws_access_key_id' in line:
                aws_access_key_id = line.split('=')[-1].strip()
            if 'aws_secret_access_key' in line:
                aws_secret_access_key = line.split('=')[-1].strip()
    return aws_access_key_id, aws_secret_access_key
c79752cb52045c3d369516dc16343ec766aa9e09
11,730
def RecalculatedEdgeDegreeAttack(G, remove_fraction=1.0):
    """ Recalculated Edge Degree Attack """
    n = G.number_of_nodes()
    m = int(G.number_of_edges() * (remove_fraction + 0.0))

    tot_ND = [0] * (m + 1)
    tot_T = [0] * (m + 1)

    ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
    tot_ND[0] = ND
    tot_T[0] = 0

    for i in range(m):
        # calculate max edge degree
        cur_max_edge_degree = -1
        cur_max_u = -1
        cur_max_v = -1
        for u, v in G.edges():
            temp = G.degree(u) * G.degree(v)
            if temp > cur_max_edge_degree:
                cur_max_edge_degree = temp
                cur_max_u = u
                cur_max_v = v
        # remove edge
        G.remove_edge(cur_max_u, cur_max_v)
        # calculate and save ND
        ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
        tot_ND[i + 1] = ND
        tot_T[i + 1] = i + 1

    return (tot_ND, tot_T)
ff88430f172a1ca319af9d637c292091cab2bf6f
11,731
from urllib.parse import urlencode
from urllib.request import build_opener


def get(url, params=None, headers=None):
    """Return the contents from a URL

    Params:
    - url (str): Target website URL
    - params (dict, optional): Param payload to add to the GET request
    - headers (dict, optional): Headers to add to the GET request

    Example:
    ```
    get('https://httpbin.org/anything', {'soup': 'gazpacho'})
    ```
    """
    opener = build_opener()
    if params:
        url += "?" + urlencode(params)
    if headers:
        for h in headers.items():
            opener.addheaders = [h]
    if (headers and not headers.get("User-Agent")) or not headers:
        UA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:69.0) Gecko/20100101 Firefox/69.0"
        opener.addheaders = [("User-Agent", UA)]
    with opener.open(url) as f:
        content = f.read().decode("utf-8")
    return content
edb0fe25728fe1bd11d9e74509a630f2d3823af1
11,732
def get_param_num(model):
    """
    get the number of parameters

    Args:
        model:

    Returns:

    """
    return sum(p.numel() for p in model.parameters())
19d98a1bcbdcb827be4a657f82cda2ff09f119e4
11,733
import os


def get_branch_name():
    """Get the name of the current branch

    returns: The name of the current branch
    """
    HEAD = data.get_ref('HEAD', deref=False)
    if not HEAD.symbolic:
        return None
    HEAD = HEAD.value
    assert HEAD.startswith('refs/heads/')
    return os.path.relpath(HEAD, 'refs/heads')
631b08be509503f50826b5e5f7c1ceca95b8f4af
11,734
from typing import Union

import mindspore as ms
import numpy as np


def format_tensor_to_ndarray(x: Union[ms.Tensor, np.ndarray]) -> np.ndarray:
    """Unify `mindspore.Tensor` and `np.ndarray` to `np.ndarray`. """
    if isinstance(x, ms.Tensor):
        x = x.asnumpy()

    if not isinstance(x, np.ndarray):
        raise TypeError('input should be one of [ms.Tensor or np.ndarray],'
                        ' but receive {}'.format(type(x)))
    return x
6e64a40bbafe2b2f89f5afd200077be369adcfe7
11,735
from typing import Pattern
import re


def hunt_csv(regex: Pattern, body: str) -> list:
    """
    finds chunk of csv in a larger string defined as regex, splits it,
    and returns as list. really useful only for single lines.
    worse than StringIO -> numpy or pandas csv reader in other cases.
    """
    csv_string = re.search(regex, body)[0]
    if r"\n" in csv_string:
        lines = csv_string.split(r"\n")
        processed_lines = []
        for line in lines:
            csv_fields = line.split(",")
            csv_fields = [field.strip() for field in csv_fields]
            processed_lines.append(csv_fields)
        return processed_lines
    csv_fields = csv_string.split(",")
    return [field.strip() for field in csv_fields]
9c5574f059ef05e6f99e468a9272f42393d79030
11,736
import re


def time_key(file_name):
    """ provides a time-based sorting key """
    splits = file_name.split('/')
    [date] = re.findall(r'(\d{4}_\d{2}_\d{2})', splits[-2])
    date_id = [int(token) for token in date.split('_')]
    recording_id = natural_key(splits[-1])
    session_id = session_key(splits[-2])
    return date_id + session_id + recording_id
07a5448b7b39b00780f53080b316981198d54c91
11,737
import subprocess


def silent_popen(args, **kwargs):
    """Wrapper for subprocess.Popen with suppressed output.

    STDERR is redirected to STDOUT which is piped back to the calling
    process and returned as the result.
    """
    return subprocess.Popen(args,
                            stderr=subprocess.STDOUT,
                            stdout=subprocess.PIPE,
                            **kwargs).communicate()[0]
5075a3ea1891ad3c237d5b05b474f563005ff48f
11,738
import cv2


def sizeRange(contourList, low, high):
    """Only keeps contours that are in range for size"""
    newList = []
    for i in contourList:
        if (low <= cv2.contourArea(i) <= high):
            newList.append(i)
    return newList
ac83b09acfd8d8e23a03965b52c5c4cc0361710d
11,739
def number_field_choices(field):
    """
    Given a field, returns the number of choices.
    """
    try:
        return len(field.get_flat_choices())
    except AttributeError:
        return 0
b8776e813e9eb7471a480df9d6e49bfeb48a0eb6
11,740
def _is_an_unambiguous_user_argument(argument: str) -> bool:
    """Check if the provided argument is a user mention, user id, or username (name#discrim)."""
    has_id_or_mention = bool(commands.IDConverter()._get_id_match(argument)
                             or RE_USER_MENTION.match(argument))

    # Check to see if the author passed a username (a discriminator exists)
    argument = argument.removeprefix("@")
    has_username = len(argument) > 5 and argument[-5] == "#"

    return has_id_or_mention or has_username
adecc093a0597d43292171f867ebcf5a64edc7d8
11,741
def resize_image(image, size):
    """
    Resize the image to fit in the specified size.

    :param image: Original image.
    :param size: Tuple of (width, height).
    :return: Resized image.
    :rtype: :py:class: `~PIL.Image.Image`
    """
    image.thumbnail(size)
    return image
67db04eac8a92d27ebd3ec46c4946b7662f9c03f
11,742
def one_hot_vector(val, lst):
    """Converts a value to a one-hot vector based on options in lst"""
    if val not in lst:
        val = lst[-1]
    return map(lambda x: x == val, lst)
401ff1d6666c392b3a217659929a4f7832c52522
11,743
def follow(request, username):
    """ Add user with username to current user's following list """
    request.user.followers.add(User.objects.get(username=username))
    return redirect('accounts:followers')
72530b32cfcb2282045cd2ef112df62a19e03239
11,744
def sesteva_stolpce(seznam_seznamov_stolpcev):
    """Sums all the 'columns' in each individual sub-list."""
    matrika_stolpcev = []
    for i in range(len(seznam_seznamov_stolpcev)):
        sez = seznam_seznamov_stolpcev[i]
        stolpec11 = sez[0]
        while len(sez) > 1:
            i = 0
            stolpec22 = sez[1]
            stolpec11 = vsota_stolpcev(stolpec11, stolpec22)
            sez = sez[i + 1:]
        matrika_stolpcev.append(stolpec11)
    return matrika_stolpcev
fe69368a79b60e549983a07e140ec3b5e532868e
11,745
def create_response(data={}, status=200, message=''):
    """
    Wraps response in a consistent format throughout the API
    Format inspired by https://medium.com/@shazow/how-i-design-json-api-responses-71900f00f2db
    Modifications included:
    - make success a boolean since there's only 2 values
    - make message a single string since we will only use one message per response

    IMPORTANT: data must be a dictionary where:
    - the key is the name of the type of data
    - the value is the data itself
    """
    response = {
        'success': 200 <= status < 300,
        'code': status,
        'message': message,
        'result': data
    }
    return jsonify(response), status
51346e3a92bdf93085b12eaccc99511b66a34bcf
11,746
def do_sizes_match(imgs):
    """Returns if sizes match for all images in list."""
    # Compare each image's size against the first image's size; the sizes
    # match only when no image differs.
    return len([*filter(lambda x: x.size != imgs[0].size, imgs)]) == 0
7da30972ecfd4d3cac3d21ff380255865ec3b5c8
11,747
import numpy as np
from scipy import stats


def gaussian_sampling(len_x, len_y, num_samples, spread_factor=5, origin_ball=1):
    """
    Create a gaussian sampling pattern where each point is sampled from a
    bivariate, concatenated normal distribution.

    Args:
        len_x (int): Size of output mask in x direction (width)
        len_y (int): Size of output mask in y direction (height)
        num_samples (int): Number of samples to pick
        spread_factor (float): Concentration of samples (ie, the SD of the
            probability distributions are len/spread_factor)
        origin_ball (int): Radius of ball around origin where all samples are
            included.

    Returns:
        np.ndarray: A boolean numpy array (mask) depicting sampling pattern.
    """
    # Create two truncated normal distributions for x and y dir
    lower = 0
    upper_x = len_x
    mu_x = len_x // 2
    sigma_x = len_x // spread_factor
    randgen_x = stats.truncnorm(
        (lower - mu_x) / sigma_x, (upper_x - mu_x) / sigma_x, loc=mu_x, scale=sigma_x
    )
    upper_y = len_y
    mu_y = len_y // 2
    sigma_y = len_y // spread_factor
    randgen_y = stats.truncnorm(
        (lower - mu_y) / sigma_y, (upper_y - mu_y) / sigma_y, loc=mu_y, scale=sigma_y
    )

    # Create mask
    mask = np.zeros([len_y, len_x], dtype=np.bool)

    # Add origin ball
    if origin_ball > 0:
        y_grid, x_grid = np.ogrid[:len_y, :len_x]
        dist_from_center = np.sqrt((y_grid - mu_y) ** 2 + (x_grid - mu_x) ** 2)
        mask = dist_from_center <= origin_ball

    # Subtract origin ball from number of samples
    num_samples -= np.sum(mask)

    # Sample points from distribution
    xs = randgen_x.rvs(num_samples).astype(np.uint32)
    ys = randgen_y.rvs(num_samples).astype(np.uint32)
    for i in range(num_samples):
        x, y = xs[i], ys[i]
        # Ensure unique samples
        while mask[y, x]:
            x = randgen_x.rvs(1).astype(np.uint32)
            y = randgen_y.rvs(1).astype(np.uint32)
            xs[i], ys[i] = x, y
        mask[y, x] = True

    return mask
fce7396b02778aa832c5d24028fb1f55f1013b15
11,748
import json


def from_cx_jsons(graph_json_str: str) -> BELGraph:
    """Read a BEL graph from a CX JSON string."""
    return from_cx(json.loads(graph_json_str))
c61e415199ce0bfc610c1a4277aa8ba1b74a070a
11,749
from typing import Tuple


def _calculate_dimensions(image: Image) -> Tuple[int, int]:
    """
    Returns the width and height of the given pixel data.

    The height of the image is the number of rows in the list, while the
    width of the image is determined by the number of pixels on the first
    row. It is assumed that each row contains the same number of pixels.

    :param image: pixel data
    :return: width and height as a tuple
    """
    try:
        width = 0
        height = len(image)
        if height != 0:
            width = len(image[0])
        return width, height
    except (IndexError, TypeError):
        # Either data is not subscribable, or the
        # length of the first row cannot be obtained.
        raise ValueError("invalid pixel data - could not determine dimensions")
7e74f181839b70e45cb64ca8b8517ef663c7caf8
11,750
def cli(ctx, invocation_id):
    """Get a summary of an invocation, stating the number of jobs which
    succeed, which are paused and which have errored.

    Output:

        The invocation summary.
        For example::

            {'states': {'paused': 4, 'error': 2, 'ok': 2},
             'model': 'WorkflowInvocation',
             'id': 'a799d38679e985db',
             'populated_state': 'ok'}
    """
    return ctx.gi.invocations.get_invocation_summary(invocation_id)
94197a9c55c0d37b311585fdfce9d615c6986cb5
11,751
import numpy as np


def remove_observations_mean(data, data_obs, lats, lons):
    """
    Removes observations to calculate model biases
    """
    ### Import modules
    ### Remove observational data
    databias = data - data_obs[np.newaxis, np.newaxis, :, :, :]
    return databias
8f0cf60137660878f57dc35caa8c23896944d6ab
11,752
import logging def joint_extraction_model_fn(features, labels, mode, params): """Runs the node-level sequence labeling model.""" logging.info("joint_extraction_model_fn") inputs = features # Arg "features" is the overall inputs. # Read vocabs and inputs. dropout = params["dropout"] if params["circle_features"]: nnodes, friend_has_label, (words, nwords), ( prev_text_words, n_prev_text_words), (chars_list, chars_len_list), (partner_words, _), ( friends_words, n_friends_words), (friends_fix, friends_var), ( leaf_type_list, goldmine_feat_list), (_, _), ( node_xpath_list, node_xpath_len_list), (attributes, attributes_plus_none), ( position_list) = inputs else: nnodes, (words, nwords), (prev_text_words, n_prev_text_words), ( chars_list, chars_len_list), (leaf_type_list, goldmine_feat_list), ( _, _), (node_xpath_list, node_xpath_len_list), (attributes), (position_list) = inputs # nnodes, the number of nodes in each page; # shape is [?]; length is the number of pages. # words, nwords are the node_text feature, shape is [?, ?, ?] # the first two dimension is the batch * pages, # the last one is the maximum length of the word lists # prev_text_words, n_prev_text_words, similar as above for previous nodes'text # chars_list, chars_len_list, shape is [?,?,?,?] also for node_text features # the additional dim is for the length of the character sequences. # friends_words, shape is [?, ?, ?], gathers all the words from different # friends of one node. # friends_fix, friends_var, shapes are [?, ?, ?, ?] # the first two dimension is the batch * pages, # the last two are the maximum length of friend nodes and words. nnodes = merge_first_two_dims(nnodes) training = (mode == tf.estimator.ModeKeys.TRAIN) vocab_words = _index_table_from_file( params["words"], num_oov_buckets=params["num_oov_buckets"]) with tf.gfile.Open(params["tags"]) as f: indices = [idx for idx, tag in enumerate(f) if tag.strip() != "none"] num_tags = len(indices) + 1 # Make "None" as the tag with the last index. # NodeText Char Embeddings. with tf.gfile.Open(params["chars"]) as f: num_chars = sum(1 for _ in f) + params["num_oov_buckets"] vocab_chars = _index_table_from_file( params["chars"], num_oov_buckets=params["num_oov_buckets"]) char_ids = vocab_chars.lookup(chars_list) variable = tf.get_variable("chars_embeddings", [num_chars + 1, params["dim_chars"]], tf.float32) char_embeddings = tf.nn.embedding_lookup(variable, char_ids) char_embeddings = tf.layers.dropout( char_embeddings, rate=dropout, training=training) logging.info("char_embeddings.shape: %s", char_embeddings.shape) # Char 1d convolution. weights = tf.sequence_mask(chars_len_list) char_embeddings = masked_conv1d_and_max(char_embeddings, weights, params["filters"], params["kernel_size"]) logging.info("char_embeddings.shape after CNN: %s", char_embeddings.shape) # Word Embeddings. word_ids = vocab_words.lookup(words) glove = np.load(tf.gfile.Open(params["glove"], "rb"))["embeddings"] # np.array variable = np.vstack([glove, [[0.] * params["dim_word_embedding"]]]) # To finetune the GloVe embedding by setting trainable as True. variable = tf.Variable(variable, dtype=tf.float32, trainable=True) word_embeddings = tf.nn.embedding_lookup(variable, word_ids) logging.info("word_embeddings.shape: %s", word_embeddings.shape) # Prev_Text Representations. prev_text_word_ids = vocab_words.lookup(prev_text_words) prev_text_word_embeddings = tf.nn.embedding_lookup(variable, prev_text_word_ids) if params["use_prev_text_lstm"]: # PREV_text LSTM. 
logging.info("prev_text_representation using lstm") prev_t = merge_first_two_dims(prev_text_word_embeddings) # Seq * batch * input prev_t = tf.transpose(prev_t, perm=[1, 0, 2]) # Need time-major. prev_output_fw, prev_output_bw = _bidirectional_lstm( prev_t, params["lstm_size"], merge_first_two_dims(n_prev_text_words)) prev_output = tf.concat([prev_output_fw, prev_output_bw], axis=-1) prev_output = tf.reduce_mean(prev_output, 0) prev_output = tf.layers.dropout( prev_output, rate=dropout, training=training) logging.info("prev_output.shape (after reduce_mean): %s", prev_output.shape) context_representation = split_first_two_dims_by_example( prev_output, prev_text_word_embeddings) logging.info("context_representation.shape (after split): %s", context_representation.shape) else: logging.info("prev_text_word_embeddings.shape: %s", prev_text_word_embeddings.shape) context_representation = tf.reduce_mean(prev_text_word_embeddings, 2) logging.info("context_representation.shape: %s", context_representation.shape) if params["circle_features"]: partner_embeddings, circle_representation = circle_feature_modeling( variable, vocab_words, partner_words, friends_words, n_friends_words, friends_fix, friends_var, word_embeddings, dropout, training, params) context_representation = circle_representation if params["use_friend_semantic"]: friends_ids = vocab_words.lookup(friends_words) friend_embeddings = tf.nn.embedding_lookup(variable, friends_ids) if params["use_xpath_lstm"]: h_output = xpath_feature_modeling(node_xpath_list, node_xpath_len_list, training, params) context_representation = tf.concat([h_output, context_representation], axis=2) if params["use_position_embedding"]: position_representation = position_modeling(position_list, params) context_representation = tf.concat( [context_representation, position_representation], axis=2) # Text Embeddings: Concatenate Word and Char and Feature Embeddings. embeddings = tf.concat([word_embeddings, char_embeddings], axis=-1) embeddings = tf.layers.dropout(embeddings, rate=dropout, training=training) logging.info("embeddings.shape: %s", embeddings.shape) # LSTM inside node texts. t = merge_first_two_dims(embeddings) t = tf.transpose(t, perm=[1, 0, 2]) # Need time-major. output_fw, output_bw = _bidirectional_lstm(t, params["lstm_size"], merge_first_two_dims(nwords)) output = tf.concat([output_fw, output_bw], axis=-1) output = tf.reduce_mean(output, 0) output = tf.layers.dropout(output, rate=dropout, training=training) logging.info("output.shape (after reduce_mean): %s", output.shape) output = split_first_two_dims_by_example(output, embeddings) logging.info("output.shape (after split): %s", output.shape) node_seq_input = tf.concat([output, context_representation], axis=2) logging.info("output.shape (after + prev): %s", node_seq_input.shape) # Leaf Type Features. 
if params["add_leaf_types"]: with tf.gfile.Open(params["leaf_types"]) as f: num_leaf_types = sum(1 for _ in f) + params["num_oov_buckets"] vocab_leaf_types = _index_table_from_file( params["leaf_types"], num_oov_buckets=params["num_oov_buckets"]) leaf_type_ids = vocab_leaf_types.lookup(leaf_type_list) leaf_variable = tf.get_variable( "leaf_type_embeddings", [num_leaf_types + 1, params["dim_leaf_type"]], tf.float32) leaf_type_embeddings = tf.nn.embedding_lookup(leaf_variable, leaf_type_ids) leaf_type_embeddings = tf.layers.dropout( leaf_type_embeddings, rate=dropout, training=training) logging.info("leaf_type_embeddings.shape: %s", char_embeddings.shape) logging.info("node_seq_input.shape before leaf: %s", node_seq_input.shape) node_seq_input = tf.concat([node_seq_input, leaf_type_embeddings], axis=2) logging.info("node_seq_input.shape after leaf: %s", node_seq_input.shape) # Goldmine Feat Embeddings. if params["add_goldmine"]: vocab_goldmine_features = _index_table_from_file( params["goldmine_features"], num_oov_buckets=1) goldmine_feature_variable = tf.get_variable("goldmine_feature_embeddings", [8 + 1, params["dim_goldmine"]], tf.float32) goldmine_feat_ids = vocab_goldmine_features.lookup(goldmine_feat_list) goldmine_feat_embeddings = tf.nn.embedding_lookup(goldmine_feature_variable, goldmine_feat_ids) goldmine_feat_embeddings = tf.reduce_sum(goldmine_feat_embeddings, 2) logging.info("goldmine_feat_embeddings.shape: %s", goldmine_feat_embeddings.shape) node_seq_input = tf.concat([node_seq_input, goldmine_feat_embeddings], axis=2) logging.info("node_seq_input.shape after goldmine: %s", node_seq_input.shape) # Node-level LSTM modeling. if params["node_encoder"] == "lstm": # Node-Sequence-LSTM. n_t = tf.transpose(node_seq_input, perm=[1, 0, 2]) # Need time-major. node_output_fw, node_output_bw = _bidirectional_lstm( n_t, params["node_lstm_size"], nnodes) node_seq_output = tf.concat([node_output_fw, node_output_bw], axis=-1) node_seq_output = tf.transpose(node_seq_output, perm=[1, 0, 2]) elif params["node_encoder"] == "cnn": node_weights = tf.sequence_mask(nnodes) node_seq_output = masked_conv1d_and_max( node_seq_input, node_weights, params["node_filters"], params["node_kernel_size"], reducemax=False) elif params["node_encoder"] == "transformer": # Node-Sequence-Transformer. node_seq_output = transformer_encoding(node_seq_input, nnodes, params, mode) else: node_seq_output = node_seq_input logging.info("node_seq_input.shape after encoder: %s", node_seq_output.shape) if params["node_encoder"] != "transformer": # Add the dropout layer if the encoder is not a transformer. 
node_seq_output = tf.layers.dropout( node_seq_output, rate=dropout, training=training) if params["use_friends_discrete_feature"] and params["circle_features"]: friend_has_label = tf.expand_dims(friend_has_label, axis=-1) node_seq_output = tf.concat([node_seq_output, friend_has_label], axis=-1) logging.info("node_seq_input.shape after friend_has_label: %s", node_seq_output.shape) node_seq_output = tf.layers.dense(node_seq_output, params["last_hidden_layer_size"]) logits = tf.layers.dense(node_seq_output, num_tags, name="label_dense_1") if params["semantic_encoder"] and params["circle_features"]: partner_similarity_emb = semantic_similarity(variable, vocab_words, partner_embeddings, attributes, params) node_seq_output = tf.concat( [node_seq_output, tf.nn.softmax(partner_similarity_emb)], axis=-1) logging.info("node_seq_output.shape after semantic encoder: %s", node_seq_output.shape) if params["use_friend_semantic"]: friends_similarity_emb = semantic_similarity(variable, vocab_words, friend_embeddings, attributes, params) node_seq_output = tf.concat([node_seq_output, friends_similarity_emb], axis=-1) if params["objective"] == "classification": node_seq_output = tf.layers.dense( node_seq_output, params["dim_word_embedding"], activation="relu") node_seq_output = tf.layers.dense(node_seq_output, params["last_hidden_layer_size"]) logging.info("node_seq_output.shape after semantic encoder: %s", node_seq_output.shape) logits = tf.layers.dense(node_seq_output, num_tags, name="label_dense_2") elif params["objective"] == "semantic_scorer": logits = semantic_scorer(attributes_plus_none, node_seq_output, params) elif params["objective"] == "binary_scorer": logits = binary_scorer(attributes_plus_none, node_seq_output, training, params) if params["use_crf"]: # CRF Layer. logging.info("logits.shape: %s", logits.shape) crf_params = tf.get_variable("crf", [num_tags, num_tags], dtype=tf.float32) pred_ids, _ = tfa.text.crf.crf_decode(logits, crf_params, nnodes) logging.info("pred_ids.shape: %s", pred_ids.shape) else: pred_ids = tf.argmax(logits, 2) logging.info("pred_ids.shape: %s", pred_ids.shape) # Predict for new sentences in target set. if mode == tf.estimator.ModeKeys.PREDICT: reverse_vocab_tags = _index_table_from_file(params["tags"], 1) pred_strings = reverse_vocab_tags.lookup(tf.strings.as_string(pred_ids)) predictions = { "pred_ids": pred_ids, "tags": pred_strings, "scores": tf.nn.softmax(logits), "raw_scores": logits, } # Store the intermediate weights. if params["semantic_encoder"]: predictions["similarity"] = partner_similarity_emb if params["friend_encoder"]: predictions["friends_embs"] = circle_representation if params["extract_node_emb"]: predictions["node_embs"] = node_seq_output return tf.estimator.EstimatorSpec(mode, predictions=predictions) vocab_tags = _index_table_from_file(params["tags"], 1) tags = vocab_tags.lookup(labels) logging.info("tags.shape: %s", logits.shape) logging.info( "Parameter size: %s", np.sum( [np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])) if params["use_crf"]: log_likelihood, _ = tfa.text.crf.crf_log_likelihood(logits, tags, nnodes, crf_params) loss = tf.reduce_mean(-log_likelihood) else: loss = tf.losses.sparse_softmax_cross_entropy(labels=tags, logits=logits) # Processing the metrics. 
weights = tf.sequence_mask(nnodes) metrics = { "acc": tf.metrics.accuracy(tags, pred_ids, weights), "precision": seq_tagging_metric_util.precision(tags, pred_ids, num_tags, indices, weights), "recall": seq_tagging_metric_util.recall(tags, pred_ids, num_tags, indices, weights), "f1": seq_tagging_metric_util.f1(tags, pred_ids, num_tags, indices, weights), } for metric_name, op in metrics.items(): tf.summary.scalar(metric_name, op[1]) if mode == tf.estimator.ModeKeys.TRAIN: with tf.name_scope("train_scope"): optimizer = tf.train.AdamOptimizer() train_op = optimizer.minimize( loss, global_step=tf.train.get_or_create_global_step()) return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) return tf.estimator.EstimatorSpec( mode=mode, loss=loss, eval_metric_ops=metrics)
1e6eb2028c8924733329bc4fc3079ca12af12d94
11,753
def jp2yy(sent):
    """take a Japanese sentence in UTF8 convert to YY-mode using mecab"""
    ### (id, start, end, [link,] path+, form [surface], ipos, lrule+[, {pos p}+])
    ### set ipos as lemma (just for fun)
    ### fixme: do the full lattice
    yid = 0
    start = 0
    cfrom = 0
    cto = 0
    yy = list()
    for tok in m.parse(sent.encode('utf-8')).split('\n'):
        if tok and tok != 'EOS':
            ##print tok
            (form, p, lemma, p1, p2, p3) = tok.decode('utf-8').split('\t')
            if form in punct:
                continue
            p2 = p2 or 'n'
            p3 = p3 or 'n'
            # pos = '-'.join([p1, p2, p3])
            pos = "%s:%s-%s" % (p1, p2, p3)  ## weird format jacy requires
            cfrom = sent.find(form, cto)     ## first instance after last token
            cto = cfrom + len(form)          ## find the end
            yy.append('(%d, %d, %d, <%d:%d>, 1, "%s", %s, "null", "%s" 1.0)' %
                      (yid, start, start + 1, cfrom, cto, form, 0, pos))
            yid += 1
            start += 1
    return yy
d59a047f95761aacdd1b371bc6f03072d708e505
11,754
def make_ss_matrices(sigma_x, dt):
    """
    To make Q full-rank for inversion (so the mle makes sense), use:
        Q = [ dt**2  dt/2
              dt/2   1    ]
    to approximate Q = (dt 1)(dt 1)'

    System: x = [p_x p_y v_x v_y]
            y = [p_x' p_y']

    :param sigma_x:
    :param dt:
    :return: sigma_0: starting value for sigma_v, with process variance (sigma_v^2 Q)
    """
    i2 = np.eye(2)
    _ = np.zeros((2, 2))

    A = np.block([
        [i2, dt * i2],
        [_, i2],
    ])
    Q = np.block([
        [dt**2 * i2, dt * i2 * .5],
        [dt * i2 * .5, i2],
    ])
    C = np.block([i2, _])
    R = sigma_x**2 * i2
    sigma_0 = float(sigma_x) / 2

    return A, Q, C, R, sigma_0
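A minimal usage sketch for the helper above, assuming only numpy is available; the noise level and time step are arbitrary illustration values.

import numpy as np

# Hypothetical parameters: measurement noise of 2.5 units, 1-second time step.
A, Q, C, R, sigma_0 = make_ss_matrices(sigma_x=2.5, dt=1.0)

assert A.shape == (4, 4) and Q.shape == (4, 4)  # state transition / process covariance
assert C.shape == (2, 4) and R.shape == (2, 2)  # observation matrix / measurement covariance
print("initial sigma_v guess:", sigma_0)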
551a1d46ee67360e159ab966c0b3f30dd77254c8
11,755
def get_icp_val(tmr):
    """Read input capture value"""
    return peek(tmr + ICRx) | (peek(tmr + ICRx + 1) << 8)
0aef45e0c6edeb3c6540a51ae44013ded03c7be7
11,756
import torch


def validate(segmenter, val_loader, epoch, num_classes=-1):
    """Validate segmenter

    Args:
      segmenter (nn.Module) : segmentation network
      val_loader (DataLoader) : validation data iterator
      epoch (int) : current epoch
      num_classes (int) : number of classes to consider

    Returns:
      Mean IoU (float)
    """
    val_loader.dataset.set_stage("val")
    segmenter.eval()
    cm = np.zeros((num_classes, num_classes), dtype=int)
    with torch.no_grad():
        for i, sample in enumerate(val_loader):
            input = sample["image"]
            target = sample["mask"]
            input_var = torch.autograd.Variable(input).float().cuda()
            # Compute output
            output = segmenter(input_var)
            output = (
                cv2.resize(
                    output[0, :num_classes].data.cpu().numpy().transpose(1, 2, 0),
                    target.size()[1:][::-1],
                    interpolation=cv2.INTER_CUBIC,
                )
                .argmax(axis=2)
                .astype(np.uint8)
            )
            # Compute IoU
            gt = target[0].data.cpu().numpy().astype(np.uint8)
            gt_idx = (
                gt < num_classes
            )  # Ignore every class index larger than the number of classes
            cm += fast_cm(output[gt_idx], gt[gt_idx], num_classes)
            if i % args.print_every == 0:
                logger.info(
                    " Val epoch: {} [{}/{}]\t"
                    "Mean IoU: {:.3f}".format(
                        epoch, i, len(val_loader), compute_iu(cm).mean()
                    )
                )
    ious = compute_iu(cm)
    logger.info(" IoUs: {}".format(ious))
    miou = np.mean(ious)
    logger.info(" Val epoch: {}\tMean IoU: {:.3f}".format(epoch, miou))
    return miou
716f1eda3a283c2707fa8ffd6e8073c351bda560
11,757
def detect_forward(CoreStateMachine, PostConditionStateMachine):
    """A 'forward ambiguity' denotes a case where the post condition
       implementation fails. This happens if an iteration in the core
       pattern is a valid path in the post-condition pattern. In this
       case no decision can be made about where to reset the input position.

       Example:   x+/x

       At the end of the post condition an incoming 'x' guides through
       a path in the post condition and the core pattern. It cannot be
       determined by a flag where the input position ends.

       NOTE: For many cases where there is a forward ambiguity quex can
             generate an inverse post-condition that goes backwards from
             the end of the post condition (see function 'mount()').
             However, there are cases where even this is not possible
             (see function 'detect_backward()').
    """
    ## print_callstack()
    __assert_state_machines(CoreStateMachine, PostConditionStateMachine)

    core_acceptance_state_list = CoreStateMachine.get_acceptance_state_list()

    pcsm_init_state = PostConditionStateMachine.get_init_state()
    for csm_state in core_acceptance_state_list:
        if __dive_to_detect_iteration(CoreStateMachine, csm_state,
                                      PostConditionStateMachine, pcsm_init_state):
            return True

    return False
4d6c1952a201f3505b0770f12f42609847728a54
11,758
def price_sensitivity(results):
    """
    Calculate the price sensitivity of a strategy

    results
        results dataframe or any dataframe with the columns
        open, high, low, close, profit

    returns
        the percentage of returns sensitive to open price

    Note
    -----
    Price sensitivity is calculated by
    1) Calculating the profit in cases where open=high and open=low
    2) Dividing these profits by the total profits

    A high percentage indicates that most of your orders may not get
    executed at the LIMIT price since the stock tends to have a sharp
    movement when open=low or open=high. A value of 1 indicates that
    all returns are sensitive to prices

    This is somewhat a rough measure and it doesn't take into account
    whether you BUY or SELL
    """
    profit = results["profit"].sum()
    sen1 = results.query("open==low")["profit"].sum()
    sen2 = results.query("open==high")["profit"].sum()
    return (sen1 + sen2) / profit
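A small, hedged example of how the function above might be called; the OHLC and profit numbers are made up purely for illustration.

import pandas as pd

# Toy backtest results: row 2 has open == high, row 3 has open == low.
results = pd.DataFrame({
    "open":   [100.0, 105.0, 98.0],
    "high":   [102.0, 105.0, 99.5],
    "low":    [ 99.0, 103.0, 98.0],
    "close":  [101.0, 104.0, 99.0],
    "profit": [ 10.0,  20.0, 30.0],
})

# Sensitive profit is (20 + 30) out of a total of 60.
print(price_sensitivity(results))  # -> 0.833...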
02ab811bf689e760e011db6d091dcb7c3079f0d1
11,759
def wrap_zone(tz, key=KEY_SENTINEL, _cache={}):
    """Wrap an existing time zone object in a shim class.

    This is likely to be useful if you would like to work internally with
    non-``pytz`` zones, but you expose an interface to callers relying on
    ``pytz``'s interface. It may also be useful for passing non-``pytz``
    zones to libraries expecting to use ``pytz``'s interface.

    :param tz:
        A :pep:`495`-compatible time zone, such as those provided by
        :mod:`dateutil.tz` or :mod:`zoneinfo`.

    :param key:
        The value for the IANA time zone key. This is optional for
        ``zoneinfo`` zones, but required for ``dateutil.tz`` zones.

    :return:
        A shim time zone.
    """
    if key is KEY_SENTINEL:
        key = getattr(tz, "key", KEY_SENTINEL)

    if key is KEY_SENTINEL:
        raise TypeError(
            "The `key` argument is required when wrapping zones that do not "
            + "have a `key` attribute."
        )

    instance = _cache.get((id(tz), key), None)
    if instance is None:
        instance = _cache.setdefault((id(tz), key), _PytzShimTimezone(tz, key))

    return instance
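An illustrative call, assuming the module-level KEY_SENTINEL and _PytzShimTimezone that the snippet above relies on are available (they are not shown here); the zone names are arbitrary.

from zoneinfo import ZoneInfo

# zoneinfo zones carry a `key` attribute, so no explicit key is needed.
shim = wrap_zone(ZoneInfo("America/New_York"))

# dateutil zones do not, so the IANA key must be passed explicitly, e.g.:
# from dateutil import tz
# shim = wrap_zone(tz.gettz("Europe/Berlin"), key="Europe/Berlin")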
7776153859b30ee758b16498b1122d0af294d371
11,760
async def async_browse_media(
    hass, media_content_type, media_content_id, *, can_play_artist=True
):
    """Browse Spotify media."""
    info = list(hass.data[DOMAIN].values())[0]
    return await async_browse_media_internal(
        hass,
        info[DATA_SPOTIFY_CLIENT],
        info[DATA_SPOTIFY_ME],
        media_content_type,
        media_content_id,
        can_play_artist=can_play_artist,
    )
d3f912ecbd8949a637d461a453a4b9a9ea73a20c
11,761
from typing import Tuple

import json


def bounds(url: str) -> Tuple[str, str, str]:
    """Handle bounds requests."""
    info = main.bounds(url)
    return ("OK", "application/json", json.dumps(info))
2da1ec2db8b2c0c3a3d28854dc2f68b71aa96bf1
11,762
import email


def make_message_id():
    """ Generates rfc message id. The returned message id includes the
    angle brackets.
    """
    return email.utils.make_msgid('sndlatr')
7030efe1d61f4e54d833bb5c808f582689c626c6
11,763
def _understand_err_col(colnames):
    """Get which column names are error columns

    Examples
    --------
    >>> colnames = ['a', 'a_err', 'b', 'b_perr', 'b_nerr']
    >>> serr, terr = _understand_err_col(colnames)
    >>> np.allclose(serr, [1])
    True
    >>> np.allclose(terr, [2])
    True
    >>> serr, terr = _understand_err_col(['a', 'a_nerr'])
    Traceback (most recent call last):
    ...
    ValueError: Missing positive error...
    >>> serr, terr = _understand_err_col(['a', 'a_perr'])
    Traceback (most recent call last):
    ...
    ValueError: Missing negative error...
    """
    shift = 0
    serr = []
    terr = []

    for i, col in enumerate(colnames):
        if col.endswith("_err"):
            # The previous column, but they're numbered from 1!
            # Plus, take shift into account
            serr.append(i - shift)
            shift += 1
        elif col.endswith("_perr"):
            terr.append(i - shift)
            if len(colnames) == i + 1 or not colnames[i + 1].endswith('_nerr'):
                raise ValueError("Missing negative error")
            shift += 2
        elif col.endswith("_nerr") and not colnames[i - 1].endswith('_perr'):
            raise ValueError("Missing positive error")

    return serr, terr
2fab9346a3ea8fa6e84e406856eef8ad14ad9f66
11,764
def unpivot(frame):
    """
    Example:
    >>> df
              date variable     value
    0   2000-01-03        A  0.895557
    1   2000-01-04        A  0.779718
    2   2000-01-05        A  0.738892
    3   2000-01-03        B -1.513487
    4   2000-01-04        B -0.543134
    5   2000-01-05        B  0.902733
    6   2000-01-03        C -0.053496
    7   2000-01-04        C  0.298079
    8   2000-01-05        C -1.962022
    9   2000-01-03        D -0.174269
    10  2000-01-04        D -0.047428
    11  2000-01-05        D -1.871996

    >>> tm.makeTimeDataFrame(3)
                       A         B         C         D
    2000-01-03 -0.911447  0.274853 -0.740769  2.330942
    2000-01-04 -0.208471 -1.024612  0.512266 -0.708707
    2000-01-05 -1.368389 -3.464163 -1.940530 -1.149835
    """
    N, K = frame.shape
    data = {
        "value": frame.to_numpy().ravel("F"),
        "variable": np.asarray(frame.columns).repeat(N),
        "date": np.tile(np.asarray(frame.index), K),
    }
    return pd.DataFrame(data, columns=["date", "variable", "value"])
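A short usage sketch, assuming numpy and pandas are imported as np/pd (as the function body requires); the toy frame is made up for illustration.

import numpy as np
import pandas as pd

wide = pd.DataFrame(
    np.arange(6).reshape(3, 2),
    index=pd.date_range("2000-01-03", periods=3),
    columns=["A", "B"],
)
long = unpivot(wide)
print(long)
# 6 rows: one (date, variable, value) record per cell of the wide frame.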
6cda1c29e7e7c9b4176e83b6a0e1d907458721b2
11,765
import type


def find_viable_generators_aux (target_type, prop_set):
    """ Returns generators which can be used to construct target of specified type
        with specified properties. Uses the following algorithm:
        - iterates over requested target_type and all its bases (in the order
          returned by type.all_bases.
        - for each type find all generators that generate that type and whose
          requirements are satisfied by properties.
        - if the set of generators is not empty, returns that set.

        Note: this algorithm explicitly ignores generators for base classes if
        there's at least one generator for the requested target_type.
    """
    # Select generators that can create the required target type.
    viable_generators = []
    initial_generators = []

    # Try all-type generators first. Assume they have
    # quite specific requirements.
    all_bases = type.all_bases(target_type)

    for t in all_bases:
        initial_generators = __type_to_generators.get(t, [])

        if initial_generators:
            dout("there are generators for this type")
            if t != target_type:
                # We're here, when no generators for target-type are found,
                # but there are some generators for a base type.
                # We'll try to use them, but they will produce targets of
                # base type, not of 'target-type'. So, we clone the generators
                # and modify the list of target types.
                generators2 = []
                for g in initial_generators[:]:
                    # generators.register adds generator to the list of generators
                    # for toolsets, which is a bit strange, but should work.
                    # That list is only used when inheriting toolset, which
                    # should have been done before generators are run.
                    ng = g.clone_and_change_target_type(t, target_type)
                    generators2.append(ng)
                    register(ng)

                initial_generators = generators2
            break

    for g in initial_generators:
        dout("trying generator " + g.id()
             + "(" + str(g.source_types()) + "->" + str(g.target_types()) + ")")

        m = g.match_rank(prop_set)
        if m:
            dout("  is viable")
            viable_generators.append(g)

    return viable_generators
40764a16b17b54c28495e08623d616d0927451d1
11,766
def analyze(model, Y, print_to_console=True):
    """Perform variance-based sensitivity analysis for each process.

    Parameters
    ----------
    model : object
        The model defined in the sammpy
    Y : numpy.array
        A NumPy array containing the model outputs
    print_to_console : bool
        Print results directly to console (default False)

    Returns
    ----------
    Returns a dictionary with keys 'PSK', 'PSTK', where
    each entry is a list whose size equals the number of processes.
    """
    # Number of sample realizations
    obs = Y.shape[1]

    # Number of process and process models
    npros = len(model.frames['names'])

    # Create a dict to store the results
    S = create_si_dict(npros)

    # Perform the difference-based process sensitivity analysis
    if print_to_console:
        print('Running MMDS difference-based process sensitivity analysis...')
    MMDS = mmds_mean_var(model, Y)

    # Save results to the dict
    for i in range(npros):
        S['mean'][i] = MMDS[0, i]
        S['variance'][i] = MMDS[1, i]

    # Print results to console
    if print_to_console:
        print_indices(model, S)

    return S
119c00becb1c3b507e35cbcecd98762fcb924521
11,767
import copy


def GetMorganFingerprint(mol, atomId=-1, radius=2, fpType='bv', nBits=2048,
                         useFeatures=False, **kwargs):
    """
    Calculates the Morgan fingerprint with the environments of atomId removed.

    Parameters:
      mol -- the molecule of interest
      radius -- the maximum radius
      fpType -- the type of Morgan fingerprint: 'count' or 'bv'
      atomId -- the atom to remove the environments for
                (if -1, no environment is removed)
      nBits -- the size of the bit vector (only for fpType = 'bv')
      useFeatures -- if false: ConnectivityMorgan, if true: FeatureMorgan

    any additional keyword arguments will be passed to the fingerprinting function.
    """
    if fpType not in ['bv', 'count']:
        raise ValueError("Unknown Morgan fingerprint type")
    if not hasattr(mol, '_fpInfo'):
        info = {}
        # get the fingerprint
        if fpType == 'bv':
            molFp = rdMD.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits,
                                                       useFeatures=useFeatures,
                                                       bitInfo=info, **kwargs)
        else:
            molFp = rdMD.GetMorganFingerprint(mol, radius,
                                              useFeatures=useFeatures,
                                              bitInfo=info, **kwargs)
        # construct the bit map
        if fpType == 'bv':
            bitmap = [DataStructs.ExplicitBitVect(nBits)
                      for _ in range(mol.GetNumAtoms())]
        else:
            bitmap = [[] for _ in range(mol.GetNumAtoms())]
        for bit, es in info.items():
            for at1, rad in es:
                if rad == 0:  # for radius 0
                    if fpType == 'bv':
                        bitmap[at1][bit] = 1
                    else:
                        bitmap[at1].append(bit)
                else:  # for radii > 0
                    env = Chem.FindAtomEnvironmentOfRadiusN(mol, rad, at1)
                    amap = {}
                    Chem.PathToSubmol(mol, env, atomMap=amap)
                    for at2 in amap.keys():
                        if fpType == 'bv':
                            bitmap[at2][bit] = 1
                        else:
                            bitmap[at2].append(bit)
        mol._fpInfo = (molFp, bitmap)

    if atomId < 0:
        return mol._fpInfo[0]
    else:  # remove the bits of atomId
        if atomId >= mol.GetNumAtoms():
            raise ValueError("atom index greater than number of atoms")
        if len(mol._fpInfo) != 2:
            raise ValueError("_fpInfo not set")
        if fpType == 'bv':
            molFp = mol._fpInfo[0] ^ mol._fpInfo[1][atomId]  # xor
        else:  # count
            molFp = copy.deepcopy(mol._fpInfo[0])
            # delete the bits with atomId
            for bit in mol._fpInfo[1][atomId]:
                molFp[bit] -= 1
        return molFp
9fd8077c4f35c83e8996a53981f99baa0e4510a6
11,768
import math


def _rgb2lab(rgb):
    """Convert an RGB integer to Lab tuple"""

    def xyzHelper(value):
        """Helper function for XYZ colourspace conversion"""
        c = value / 255
        if c > 0.0445:
            c = (c + 0.055) / 1.055
            c = math.pow(c, 2.4)
        else:
            c /= 12.92
        c *= 100
        return c

    def labHelper(value):
        """Helper function for Lab colourspace conversion"""
        c = value
        if c > 0.008856:
            c = math.pow(c, 1.0 / 3.0)
        else:
            c = (7.787 * c) + (16.0 / 116.0)
        return c

    # convert into XYZ colourspace
    c1 = xyzHelper((rgb >> 16) & 0xFF)
    c2 = xyzHelper((rgb >> 8) & 0xFF)
    c3 = xyzHelper(rgb & 0xFF)

    x = (c1 * 0.4124) + (c2 * 0.3576) + (c3 * 0.1805)
    y = (c1 * 0.2126) + (c2 * 0.7152) + (c3 * 0.0722)
    z = (c1 * 0.0193) + (c2 * 0.1192) + (c3 * 0.9505)

    # convert into Lab colourspace
    c1 = labHelper(x / 95.047)
    c2 = labHelper(y / 100.0)
    c3 = labHelper(z / 108.883)

    l = (116.0 * c2) - 16
    a = 500.0 * (c1 - c2)
    b = 200.0 * (c2 - c3)

    return LabColour(l, a, b)
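A hedged usage sketch; LabColour is defined elsewhere in the original module, so a simple stand-in is declared here (an assumption) just to make the example self-contained.

from collections import namedtuple

# Stand-in for the module's LabColour type (assumption, not part of the original code).
LabColour = namedtuple("LabColour", ["l", "a", "b"])

# 0xFF0000 is pure red packed as an RGB integer.
lab = _rgb2lab(0xFF0000)
print(lab.l, lab.a, lab.b)  # roughly L~53, a~80, b~67 for sRGB red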
a663370e3908daa9ba795bb0dc2ecb945653221e
11,769
import os


def _resolve_dir(env_name, dflt_dir):
    """Resolve a directory given the override env var and its default directory.
    And if '~' is used to indicate the home directory, then expand that."""
    folder = os.environ.get(env_name, dflt_dir)
    if folder is not None:
        return os.path.expanduser(folder)
    return None
677c9b3bab970c56f1b3ea0ac8cff75d083e5328
11,770
import torch


def biband_mask(n: int, kernel_size: int, device: torch.device, v=-1e9):
    """compute mask for local attention with kernel size.

    Args:
        n (torch.Tensor): the input length.
        kernel_size (int): The local attention kernel size.
        device (torch.device): transformer mask to the device.

    Returns:
        torch.Tensor. shape: [n,n]. The masked locations are -1e9
        and unmasked locations are 0.
    """
    if kernel_size is None:
        return None
    half = kernel_size // 2
    mask1 = torch.ones(n, n).triu(diagonal=-half)
    mask2 = torch.ones(n, n).tril(diagonal=half)
    mask = mask1 * mask2
    mask = (1 - mask) * v
    return mask.to(device)
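A small usage sketch; the sequence length and kernel size are arbitrary illustration values.

import torch

# 6 positions, each allowed to attend to itself and 2 neighbours on either side.
mask = biband_mask(n=6, kernel_size=5, device=torch.device("cpu"))
print(mask.shape)         # torch.Size([6, 6])
print((mask == 0).sum())  # number of unmasked (attendable) pairs

# Typical use: add the mask to raw attention scores before the softmax.
scores = torch.randn(6, 6)
attn = torch.softmax(scores + mask, dim=-1)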
ab3a5f25f9fe0f83579d0492caa2913a13daa2d7
11,771
def containsIfElse(node):
    """ Checks whether the given node contains another if-else-statement """
    if node.type == "if" and hasattr(node, "elsePart"):
        return True

    for child in node:
        if child is None:
            pass

        # Blocks reset this if-else problem so we ignore them
        # (and their content) for our scan.
        elif child.type == "block":
            pass

        # Script blocks reset as well (protected by other function)
        elif child.type == "script":
            pass

        elif containsIfElse(child):
            return True

    return False
255f58fdf4abe69f10e9b433562ade12cb0bc215
11,772
import os


def lstm_create_dataset(data_home, batch_size, repeat_num=1, training=True):
    """Data operations."""
    ds.config.set_seed(1)
    data_dir = os.path.join(data_home, "aclImdb_train.mindrecord0")
    if not training:
        data_dir = os.path.join(data_home, "aclImdb_test.mindrecord0")

    data_set = ds.MindDataset(data_dir, columns_list=["feature", "label"],
                              num_parallel_workers=4)

    # apply map operations on images
    data_set = data_set.shuffle(buffer_size=data_set.get_dataset_size())
    data_set = data_set.batch(batch_size=batch_size, drop_remainder=True)
    data_set = data_set.repeat(count=repeat_num)

    return data_set
c0cfe9e37edbae523e580e0e218f9cfe4c0ff835
11,773
def get_gitlab_scripts(data):
    """GitLab is nice, as far as I can tell its files have a flat hierarchy
    with many small job entities"""

    def flatten_nested_string_lists(data):
        """helper function"""
        if isinstance(data, str):
            return data
        elif isinstance(data, list):
            return "\n".join([flatten_nested_string_lists(item) for item in data])
        else:
            raise ValueError(
                f"unexpected data type {type(data)} in script section: {data}"
            )

    result = {}
    for jobkey in data:
        if not isinstance(data[jobkey], dict):
            continue
        for section in ["script", "before_script", "after_script"]:
            if section in data[jobkey]:
                script = data[jobkey][section]
                result[f"{jobkey}/{section}"] = flatten_nested_string_lists(script)
    return result
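A hedged usage sketch with a tiny inline .gitlab-ci.yml document; it assumes PyYAML is available for parsing.

import yaml

ci_yaml = """
build:
  before_script:
    - pip install -r requirements.txt
  script:
    - pytest
"""

data = yaml.safe_load(ci_yaml)
print(get_gitlab_scripts(data))
# {'build/script': 'pytest', 'build/before_script': 'pip install -r requirements.txt'}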
ad73c1ea6d4edcbce51eea18de317d7ab2d5e536
11,774
import new


def method(cls):
    """Adds the function as a method to the given class."""
    def _wrap(f):
        cls.__dict__[f.func_name] = new.instancemethod(f, None, cls)
        return None
    return _wrap
0f746420bf9870dec5d8a5e69bcec414530fc1cb
11,775
def maps_from_echse(conf):
    """Produces time series of rainfall maps from ECHSE input data
    and catchment shapefiles.
    """
    # Read sub-catchment rainfall from file
    fromfile = np.loadtxt(conf["f_data"], dtype="string", delimiter="\t")
    if len(fromfile) == 2:
        rowix = 1
    elif len(fromfile) > 2:
        rowix = slice(1, len(fromfile))
    else:
        raise Exception("Data file is empty: %s" % conf["f_data"])
    var = fromfile[rowix, 1:].astype("f4")
    dtimes = fromfile[rowix, 0]
    dtimes_file = np.array([wradlib.util.iso2datetime(dtime) for dtime in dtimes])
    dtimesfromconf = wradlib.util.from_to(conf["tstart"], conf["tend"], conf["interval"])
    dtimes = np.intersect1d(dtimes_file, dtimesfromconf)

    if len(dtimes) == 0:
        print "No datetimes for mapping based on intersection of data file and config info."
        return(0)

    # objects = fromfile[0,1:]
    cats = plt.genfromtxt(conf["f_coords"], delimiter="\t", names=True,
                          dtype=[('id', '|S20'), ('lat', 'f4'), ('lon', 'f4'),
                                 ('x', 'f4'), ('y', 'f4')])

    mapx, mapy = wradlib.georef.reproject(cats["x"], cats["y"],
                                          projection_source=conf["trg_proj"],
                                          projection_target=conf["map_proj"])

    # Read shapefile
    dataset, inLayer = wradlib.io.open_shape(conf["f_cats_shp"])
    polys, keys = wradlib.georef.get_shape_coordinates(inLayer, key='DN')
    keys = np.array(keys)

    # Preprocess polygons (remove minors, sort in same order as in coords file)
    polys2 = []
    for i, id in enumerate(cats["id"]):
        keyix = np.where(keys == eval(id.strip("cats_")))[0]
        if len(keyix) > 1:
            # More than one key matching? Find largest matching polygon
            keyix = keyix[np.argmax([len(polys[key]) for key in keyix])]
        else:
            keyix = keyix[0]
        poly = polys[keyix].copy()
        if poly.ndim == 1:
            # Multi-Polygons - keep only the largest polygon
            # (just for plotting - no harm done)
            poly2 = poly[np.argmax([len(subpoly) for subpoly in poly])].copy()
        else:
            poly2 = poly.copy()
        polys2.append(wradlib.georef.reproject(poly2,
                                               projection_source=conf["trg_proj"],
                                               projection_target=conf["map_proj"]))

    colors = plt.cm.spectral(np.linspace(0, 1, len(conf["levels"])))
    mycmap, mynorm = from_levels_and_colors(conf["levels"], colors, extend="max")

    plt.interactive(False)
    for dtime in dtimes:
        datestr = (dtime - dt.timedelta(seconds=conf["interval"])).strftime("%Y%m%d.png")
        i = np.where(dtimes_file == dtime)[0][0]
        print datestr, i
        figpath = os.path.join(conf["savefigs"], datestr)
        fig = plt.figure(figsize=(6, 6))
        ax = fig.add_subplot(111, aspect="equal")
        ax, coll = tl.vis.plot_cats(polys2, var[i], ax=ax, bbox=conf["bbox"],
                                    cmap=mycmap, norm=mynorm, edgecolors='none')
        cb = plt.colorbar(coll, ax=ax, ticks=conf["levels"], shrink=0.6)
        cb.ax.tick_params(labelsize="small")
        cb.set_label("(mm)")
        plt.xlabel("Longitude")
        plt.ylabel("Latitude")
        tl.vis.plot_trmm_grid_lines(ax)
        plt.text(conf["bbox"]["left"] + 0.25, conf["bbox"]["top"] - 0.25,
                 "%s\n%s to\n%s" % (conf["figtxtbody"],
                                    (dtime - dt.timedelta(seconds=conf["interval"])).isoformat(" "),
                                    dtime.isoformat(" ")),
                 color="red", fontsize="small", verticalalignment="top")
        plt.tight_layout()
        plt.savefig(figpath)
        plt.close()
    plt.interactive(True)
31e09c5bed2f7fe3e0d750a59137c05ef987dc2e
11,776
def utility_assn(tfr_dfs):
    """Harvest a Utility-Date-State Association Table."""
    # These aren't really "data" tables, and should not be searched for associations
    non_data_dfs = [
        "balancing_authority_eia861",
        "service_territory_eia861",
    ]

    # The dataframes from which to compile BA-Util-State associations
    data_dfs = [tfr_dfs[table] for table in tfr_dfs if table not in non_data_dfs]

    logger.info("Building an EIA 861 Util-State-Date association table.")
    tfr_dfs["utility_assn_eia861"] = _harvest_associations(
        data_dfs, ["report_date", "utility_id_eia", "state"])

    return tfr_dfs
6b0357f1d7024bcfddac6981d968e67e5dbeba51
11,777
def is_smtp_enabled(backend=None):
    """
    Check if the current backend is SMTP based.
    """
    if backend is None:
        backend = get_mail_backend()
    return backend not in settings.SENTRY_SMTP_DISABLED_BACKENDS
988d2173923dc53cd3179cf0866c702ab9fe69d4
11,778
import requests


def get_presentation_requests_received(tenant: str, state: str = ''):
    """
    state: must be in ['proposal-sent', 'proposal-received', 'request-sent',
    'request-received', 'presentation-sent', 'presentation-received', 'done',
    'abandoned']
    """
    possible_states = ['', 'proposal-sent', 'proposal-received', 'request-sent',
                       'request-received', 'presentation-sent',
                       'presentation-received', 'done', 'abandoned']
    if state not in possible_states:
        raise HTTPException(400, "state must be in: " + str(possible_states))

    params = None
    if state:
        params = {
            'state': state,
        }

    j = requests.get(ACAPY_API + '/present-proof-2.0/records',
                     params=params,
                     headers=prepare_headers(tenant=tenant)).json()

    return j['results']
1157712b8e4df1b269892a2d3ec15dae366d8d71
11,779
def generate_round():
    """
    Generate a round.

    Returns:
        question: the question posed to the user
        answer: the correct answer to the question
    """
    total_num, random_num = generate_numbers()
    question = " ".join(total_num)
    answer = str(random_num)
    return question, answer
d4b535016e6ca6c6d673c1a6a2ee2c20eca87bc1
11,780
from datetime import datetime


def get_basic_activity():
    """
    A basic set of activity records for a 'Cohort 1' and CoreParticipant participant.
    """
    return [
        {'timestamp': datetime(2018, 3, 6, 0, 0), 'group': 'Profile', 'group_id': 1,
         'event': p_event.EHRFirstReceived},
        {'timestamp': datetime(2018, 3, 6, 20, 20, 57), 'group': 'Profile', 'group_id': 1,
         'event': p_event.SignupTime},
        {'timestamp': datetime(2018, 3, 6, 20, 35, 12), 'group': 'QuestionnaireModule', 'group_id': 40,
         'event': p_event.ConsentPII, 'answer': 'ConsentPermission_Yes', 'answer_id': 767},
        {'timestamp': datetime(2018, 3, 6, 20, 43, 50), 'group': 'QuestionnaireModule', 'group_id': 40,
         'event': p_event.EHRConsentPII, 'answer': 'ConsentPermission_Yes', 'answer_id': 767},
        {'timestamp': datetime(2018, 3, 6, 20, 46, 48), 'group': 'QuestionnaireModule', 'group_id': 40,
         'event': p_event.TheBasics, 'ConsentAnswer': None},
        {'timestamp': datetime(2018, 3, 6, 20, 49, 0), 'group': 'QuestionnaireModule', 'group_id': 40,
         'event': p_event.OverallHealth, 'ConsentAnswer': None},
        {'timestamp': datetime(2018, 3, 6, 20, 51, 6), 'group': 'QuestionnaireModule', 'group_id': 40,
         'event': p_event.Lifestyle, 'ConsentAnswer': None},
        {'timestamp': datetime(2018, 3, 28, 20, 18, 59), 'group': 'Biobank', 'group_id': 20,
         'event': p_event.BiobankConfirmed, 'dna_tests': 3, 'basline_tests': 4},
        {'timestamp': datetime(2018, 5, 17, 2, 11, 37), 'group': 'Biobank', 'group_id': 20,
         'event': p_event.BiobankOrder, 'dna_tests': 0, 'basline_tests': 0},
        # ROC-295: duplicate record, manually cancelled
        {'timestamp': datetime(2018, 5, 21, 18, 9, 8), 'group': 'Profile', 'group_id': 1,
         'event': p_event.PhysicalMeasurements, 'status': 'CANCELLED', 'status_id': 2},
        {'timestamp': datetime(2018, 5, 21, 18, 9, 12), 'group': 'Profile', 'group_id': 1,
         'event': p_event.PhysicalMeasurements, 'status': 'COMPLETED', 'status_id': 1},
        {'timestamp': datetime(2019, 6, 13, 0, 0), 'group': 'Profile', 'group_id': 1,
         'event': p_event.EHRLastReceived}
    ]
4ee13cf35326d6c09fb4174f0e4217b17a34a545
11,781
def bad_multi_examples_per_input_estimator_misaligned_input_refs(
    export_path, eval_export_path):
  """Like the above (good) estimator, but the input_refs is misaligned."""
  estimator = tf.estimator.Estimator(model_fn=_model_fn)
  estimator.train(input_fn=_train_input_fn, steps=1)

  return util.export_model_and_eval_model(
      estimator=estimator,
      serving_input_receiver_fn=_serving_input_receiver_fn,
      eval_input_receiver_fn=_bad_eval_input_receiver_fn_misaligned_input_refs,
      export_path=export_path,
      eval_export_path=eval_export_path)
c08fac8d0ae8679db56128dc8d4a36a5492a6737
11,782
import os


def page_is_dir(path) -> bool:
    """
    Tests whether a path corresponds to a directory

    arguments:
    path -- a path to a file

    returns:
    True if the path represents a directory else False
    """
    return os.path.isdir(path)
bb52f6f09110e085fbb4cd8aeb9d03b36fe07b84
11,783
import logging
import yaml


def read_configuration(dirname_f: str) -> dict:
    """
    :param dirname_f: path to the project ending with .../cameras_robonomics
    :type dirname_f: str
    :return: dictionary containing all the configurations
    :rtype: dict

    Read the config file, which contains all the required data, such as the
    filepath, robonomics parameters (remote wss, seed), camera parameters
    (ip, login, password, port), etc.
    """
    config_path = dirname_f + "/config/config.yaml"
    logging.debug(config_path)

    try:
        with open(config_path) as f:
            content = f.read()
            config_f = yaml.load(content, Loader=yaml.FullLoader)
            # logging.debug(f"Configuration dict: {content}")
            return config_f
    except Exception as e:
        while True:
            logging.error("Error in configuration file!")
            logging.error(e)
            exit()
784c45095d1dd5530c65e9000b0e9d7f95662b20
11,784
def caption_example(image):
  """Convert image caption data into an Example proto.

  Args:
    image: A ImageMetadata instance.

  Returns:
    example: An Example proto with serialized tensor data.
  """
  # Collect image object information from metadata.
  image_features, positions = read_object(image.objects, image.image_id)

  # Serialize multi-dimensional tensor data.
  captions_proto = tf.make_tensor_proto(np.array(image.captions))
  features_proto = tf.make_tensor_proto(image_features)
  positions_proto = tf.make_tensor_proto(positions)

  # Create final features dict.
  features = dict(
      image_id=int64_feature(image.image_id),
      captions=bytes_feature(captions_proto.SerializeToString()),
      object_features=bytes_feature(features_proto.SerializeToString()),
      object_positions=bytes_feature(positions_proto.SerializeToString()))
  return tf.train.Example(features=tf.train.Features(feature=features))
f989774a0d3321717cbb09f6342a6c86f5433c54
11,785
def GetAttributeTableByFid(fileshp, layername=0, fid=0):
    """
    GetAttributeTableByFid
    """
    res = {}
    dataset = ogr.OpenShared(fileshp)
    if dataset:
        layer = dataset.GetLayer(layername)
        feature = layer.GetFeature(fid)
        geom = feature.GetGeometryRef()
        res["geometry"] = geom.ExportToWkt()
        layerDefinition = layer.GetLayerDefn()
        for j in range(layerDefinition.GetFieldCount()):
            fieldname = layerDefinition.GetFieldDefn(j).GetName()
            res[fieldname] = feature.GetField(j)
    dataset = None
    return res
42b845ae5b1a3c9976262cc37f5854b80aa7b290
11,786
def get_root_folder_id(db, tree_identifier, linked_to, link_id):
    """Get id of the root folder for given data category and profile or user group

    Args:
        db (object): The db object
        tree_identifier (str): The identifier of the tree
        linked_to (str): ['profile'|'group']
        link_id (int): The profile id or the group id (depending on linked_to)

    Returns:
        The id of the root folder.
    """
    if linked_to not in ['profile', 'group']:
        raise MSGException(Error.CORE_INVALID_PARAMETER,
                           "Incorrect 'linked_to' value.")

    root_folder_id = None

    SQL_PROFILE = """SELECT root_folder_id
        FROM data_profile_tree
        WHERE profile_id=? AND tree_identifier=?"""
    SQL_USER_GROUP = """SELECT root_folder_id
        FROM data_user_group_tree
        WHERE user_group_id=? AND tree_identifier=?"""

    sql = SQL_PROFILE if linked_to == 'profile' else SQL_USER_GROUP

    res = db.execute(sql, (link_id, tree_identifier)).fetch_one()
    if res:
        root_folder_id = res['root_folder_id']

    return root_folder_id
7378ec4852d90913282109dcce5d8168613c835e
11,787
def str_cell(cell):
    """Get a nice string of given Cell statistics."""
    result = f"-----Cell ({cell.x}, {cell.y})-----\n"
    result += f"sugar: {cell.sugar}\n"
    result += f"max sugar: {cell.capacity}\n"
    result += f"height/level: {cell.level}\n"
    result += f"Occupied by Agent {cell.agent.id if cell.agent else None}\n"
    return result
d62801290321d5d2b8404dbe6243f2f0ae03ecef
11,788
def get_idx_pair(mu):
    """get perturbation position"""
    idx = np.where(mu != 0)[0]
    idx = [idx[0], idx[-1]]
    return idx
eed8b77f3f21af93c28c84d6f325dd2161740e6f
11,789
def zeeman_transitions(ju, jl, type):
    """ Find possible mu and ml for valid ju and jl for a given transition
    polarization

    Parameters:
        ju (scalar): Upper level J
        jl (scalar): Lower level J
        type (string): "Pi", "S+", or "S-" for relevant polarization type

    Returns:
        tuple: MU, ML arrays for given Js and polarization type
    """
    assert np.isscalar(ju) and np.isscalar(jl), "non-scalar J not supported"
    assert type.lower() in ["pi", "s+", "s-"], "unknown transition type"
    assert ju - jl in [-1, 0, 1], "delta-J should belong to {-1, 0, 1}"
    assert ju > 0 and jl >= 0, "only for positive ju and non-negative jl"

    if type.lower() == "pi":
        J = min(ju, jl)
        return np.arange(-J, J + 1), np.arange(-J, J + 1)
    elif type.lower() == "s+":
        if ju < jl:
            return np.arange(-ju, ju + 1), np.arange(-ju + 1, ju + 2)
        elif ju == jl:
            return np.arange(-ju, ju), np.arange(-ju + 1, ju + 1)
        else:
            return np.arange(-ju, jl), np.arange(-ju + 1, jl + 1)
    elif type.lower() == "s-":
        if ju < jl:
            return np.arange(-ju, ju + 1), np.arange(-jl, ju)
        elif ju == jl:
            return np.arange(-ju + 1, ju + 1), np.arange(-ju, ju)
        else:
            return np.arange(-ju + 2, ju + 1), np.arange(-ju + 1, ju)
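A brief usage sketch for the function above; the J values are arbitrary.

import numpy as np

# Pi transitions for a J=1 -> J=1 line: m_u equals m_l for each allowed sublevel.
mu, ml = zeeman_transitions(1, 1, "Pi")
print(mu)  # [-1  0  1]
print(ml)  # [-1  0  1]

# Sigma+ transitions for J_u=1 -> J_l=0.
mu, ml = zeeman_transitions(1, 0, "S+")
print(mu, ml)  # [-1] [0]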
446d9683da6cc027003b2ec755d8828ccb01db5d
11,790
def get_reachable_nodes(node):
    """
    returns a list with all the nodes from the tree with root *node*
    """
    ret = []
    stack = [node]
    while len(stack) > 0:
        cur = stack.pop()
        ret.append(cur)
        for c in cur.get_children():
            stack.append(c)
    return ret
c9ffaca113a5f85484433f214015bf93eea602d1
11,791
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc():
  """separate rgb embeddings."""
  hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
  hparams.num_hidden_layers = 16
  hparams.local_attention = True
  hparams.batch_size = 1
  hparams.block_length = 256
  return hparams
1d428cae33a6a34a7844171c72c7821a44fc3e97
11,792
def torsion_coordinate_names(zma):
    """ z-matrix torsional coordinate names

    (currently assumes torsional coordinates generated through x2z)
    """
    name_dct = standard_names(zma)
    inv_name_dct = dict(map(reversed, name_dct.items()))
    geo = automol.geom.without_dummy_atoms(geometry(zma))
    tors_names = automol.convert.geom.zmatrix_torsion_coordinate_names(geo)
    tors_names = tuple(map(inv_name_dct.__getitem__, tors_names))
    return tors_names
a7820e1619d4a73260ec4d9255b78cdec2263a55
11,793
from typing import List
from typing import Dict


def extract_other_creditors_d(
    page: pdfplumber.pdf.Page, markers: List[Dict], creditors: List
) -> List:
    """Crop and extract address, key and acct # from the PDF

    :param page: PDF page
    :param markers: The top and bottom markers
    :return: Address, key and account information
    """
    adjust = 0 if len(markers) == 5 else 12
    addy_bbox = (
        0,
        markers[0]["top"],
        int(markers[-1]["x1"]) * 0.35,
        markers[-1]["top"],
    )
    key_bbox = (
        markers[-3]["x0"],
        markers[0]["top"] - adjust,
        markers[-3]["x1"],
        markers[-3]["top"],
    )
    acct_bbox = (
        markers[1]["x0"],
        markers[1]["top"] - 12,
        markers[1]["x1"],
        markers[1]["top"],
    )

    address = page.crop(addy_bbox).filter(keys_and_input_text).extract_text()
    key = (
        page.crop(key_bbox).filter(keys_and_input_text).extract_text().strip()
    )
    acct = page.crop(acct_bbox).filter(keys_and_input_text).extract_text()

    for creditor in creditors:
        if creditor["key"] == key:
            other_creditors = creditor["other_creditors"]
            other_creditors.append(
                {"key": key, "address": address, "acct": acct}
            )
            creditor["other_creditors"] = other_creditors
    return creditors
cb66185c68c7ab3febeee611e4384b839b42417e
11,794
from typing import List
from pathlib import Path


def get_dicdirs(mecab_config: str = "mecab-config") -> List[Path]:
    """Get MeCab dictionary directories.

    Parameters
    ----------
    mecab_config : str
        Executable path of mecab-config, by default "mecab-config".

    Returns
    -------
    List[Path]
        MeCab dictionary directories.
    """
    dicdirs = []
    for path in _mecab_config_dicdir(mecab_config).glob("**/dicrc"):
        dicdirs.append(path.parent.resolve())
    return dicdirs
26d7969c072a9aa0668db31c296ee930b567049f
11,795
def new_instance(settings):
    """
    MAKE A PYTHON INSTANCE

    `settings` HAS ALL THE `kwargs`, PLUS `class` ATTRIBUTE TO INDICATE THE CLASS TO CREATE
    """
    settings = set_default({}, settings)
    if not settings["class"]:
        Log.error("Expecting 'class' attribute with fully qualified class name")

    # IMPORT MODULE FOR HANDLER
    path = settings["class"].split(".")
    class_name = path[-1]
    path = ".".join(path[:-1])
    constructor = None
    try:
        temp = __import__(path, globals(), locals(), [class_name], 0)
        constructor = object.__getattribute__(temp, class_name)
    except Exception as e:
        Log.error("Can not find class {{class}}", {"class": path}, cause=e)

    settings['class'] = None
    try:
        return constructor(kwargs=settings)  # MAYBE IT TAKES A KWARGS OBJECT
    except Exception as e:
        pass

    try:
        return constructor(**settings)
    except Exception as e:
        Log.error("Can not create instance of {{name}}", name=".".join(path), cause=e)
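The helper above depends on set_default and Log from its original module, so it is not runnable on its own; the sketch below illustrates the same dynamic-import idea using only the standard library, and the dotted class path used is just an example.

import importlib


def _load_class(dotted_path):
    # e.g. "collections.OrderedDict" -> the OrderedDict class
    module_path, _, class_name = dotted_path.rpartition(".")
    module = importlib.import_module(module_path)
    return getattr(module, class_name)


cls = _load_class("collections.OrderedDict")
instance = cls(a=1, b=2)
print(type(instance).__name__)  # OrderedDict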
bf32bd41105052816a9a54efb71143f2a250502f
11,796
from os import path


def find_entry(entries, fn):
    """Find an entry that matches the given filename fn more or less."""
    entry = get_entry_by_filename(entries, curr_file)
    if entry is not None:
        return entry

    key = lambda fn: path.splitext(fn)[0]
    entry = get_entry_by_filename(entries, curr_file, key)
    if entry is not None:
        return entry

    key = lambda fn: path.basename(path.splitext(fn)[0])
    entry = get_entry_by_filename(entries, curr_file, key)
    if entry is not None:
        return entry

    return None
0552d61bea835b5eb3975a388287062fcb733b33
11,797
def get_type(k):
    """Takes a field key. Returns False if the key is unknown, otherwise returns the key's type."""
    try:
        v = {
            'score': '#text',
            'applicant': 'str',
            'applicant_sort': 'str',
            'author': 'str',
            'author_sort': 'str',
            'brief': 'bool',
            'city': 'str',
            'daNumber': 'str',
            'dateCommentPeriod': 'date',
            'dateReplyComment': 'date',
            'dateRcpt': 'date',
            'disseminated': 'date',
            'exParte': 'bool',
            'fileNumber': 'str',
            'id': 'long',
            'lawfirm': 'str',
            'lawfirm_sort': 'str',
            'modified': 'date',
            'pages': 'int',
            'proceeding': 'str',
            'reportNumber': 'str',
            'regFlexAnalysis': 'bool',
            'smallBusinessImpact': 'bool',
            'stateCd': 'str',
            'submissionType': 'str',
            'text': 'str',
            'viewingStatus': 'str',
            'zip': 'str'
        }[k]
    except:
        v = False
    return v
fec3b7e04531dd202c46366f096f687160c68320
11,798
def al(p):
    """
    Given a quaternion p, return the 4x4 matrix A_L(p)
    which when multiplied with a column vector q gives
    the quaternion product pq.

    Parameters
    ----------
    p : numpy.ndarray
        4 elements, represents quaternion

    Returns
    -------
    numpy.ndarray
        4x4 matrix describing action of quaternion multiplication
    """
    # Given a quaternion p, return the 4x4 matrix A_L(p)
    # which when multiplied with a column vector q gives
    # the quaternion product pq.
    return np.array([[p[0], -p[1], -p[2], -p[3]],
                     [p[1],  p[0], -p[3],  p[2]],
                     [p[2],  p[3],  p[0], -p[1]],
                     [p[3], -p[2],  p[1],  p[0]]])
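A quick numerical check of the matrix above, assuming the usual (w, x, y, z) quaternion convention; the sample quaternions are arbitrary.

import numpy as np

p = np.array([1.0, 2.0, 3.0, 4.0])
q = np.array([5.0, 6.0, 7.0, 8.0])

# Hamilton product computed directly, for comparison.
w = p[0]*q[0] - p[1]*q[1] - p[2]*q[2] - p[3]*q[3]
x = p[0]*q[1] + p[1]*q[0] + p[2]*q[3] - p[3]*q[2]
y = p[0]*q[2] - p[1]*q[3] + p[2]*q[0] + p[3]*q[1]
z = p[0]*q[3] + p[1]*q[2] - p[2]*q[1] + p[3]*q[0]

assert np.allclose(al(p) @ q, [w, x, y, z])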
1e4803bffd75fb841b723d504261c51019d5d45e
11,799