the-stack_106_16992
# -*- coding: utf-8 -*-
#
# Database upgrade script
#
# RLPPTM Template Version 1.8.1 => 1.8.2
#
# Execute in web2py folder after code upgrade like:
# python web2py.py -S eden -M -R applications/eden/modules/templates/RLPPTM/upgrade/1.8.1-1.8.2.py
#
import sys

#from gluon.storage import Storage
#from gluon.tools import callback

#from s3 import S3Duplicate

# Override auth (disables all permission checks)
auth.override = True

# Failed-flag
failed = False

# Info
def info(msg):
    sys.stderr.write("%s" % msg)
def infoln(msg):
    sys.stderr.write("%s\n" % msg)

# Load models for tables
ltable = s3db.org_service_site

IMPORT_XSLT_FOLDER = os.path.join(request.folder, "static", "formats", "s3csv")
TEMPLATE_FOLDER = os.path.join(request.folder, "modules", "templates", "RLPPTM")

# -----------------------------------------------------------------------------
# Remove empty shipments
#
if not failed:
    info("Remove empty shipments")

    stable = s3db.inv_send
    titable = s3db.inv_track_item

    left = titable.on((titable.send_id == stable.id) & \
                      (titable.deleted == False))
    query = (stable.deleted == False) & \
            (titable.id == None)
    rows = db(query).select(stable.id,
                            groupby = stable.id,
                            left = left,
                            )
    empty = [row.id for row in rows]
    if empty:
        resource = s3db.resource("inv_send", id=empty)
        deleted = resource.delete(cascade = True)
        if resource.error:
            infoln("...failed (%s)" % resource.error)
            failed = True
        else:
            infoln("...done (%s shipments deleted)" % deleted)
    else:
        infoln("...skip (no empty shipments found)")

# -----------------------------------------------------------------------------
# Date all requests
#
if not failed:
    info("Date all requests")

    rtable = s3db.req_req

    query = (rtable.date == None) & \
            (rtable.deleted == False)
    updated = db(query).update(date = rtable.created_on,
                               modified_on = rtable.modified_on,
                               modified_by = rtable.modified_by,
                               )
    infoln("...done (%s records fixed)" % updated)

# -----------------------------------------------------------------------------
# Finishing up
#
if failed:
    db.rollback()
    infoln("UPGRADE FAILED - Action rolled back.")
else:
    db.commit()
    infoln("UPGRADE SUCCESSFUL.")
the-stack_106_16993
import utils
import string
import consts
import torch
import torch.nn as nn
from tqdm import tqdm
from pathlib import Path

PUNCS = set(string.punctuation) - {'-'}


class BaseModel(nn.Module):
    def __init__(self, model_dir) -> None:
        super().__init__()
        self.sigmoid = nn.Sigmoid()
        self.dropout = nn.Dropout(.2)
        self.loss = nn.BCEWithLogitsLoss()
        self.model_dir = Path(model_dir)
        self.model_dir.mkdir(exist_ok=True)

    @property
    def config(self):
        raise NotImplementedError

    @classmethod
    def _from_config(cls, config_dict: dict):
        raise NotImplementedError

    @classmethod
    def from_config(cls, config_or_path_or_dir):
        config = None
        if isinstance(config_or_path_or_dir, dict):
            config = config_or_path_or_dir
        else:
            assert isinstance(config_or_path_or_dir, str) or isinstance(config_or_path_or_dir, Path)
            path_or_dir = Path(config_or_path_or_dir)
            if path_or_dir.is_dir():
                config_path = path_or_dir / 'model_config.json'
                config = utils.Json.load(config_path)
            else:
                assert path_or_dir.is_file()
                config = utils.Json.load(path_or_dir)
        return cls._from_config(config)

    def get_probs(self, *features):
        logits = self(*features)
        return self.sigmoid(logits)

    def get_loss(self, labels, *features):
        logits = self(*features)
        logits = logits.flatten()
        labels = labels.flatten().to(torch.float32)
        loss = self.loss(logits, labels).mean()
        return loss

    def predict(self, path_predict_docs, dir_output, batch_size, use_cache):
        raise NotImplementedError

    @staticmethod
    def _par_decode_doc(predicted_doc, threshold):
        sents = []
        for predicted_sent in predicted_doc['sents']:
            tokens = consts.LM_TOKENIZER.convert_ids_to_tokens(predicted_sent['ids'])
            predicted_spans = [s for s in predicted_sent['spans'] if s[2] > threshold]
            predicted_spans = sorted(predicted_spans, key=lambda s: (s[1] - s[0], s[2]), reverse=True)
            idxs_taken = set()
            spans = []
            for l_idx, r_idx, prob in predicted_spans:
                idxs_set = set(range(l_idx, r_idx + 1))
                if idxs_set & idxs_taken:
                    continue
                idxs_taken |= idxs_set
                phrase = consts.roberta_tokens_to_str(tokens[l_idx: r_idx + 1])
                spans.append([l_idx, r_idx, phrase])
            sents.append({'tokens': tokens, 'spans': spans})
        return sents

    @staticmethod
    def decode(path_predicted_docs, output_dir, threshold, use_cache, use_tqdm):
        output_dir = Path(output_dir)
        output_dir.mkdir(exist_ok=True)
        path_output = output_dir / (f'doc2sents-{threshold}-' + path_predicted_docs.stem + '.json')
        if use_cache and utils.IO.is_valid_file(path_output):
            print(f'[Decode] Use cache: {path_output}')
            return path_output
        utils.Log.info(f'Decode: {path_output}')
        path_predicted_docs = Path(path_predicted_docs)
        predicted_docs = utils.Pickle.load(path_predicted_docs)
        to_iterate = tqdm(predicted_docs, ncols=100, desc='Decode') if use_tqdm else predicted_docs
        doc2sents = {doc['_id_']: BaseModel._par_decode_doc(doc, threshold) for doc in to_iterate}
        utils.OrJson.dump(doc2sents, path_output)

        # after decode
        decoded_corpus = DecodedCorpus(path_output)
        decoded_corpus.dump_html()

        return path_output

    @staticmethod
    def _par_get_doc_cands(predicted_doc, threshold, filter_punc=True):
        cands = set()
        for predicted_sent in predicted_doc['sents']:
            tokens = consts.LM_TOKENIZER.convert_ids_to_tokens(predicted_sent['ids'])
            predicted_spans = predicted_sent['spans']
            for l_idx, r_idx, prob in predicted_spans:
                if prob > threshold:
                    cand = consts.roberta_tokens_to_str(tokens[l_idx: r_idx + 1])
                    cand = utils.stem_cand(cand)
                    if cand:
                        cands.add(cand)
        if filter_punc:
            cands = {c for c in cands if not (c[0] in PUNCS or c[-1] in PUNCS)}
        return list(cands)

    @staticmethod
    def get_doc2cands(path_predicted_docs, output_dir, expected_num_cands_per_doc, use_cache, use_tqdm):
        output_dir = Path(output_dir)
        path_output = output_dir / (f'doc2cands-{expected_num_cands_per_doc}-' + path_predicted_docs.stem + '.json')
        if use_cache and utils.IO.is_valid_file(path_output):
            print(f'[Doc2cands] Use cache: {path_output}')
            return path_output
        utils.Log.info(f'Doc2cands: {path_output}')
        path_predicted_docs = Path(path_predicted_docs)
        predicted_docs = utils.Pickle.load(path_predicted_docs)
        to_iterate = tqdm(predicted_docs, ncols=100, desc='Decode') if use_tqdm else predicted_docs

        threshold_l = 0.0
        threshold_r = 1.0
        min_average_num_cands = expected_num_cands_per_doc - .5
        max_average_num_cands = expected_num_cands_per_doc + .5
        for _ in range(20):
            threshold = (threshold_l + threshold_r) / 2
            doc2cands = {doc['_id_']: BaseModel._par_get_doc_cands(doc, threshold) for doc in to_iterate}
            num_cands = [len(cands) for doc, cands in doc2cands.items() if doc in consts.DOCIDS_WITH_GOLD]
            average_num_cands = utils.mean(num_cands)
            print(f'threshold={threshold:.3f} num_cands={average_num_cands}')
            if min_average_num_cands <= average_num_cands <= max_average_num_cands:
                print('threshold OK!')
                break
            if average_num_cands < min_average_num_cands:
                threshold_r = threshold
            else:
                assert max_average_num_cands < average_num_cands
                threshold_l = threshold
        print(f'path_output: {path_output}')
        utils.Json.dump(doc2cands, path_output)
        return path_output

    @staticmethod
    def load_ckpt(path_ckpt):
        ckpt = torch.load(path_ckpt, map_location='cpu')
        return ckpt['model']


class DecodedCorpus:
    def __init__(self, path_decoded_doc2sents):
        self.path_decoded_doc2sents = Path(path_decoded_doc2sents)
        self.decoded_doc2sents = utils.Json.load(path_decoded_doc2sents)

    def dump_html(self):
        path_output = self.path_decoded_doc2sents.with_name(
            self.path_decoded_doc2sents.name.replace('doc2sents', 'html')).with_suffix('.html')
        html_lines = []
        for doc, sents in self.decoded_doc2sents.items():
            html_lines.append(f'DOC {doc}')
            for sent in sents:
                # ipdb.set_trace()
                tokens = sent['tokens']
                for l, r, _ in sent['spans']:
                    tokens[l] = consts.HTML_BP + tokens[l]
                    tokens[r] = tokens[r] + consts.HTML_EP + ' |'
                html_lines.append(consts.roberta_tokens_to_str(tokens))
        html_lines = [f'<p>{line}<p>' for line in html_lines]
        utils.TextFile.dumplist(html_lines, path_output)


class BaseFeatureExtractor:
    def __init__(self, output_dir, use_cache=True) -> None:
        super().__init__()
        self.use_cache = use_cache
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(exist_ok=True, parents=True)

    @staticmethod
    def _get_batch_size(seqlen):
        return 64 * 128 // seqlen

    @staticmethod
    def _batchify(marked_sents, is_train=False):
        batches = []  # each element is a tuple of (input_ids_batch, input_masks_batch)
        pointer = 0
        total_num = len(marked_sents)
        while pointer < total_num:
            maxlen = len(marked_sents[pointer]['ids'])
            batch_size = BaseFeatureExtractor._get_batch_size(maxlen)
            input_ids_batch = []
            input_masks_batch = []
            pos_spans_batch = []
            neg_spans_batch = []
            possible_spans_batch = []
            for marked_sent in marked_sents[pointer: pointer + batch_size]:
                input_id = marked_sent['ids']
                word_idxs = marked_sent['widxs']
                if not is_train:
                    possible_spans = utils.get_possible_spans(
                        word_idxs, len(input_id), consts.MAX_WORD_GRAM, consts.MAX_SUBWORD_GRAM)
                    possible_spans_batch.append(possible_spans)
                len_diff = maxlen - len(input_id)
                assert len_diff >= 0, 'Input ids must have been sorted!'
                input_ids_batch.append(
                    [consts.LM_TOKENIZER.cls_token_id] + input_id + [consts.LM_TOKENIZER.pad_token_id] * len_diff)
                input_masks_batch.append([1] + [1] * len(input_id) + [0] * len_diff)
                if is_train:
                    pos_spans = marked_sent['pos_spans']
                    neg_spans = marked_sent['neg_spans']
                    pos_spans_batch.append(pos_spans)
                    neg_spans_batch.append(neg_spans)
            batch_size = len(input_ids_batch)
            pointer += batch_size
            input_ids_batch = torch.tensor(input_ids_batch, device=consts.DEVICE)
            input_masks_batch = torch.tensor(input_masks_batch, device=consts.DEVICE)
            if is_train:
                batches.append((input_ids_batch, input_masks_batch, pos_spans_batch, neg_spans_batch))
            else:
                batches.append((input_ids_batch, input_masks_batch, possible_spans_batch))
        return batches
the-stack_106_16994
#! /usr/bin/env python3

# Note that, for readability purposes, we only plot 1-4 threads for uFS.
# Empirically, we observe uFS-5 to uFS-10 perform similar to uFS-4, which causes
# the curve overlapping with each other and make it difficult to read.

import sys
from zplot import *


def print_usage_and_exit():
    print(f"Usage: {sys.argv[0]} <plot_name> <varmail_data_path>")
    exit(1)


if len(sys.argv) != 3:
    print_usage_and_exit()

plot_name = sys.argv[1]
varmail_data_path = sys.argv[2]

DRAWABLE_X_LEN = 200
DRAWABLE_Y_LEN = 100
DRAWABLE_COORD_X = 20
DRAWABLE_COORD_Y = 22

YLABEL_SHIFT = [0, 7]
XTITLE_SHIFT = [0, 2]
YTITLE_SHIFT = [12, 0]

LEGEND_BASE = [30, 120]
LEGEND_X_OFFSET = 40
LEGEND_Y_OFFSET = 10
LEGEND_EACH_ROW = 2
LEGEND_FONT_SZ = 6

TITLE_FONT_SZ = 6
LABEL_FONT_SZ = 6
LINE_WIDTH = 1

LINES = [
    "ufs_1", "ufs_2", "ufs_3", "ufs_4",
    # "ufs_5", "ufs_6", "ufs_7", "ufs_8",
    # "ufs_9", "ufs_10",
    "ext4"
]

LINES_COLOR_MAP = {
    "ufs_1": "0.85,0.85,0.85",
    "ufs_2": "0.6,0.6,0.6",
    "ufs_3": "0.4,0.4,0.4",
    "ufs_4": "0,0,0",
    # "ufs_5": "green",
    # "ufs_6": "greenyellow",
    # "ufs_7": "pink",
    # "ufs_8": "orange",
    # "ufs_9": "tomato",
    # "ufs_10": "red",
    "ext4": "black"
}

LINES_DASH_MAP = {
    "ufs_1": 0,
    "ufs_2": 0,
    "ufs_3": 0,
    "ufs_4": 0,
    # "ufs_5": "green",
    # "ufs_6": "greenyellow",
    # "ufs_7": "pink",
    # "ufs_8": "orange",
    # "ufs_9": "tomato",
    # "ufs_10": "red",
    "ext4": [4, 1.6]
}

LINES_NAME_MAP = {
    "ufs_1": "uFS-1",
    "ufs_2": "uFS-2",
    "ufs_3": "uFS-3",
    "ufs_4": "uFS-4",
    "ufs_5": "uFS-5",
    "ufs_6": "uFS-6",
    "ufs_7": "uFS-7",
    "ufs_8": "uFS-8",
    "ufs_9": "uFS-9",
    "ufs_10": "uFS-10",
    "ext4": "ext4"
}

legend_map = {line: legend() for line in LINES}

ctype = 'eps'
if len(sys.argv) == 2:
    ctype = sys.argv[1]

c = canvas(ctype,
           title=plot_name,
           dimensions=[
               DRAWABLE_X_LEN + DRAWABLE_COORD_X + 5,
               DRAWABLE_Y_LEN + DRAWABLE_COORD_Y + 10
           ])
p = plotter()
t = table(file=varmail_data_path)
d = drawable(canvas=c,
             coord=[DRAWABLE_COORD_X, DRAWABLE_COORD_Y],
             dimensions=[DRAWABLE_X_LEN, DRAWABLE_Y_LEN],
             xrange=[1, 10],
             yrange=[0, 400000])

ymanual = [[f"{y//1000}K", y] for y in range(100000, 400001, 100000)]
# ymanual[0] = ["0", 0]

for line in LINES:
    p.line(drawable=d,
           table=t,
           xfield='num_client',
           yfield=line,
           linedash=LINES_DASH_MAP[line],
           linewidth=LINE_WIDTH,
           linecolor=LINES_COLOR_MAP[line],
           legend=legend_map[line],
           legendtext=LINES_NAME_MAP[line])

axis(drawable=d,
     # title='Varmail Throughput',
     xtitle='Number of Clients',
     ytitle='IOPS',
     xlabelfontsize=LABEL_FONT_SZ,
     ylabelfontsize=LABEL_FONT_SZ,
     xtitlesize=TITLE_FONT_SZ,
     ytitlesize=TITLE_FONT_SZ,
     ylabelshift=YLABEL_SHIFT,
     xtitleshift=XTITLE_SHIFT,
     ytitleshift=YTITLE_SHIFT,
     ylabelrotate=90,
     ymanual=ymanual)

legend_base_x, legend_base_y = LEGEND_BASE
cnt = 0
for line in LINES:
    legend = legend_map[line]
    legend.draw(canvas=c,
                coord=[
                    legend_base_x + (cnt % LEGEND_EACH_ROW) * LEGEND_X_OFFSET,
                    legend_base_y - (cnt // LEGEND_EACH_ROW) * LEGEND_Y_OFFSET
                ],
                fontsize=LEGEND_FONT_SZ)
    cnt += 1

c.render()
the-stack_106_16996
import originpro as op
import numpy as np
from skimage.util import invert

#load image stack
fn = op.path('e') + r'Samples\Image Processing and Analysis\*.tif'
iw = op.new_image()
iw.from_file(fn)
print(iw.frames)

#get the 3rd image
im2 = iw.to_np2d(2)
im2 *= 2
im2 = invert(im2)

#put it back into 2nd image
iw.from_np2d(im2, 1)

#show thumbnails
iw.set_int('NAV', 1)
iw.set_str('Palette', 'Fire')
the-stack_106_16998
# model settings
model = dict(
    type='MaskRCNN',
    pretrained='modelzoo://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        normalize=dict(type='SyncBN', frozen=False),
        norm_eval=False,
    ),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_scales=[8],
        anchor_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        use_sigmoid_cls=True),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='SharedFCBBoxHead',
        num_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=81,
        target_means=[0., 0., 0., 0.],
        target_stds=[0.1, 0.1, 0.2, 0.2],
        reg_class_agnostic=False),
    mask_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    mask_head=dict(
        type='FCNMaskHead',
        num_convs=4,
        in_channels=256,
        conv_out_channels=256,
        num_classes=81))
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        smoothl1_beta=1 / 9.0,
        debug=False),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        mask_size=28,
        pos_weight=-1,
        debug=False))
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.05,
        nms=dict(type='nms', iou_thr=0.5),
        max_per_img=100,
        mask_thr_binary=0.5))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0.5,
        with_mask=True,
        with_crowd=True,
        with_label=True),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=True,
        with_crowd=True,
        with_label=True),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_label=False,
        test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/mask_rcnn_r50_sbn_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
the-stack_106_17001
import requests
import re
import lxml.html
import datetime
import MySQLdb

conn = MySQLdb.connect(db='Crawler', user='cloud', passwd='1111', charset='utf8mb4')
c = conn.cursor()

def crawling(page_count):
    front_url = "http://www.jobkorea.co.kr/Starter/?JoinPossible_Stat=0&schOrderBy=0&LinkGubun=0&LinkNo=0&schType=0&schGid=0&Page="
    for i in range(1, page_count+1):
        url = front_url + str(i)
        list_page = requests.get(url)
        root = lxml.html.fromstring(list_page.content)
        for everything in root.cssselect('.filterList'):
            for thing in everything.cssselect('li'):
                t = 0
                companies = thing.cssselect('.co .coTit a')
                company = companies[0].text.strip()
                titles = thing.cssselect('.info .tit a')
                title = titles[0].text_content().strip()
                title_url = titles[0].get('href')
                site_name = '잡코리아'
                field1 = thing.cssselect('.info .sTit span:nth-child(1)')
                field1 = field1[0].text
                field2 = thing.cssselect('.info .sTit span:nth-child(2)')
                if not field2:
                    field2 = 'NULL'
                elif field2:
                    field2 = field2[0].text
                field3 = thing.cssselect('.info .sTit span:nth-child(3)')
                if not field3:
                    field3 = 'NULL'
                elif field3:
                    field3 = field3[0].text
                careers = thing.cssselect('.sDesc strong')
                career = careers[0].text
                academics = thing.cssselect('.sDesc span:nth-child(2)')
                academic = academics[0].text
                title_url = 'http://www.jobkorea.co.kr' + title_url
                detail_page = requests.get(title_url)
                work = lxml.html.fromstring(detail_page.content)
                working = work.cssselect('.tbRow.clear div:nth-child(2) dd:nth-child(2) .addList .col_1')
                if not working:
                    workingcondition = ''
                elif working:
                    workingcondition = working[0].text
                areas = thing.cssselect('.sDesc span:nth-child(3)')
                area = areas[0].text
                area = area.split(', ')[0]
                deadlines = thing.cssselect('.side .day')
                deadline = deadlines[0].text
                if deadline == "내일마감":
                    deadline = datetime.datetime.now() + datetime.timedelta(days=1)
                elif deadline == "오늘마감":
                    deadline = datetime.datetime.now()
                elif deadline == "채용시":
                    deadline = datetime.datetime.now()
                else:
                    tmp = deadline.split('(')[0]
                    deadline = datetime.datetime.strptime(tmp, "~%Y.%m.%d").date()
                print(deadline)

                select_sql = 'SELECT title, titlelink FROM re_info'
                c.execute(select_sql)
                for row in c.fetchall():
                    for j in range(len(row)):
                        if row[j] == title or row[j] == title_url:
                            t = 1
                if t == 0:
                    insert_sql = 'INSERT INTO re_info(company, title, titlelink, sitename, field1, field2, field3, career, academic, area, workingcondition, deadline) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
                    insert_val = company, title, title_url, site_name, field1, field2, field3, career, academic, area, workingcondition, deadline
                    c.execute(insert_sql, insert_val)
                    conn.commit()

def main():
    page_count = 4
    crawling(page_count)
    conn.close()

main()
the-stack_106_17002
# Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The Server Group API Extension."""

import webob
from webob import exc

from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
import nova.exception
from nova.i18n import _
from nova.i18n import _LE
from nova import objects
from nova.openstack.common import log as logging
from nova import utils

LOG = logging.getLogger(__name__)

SUPPORTED_POLICIES = ['anti-affinity', 'affinity']

authorize = extensions.extension_authorizer('compute', 'server_groups')


def _authorize_context(req):
    context = req.environ['nova.context']
    authorize(context)
    return context


class ServerGroupController(wsgi.Controller):
    """The Server group API controller for the OpenStack API."""

    def __init__(self, ext_mgr):
        self.ext_mgr = ext_mgr

    def _format_server_group(self, context, group):
        # the id field has its value as the uuid of the server group
        # There is no 'uuid' key in server_group seen by clients.
        # In addition, clients see policies as a ["policy-name"] list;
        # and they see members as a ["server-id"] list.
        server_group = {}
        server_group['id'] = group.uuid
        server_group['name'] = group.name
        server_group['policies'] = group.policies or []
        # NOTE(danms): This has been exposed to the user, but never used.
        # Since we can't remove it, just make sure it's always empty.
        server_group['metadata'] = {}
        members = []
        if group.members:
            # Display the instances that are not deleted.
            filters = {'uuid': group.members, 'deleted': False}
            instances = objects.InstanceList.get_by_filters(
                context, filters=filters)
            members = [instance.uuid for instance in instances]
        server_group['members'] = members
        return server_group

    def _validate_policies(self, policies):
        """Validate the policies.

        Validates that there are no contradicting policies, for example
        'anti-affinity' and 'affinity' in the same group.
        Validates that the defined policies are supported.
        :param policies: the given policies of the server_group
        """
        if ('anti-affinity' in policies and
                'affinity' in policies):
            msg = _("Conflicting policies configured!")
            raise nova.exception.InvalidInput(reason=msg)
        not_supported = [policy for policy in policies
                         if policy not in SUPPORTED_POLICIES]
        if not_supported:
            msg = _("Invalid policies: %s") % ', '.join(not_supported)
            raise nova.exception.InvalidInput(reason=msg)

        # Note(wingwj): It doesn't make sense to store duplicate policies.
        if sorted(set(policies)) != sorted(policies):
            msg = _("Duplicate policies configured!")
            raise nova.exception.InvalidInput(reason=msg)

    def _validate_input_body(self, body, entity_name):
        if not self.is_valid_body(body, entity_name):
            msg = _("the body is invalid.")
            raise nova.exception.InvalidInput(reason=msg)

        subbody = dict(body[entity_name])

        expected_fields = ['name', 'policies']
        for field in expected_fields:
            value = subbody.pop(field, None)
            if not value:
                msg = _("'%s' is either missing or empty.") % field
                raise nova.exception.InvalidInput(reason=msg)
            if field == 'name':
                utils.check_string_length(value, field,
                                          min_length=1, max_length=255)
                if not common.VALID_NAME_REGEX.search(value):
                    msg = _("Invalid format for name: '%s'") % value
                    raise nova.exception.InvalidInput(reason=msg)
            elif field == 'policies':
                if isinstance(value, list):
                    [utils.check_string_length(v, field,
                                               min_length=1, max_length=255)
                     for v in value]
                    self._validate_policies(value)
                else:
                    msg = _("'%s' is not a list") % value
                    raise nova.exception.InvalidInput(reason=msg)

        if subbody:
            msg = _("unsupported fields: %s") % subbody.keys()
            raise nova.exception.InvalidInput(reason=msg)

    def show(self, req, id):
        """Return data about the given server group."""
        context = _authorize_context(req)
        try:
            sg = objects.InstanceGroup.get_by_uuid(context, id)
        except nova.exception.InstanceGroupNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        return {'server_group': self._format_server_group(context, sg)}

    def delete(self, req, id):
        """Delete an server group."""
        context = _authorize_context(req)
        try:
            sg = objects.InstanceGroup.get_by_uuid(context, id)
        except nova.exception.InstanceGroupNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        quotas = None
        if self.ext_mgr.is_loaded('os-server-group-quotas'):
            quotas = objects.Quotas()
            project_id, user_id = objects.quotas.ids_from_server_group(context,
                                                                       sg)
            try:
                # We have to add the quota back to the user that created
                # the server group
                quotas.reserve(context, project_id=project_id,
                               user_id=user_id, server_groups=-1)
            except Exception:
                quotas = None
                LOG.exception(_LE("Failed to update usages deallocating "
                                  "server group"))

        try:
            sg.destroy()
        except nova.exception.InstanceGroupNotFound as e:
            if quotas:
                quotas.rollback()
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        if quotas:
            quotas.commit()

        return webob.Response(status_int=204)

    def index(self, req):
        """Returns a list of server groups."""
        context = _authorize_context(req)
        project_id = context.project_id
        if 'all_projects' in req.GET and context.is_admin:
            sgs = objects.InstanceGroupList.get_all(context)
        else:
            sgs = objects.InstanceGroupList.get_by_project_id(
                context, project_id)
        limited_list = common.limited(sgs.objects, req)
        result = [self._format_server_group(context, group)
                  for group in limited_list]
        return {'server_groups': result}

    def create(self, req, body):
        """Creates a new server group."""
        context = _authorize_context(req)

        try:
            self._validate_input_body(body, 'server_group')
        except nova.exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())

        quotas = None
        if self.ext_mgr.is_loaded('os-server-group-quotas'):
            quotas = objects.Quotas()
            try:
                quotas.reserve(context, project_id=context.project_id,
                               user_id=context.user_id, server_groups=1)
            except nova.exception.OverQuota:
                msg = _("Quota exceeded, too many server groups.")
                raise exc.HTTPForbidden(explanation=msg)

        vals = body['server_group']
        sg = objects.InstanceGroup(context)
        sg.project_id = context.project_id
        sg.user_id = context.user_id
        try:
            sg.name = vals.get('name')
            sg.policies = vals.get('policies')
            sg.create()
        except ValueError as e:
            if quotas:
                quotas.rollback()
            raise exc.HTTPBadRequest(explanation=e)

        if quotas:
            quotas.commit()

        return {'server_group': self._format_server_group(context, sg)}


class Server_groups(extensions.ExtensionDescriptor):
    """Server group support."""
    name = "ServerGroups"
    alias = "os-server-groups"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "servergroups/api/v2")
    updated = "2013-06-20T00:00:00Z"

    def get_resources(self):
        resources = []

        res = extensions.ResourceExtension(
            'os-server-groups',
            controller=ServerGroupController(self.ext_mgr),
            member_actions={"action": "POST", })
        resources.append(res)

        return resources
the-stack_106_17003
# Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""

  Secret Santa problem in Google CP Solver.

  From Ruby Quiz Secret Santa
  http://www.rubyquiz.com/quiz2.html
  '''
  Honoring a long standing tradition started by my wife's dad, my friends
  all play a Secret Santa game around Christmas time. We draw names and
  spend a week sneaking that person gifts and clues to our identity. On the
  last night of the game, we get together, have dinner, share stories, and,
  most importantly, try to guess who our Secret Santa was. It's a crazily
  fun way to enjoy each other's company during the holidays.

  To choose Santas, we use to draw names out of a hat. This system was
  tedious, prone to many 'Wait, I got myself...' problems. This year, we
  made a change to the rules that further complicated picking and we knew
  the hat draw would not stand up to the challenge. Naturally, to solve
  this problem, I scripted the process. Since that turned out to be more
  interesting than I had expected, I decided to share.

  This weeks Ruby Quiz is to implement a Secret Santa selection script.
  ...
  Your script will be fed a list of names on STDIN.
  ...
  Your script should then choose a Secret Santa for every name in the list.
  Obviously, a person cannot be their own Secret Santa. In addition, my
  friends no longer allow people in the same family to be Santas for each
  other and your script should take this into account.
  '''

  Comment: This model skips the file input and mail parts. We assume
  that the friends are identified with a number from 1..n, and the
  families is identified with a number 1..num_families.

  Compare with the following model:
  * MiniZinc: http://www.hakank.org/minizinc/secret_santa.mzn

  This model gives 4089600 solutions and the following statistics:
  - failures: 31264
  - branches: 8241726
  - WallTime: 23735 ms (note: without any printing of the solutions)

  This model was created by Hakan Kjellerstrand ([email protected])
  Also see my other Google CP Solver models:
  http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp


def main():

  # Create the solver.
  solver = pywrapcp.Solver('Secret Santa problem')

  #
  # data
  #
  family = [1, 1, 1, 1, 2, 3, 3, 3, 3, 3, 4, 4]
  num_families = max(family)
  n = len(family)

  #
  # declare variables
  #
  x = [solver.IntVar(0, n - 1, 'x[%i]' % i) for i in range(n)]

  #
  # constraints
  #
  solver.Add(solver.AllDifferent(x))

  # Can't be one own's Secret Santa
  # Ensure that there are no fix-point in the array
  for i in range(n):
    solver.Add(x[i] != i)

  # No Secret Santa to a person in the same family
  for i in range(n):
    solver.Add(family[i] != solver.Element(family, x[i]))

  #
  # solution and search
  #
  db = solver.Phase(x, solver.INT_VAR_SIMPLE, solver.INT_VALUE_SIMPLE)

  solver.NewSearch(db)

  num_solutions = 0
  while solver.NextSolution():
    num_solutions += 1
    print('x:', [x[i].Value() for i in range(n)])

  print()
  print('num_solutions:', num_solutions)
  print('failures:', solver.Failures())
  print('branches:', solver.Branches())
  print('WallTime:', solver.WallTime(), 'ms')


if __name__ == '__main__':
  main()
the-stack_106_17006
import os
import shutil
import sys
from typing import Optional

import click

from valohai_cli.api import get_host_and_token
from valohai_cli.commands.project.create import create
from valohai_cli.commands.project.link import link
from valohai_cli.ctx import get_project
from valohai_cli.exceptions import NotLoggedIn
from valohai_cli.messages import error, warn
from valohai_cli.models.project import Project
from valohai_cli.utils import get_project_directory
from valohai_cli.yaml_wizard import yaml_wizard

DONE_TEXT = """
All done! You can now create an ad-hoc execution with
  $ {command}
to see that everything works as it should.

For better repeatability, we recommend that your code is in a Git repository;
you can link the repository to the project in the Valohai webapp.

Happy (machine) learning!
"""


@click.command()
def init() -> None:
    """
    Interactively initialize a Valohai project.
    """
    current_project = get_project()
    if current_project:
        error(
            'The directory {directory} is already linked to {name}. Please unlink the directory first.'.format(
                directory=current_project.directory,
                name=current_project.name,
            )
        )
        sys.exit(1)

    click.secho('Hello! This wizard will help you start a Valohai compatible project.', fg='green', bold=True)
    directory = get_project_directory()
    if not click.confirm(
        'First, let\'s make sure {dir} is the root directory of your project. Is that correct?'.format(
            dir=click.style(directory, bold=True),
        )
    ):  # pragma: no cover
        click.echo('Alright! Please change to the root directory of your project and try again.')
        return

    valohai_yaml_path = os.path.join(directory, 'valohai.yaml')
    if not os.path.isfile(valohai_yaml_path):
        click.echo('Looks like you don\'t have a Valohai.yaml file. Let\'s create one!')
        yaml_wizard(directory)
    else:
        click.echo('There is a Valohai.yaml file in this directory, so let\'s skip the creation wizard.')

    try:
        get_host_and_token()
    except NotLoggedIn:  # pragma: no cover
        error('Please log in with `vh login` before continuing.')
        sys.exit(3)

    project = link_or_create_prompt(directory)
    if not project:
        # If we didn't link or create a project, don't show the "all good to go" text.
        return

    width = min(70, shutil.get_terminal_size()[0])
    click.secho('*' * width, fg='green', bold=True)
    click.echo(DONE_TEXT.strip().format(
        command=click.style('vh exec run --adhoc --watch execute', bold=True),
    ))
    click.secho('*' * width, fg='green', bold=True)


def link_or_create_prompt(cwd: str) -> Optional[Project]:
    while True:
        response = click.prompt(
            'Do you want to link this directory to a pre-existing project, or create a new one? [l/c]\n'
            'If you\'d prefer to do neither at this point, respond [n].'
        ).lower().strip()
        if response.startswith('l'):
            link.main(prog_name='vh-link', args=[], standalone_mode=False)
        elif response.startswith('c'):
            create.main(prog_name='vh-create', args=[], standalone_mode=False)
        elif response.startswith('n'):
            click.echo(
                'Okay, skipping linking or creating a project for the time being.\n'
                'You can do that later with `vh project link` or `vh project create`.'
            )
            return None
        else:
            warn('Sorry, I couldn\'t understand that.')
            continue
        project = get_project(cwd)
        if not project:
            error('Oops, looks like something went wrong.')
            sys.exit(2)
        return project
the-stack_106_17007
# -*- coding: utf-8 -*-
# Copyright (c) 2020, bikbuk and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
import json
from frappe.model.document import Document

class VetProductQuantity(Document):
    pass

@frappe.whitelist()
def get_quantity_list(filters=None, valuation=False):
    default_sort = "creation desc"
    td_filters = []
    filter_json = False
    group_by = False
    page = 1

    if filters:
        try:
            filter_json = json.loads(filters)
        except:
            filter_json = False

    if filter_json:
        sort = filter_json.get('sort', False)
        filters_json = filter_json.get('filters', False)
        gudang = filter_json.get('gudang', False)
        product = filter_json.get('product', False)
        group_by_json = filter_json.get('group_by', False)
        currentpage = filter_json.get('currentpage', False)

        if currentpage:
            page = currentpage

        if filters_json:
            for fj in filters_json:
                td_filters.append(fj)

        if sort:
            default_sort = sort

        if gudang:
            td_filters.append({'gudang': gudang})

        if product:
            td_filters.append({'product': product})

        if group_by_json:
            group_by = group_by_json

    try:
        stockable_product_category = frappe.get_list("VetProductCategory", filters={'stockable': True}, fields=['name'])
        stockable_product = frappe.get_list("VetProduct", filters={'product_category': ['in', list(pc.name for pc in stockable_product_category)]}, fields=['name'])
        product_quantity_search = frappe.get_list("VetProductQuantity", filters=td_filters, fields=["*"], order_by=default_sort, group_by=group_by, start=(page - 1) * 10, page_length=10)
        product_quantity = list(pqs for pqs in product_quantity_search if pqs.product in list(sp.name for sp in stockable_product))
        datalength = len(frappe.get_all("VetProductQuantity", filters=td_filters, as_list=True))

        for pq in product_quantity:
            product = frappe.get_doc('VetProduct', pq.product)
            if(group_by == 'product'):
                filters2 = {'product': pq.product}
                if gudang:
                    filters2.update({'gudang': gudang})
                quantity_list = frappe.get_list("VetProductQuantity", filters=filters2, fields=["sum(quantity) as total_quantity"], order_by="creation desc")
                pq.update({'quantity': quantity_list[0].total_quantity, 'total_value': quantity_list[0].total_quantity*product.price})
            elif(group_by == 'gudang'):
                quantity_list = frappe.get_list("VetProductQuantity", filters={'product': pq.product, 'gudang': pq.gudang}, fields=["sum(quantity) as total_quantity"], order_by="creation desc")
                pq.update({'quantity': quantity_list[0].total_quantity, 'total_value': quantity_list[0].total_quantity*product.price})

            if valuation and product:
                purchase_list_search = frappe.get_list("VetPurchaseProducts", filters={'product': pq.product}, fields=["*"], order_by="creation desc")
                purchase_list = (pl for pl in purchase_list_search if pl.quantity_stocked)
                pq.update({'product': product, 'purchase_list': purchase_list})

            pq.update({'product': product})

        return {'product_quantity': product_quantity, 'datalength': datalength}
    except PermissionError as e:
        return {'error': e}

@frappe.whitelist()
def get_product_quantity(warehouse, product):
    quantity = frappe.db.get_value('VetProductQuantity', {'product': product, 'gudang': warehouse}, 'quantity') or 0
    return quantity
the-stack_106_17008
import copy
from typing import Union, Any, Optional, List
import numpy as np

from ding.worker.replay_buffer import IBuffer
from ding.utils import LockContext, LockContextType, BUFFER_REGISTRY
from .utils import UsedDataRemover


@BUFFER_REGISTRY.register('naive')
class NaiveReplayBuffer(IBuffer):
    r"""
    Overview:
        Naive replay buffer, can store and sample data.
        An naive implementation of replay buffer with no priority or any other advanced features.
        This buffer refers to multi-thread/multi-process and guarantees thread-safe, which means that methods like
        ``sample``, ``push``, ``clear`` are all mutual to each other.
    Interface:
        start, close, push, update, sample, clear, count, state_dict, load_state_dict, default_config
    Property:
        replay_buffer_size, push_count
    """

    config = dict(
        type='naive',
        replay_buffer_size=10000,
        deepcopy=False,  # default `False` for serial pipeline
        enable_track_used_data=False,
    )

    def __init__(
            self,
            cfg: 'EasyDict',  # noqa
            name: str = 'default',
            exp_name: Optional[str] = 'default_experiment',
            instance_name: Optional[str] = 'buffer',
    ) -> None:
        """
        Overview:
            Initialize the buffer
        Arguments:
            - cfg (:obj:`dict`): Config dict.
            - name (:obj:`Optional[str]`): Buffer name, used to generate unique data id and logger name.
        """
        self._exp_name = exp_name
        self._instance_name = instance_name
        self._cfg = cfg
        self._replay_buffer_size = self._cfg.replay_buffer_size
        self._deepcopy = self._cfg.deepcopy
        # ``_data`` is a circular queue to store data (full data or meta data)
        self._data = [None for _ in range(self._replay_buffer_size)]
        # Current valid data count, indicating how many elements in ``self._data`` is valid.
        self._valid_count = 0
        # How many pieces of data have been pushed into this buffer, should be no less than ``_valid_count``.
        self._push_count = 0
        # Point to the tail position where next data can be inserted, i.e. latest inserted data's next position.
        self._tail = 0
        # Lock to guarantee thread safe
        self._lock = LockContext(type_=LockContextType.THREAD_LOCK)
        self._end_flag = False
        self._enable_track_used_data = self._cfg.enable_track_used_data
        if self._enable_track_used_data:
            self._used_data_remover = UsedDataRemover()

    def start(self) -> None:
        """
        Overview:
            Start the buffer's used_data_remover thread if enables track_used_data.
        """
        if self._enable_track_used_data:
            self._used_data_remover.start()

    def close(self) -> None:
        """
        Overview:
            Clear the buffer; Join the buffer's used_data_remover thread if enables track_used_data.
        """
        self.clear()
        if self._enable_track_used_data:
            self._used_data_remover.close()

    def push(self, data: Union[List[Any], Any], cur_collector_envstep: int) -> None:
        r"""
        Overview:
            Push a data into buffer.
        Arguments:
            - data (:obj:`Union[List[Any], Any]`): The data which will be pushed into buffer. Can be one \
                (in `Any` type), or many(int `List[Any]` type).
            - cur_collector_envstep (:obj:`int`): Collector's current env step. \
                Not used in naive buffer, but preserved for compatibility.
        """
        if isinstance(data, list):
            self._extend(data, cur_collector_envstep)
        else:
            self._append(data, cur_collector_envstep)

    def sample(self, size: int, cur_learner_iter: int) -> Optional[list]:
        r"""
        Overview:
            Sample data with length ``size``.
        Arguments:
            - size (:obj:`int`): The number of the data that will be sampled.
            - cur_learner_iter (:obj:`int`): Learner's current iteration. \
                Not used in naive buffer, but preserved for compatibility.
        Returns:
            - sample_data (:obj:`list`): A list of data with length ``size``.
        """
        if size == 0:
            return []
        can_sample = self._sample_check(size)
        if not can_sample:
            return None
        with self._lock:
            indices = self._get_indices(size)
            result = self._sample_with_indices(indices, cur_learner_iter)
        return result

    def _append(self, ori_data: Any, cur_collector_envstep: int = -1) -> None:
        r"""
        Overview:
            Append a data item into ``self._data``.
        Arguments:
            - ori_data (:obj:`Any`): The data which will be inserted.
            - cur_collector_envstep (:obj:`int`): Not used in this method, but preserved for compatibility.
        """
        with self._lock:
            if self._deepcopy:
                data = copy.deepcopy(ori_data)
            else:
                data = ori_data
            self._push_count += 1
            if self._data[self._tail] is None:
                self._valid_count += 1
            elif self._enable_track_used_data:
                self._used_data_remover.add_used_data(self._data[self._tail])
            self._data[self._tail] = data
            self._tail = (self._tail + 1) % self._replay_buffer_size

    def _extend(self, ori_data: List[Any], cur_collector_envstep: int = -1) -> None:
        r"""
        Overview:
            Extend a data list into queue.
            Add two keys in each data item, you can refer to ``_append`` for details.
        Arguments:
            - ori_data (:obj:`List[Any]`): The data list.
            - cur_collector_envstep (:obj:`int`): Not used in this method, but preserved for compatibility.
        """
        with self._lock:
            if self._deepcopy:
                data = copy.deepcopy(ori_data)
            else:
                data = ori_data
            length = len(data)
            # When updating ``_data`` and ``_use_count``, should consider two cases regarding
            # the relationship between "tail + data length" and "replay buffer size" to check whether
            # data will exceed beyond buffer's max length limitation.
            if self._tail + length <= self._replay_buffer_size:
                if self._valid_count != self._replay_buffer_size:
                    self._valid_count += length
                elif self._enable_track_used_data:
                    for i in range(length):
                        self._used_data_remover.add_used_data(self._data[self._tail + i])
                self._push_count += length
                self._data[self._tail:self._tail + length] = data
            else:
                new_tail = self._tail
                data_start = 0
                residual_num = len(data)
                while True:
                    space = self._replay_buffer_size - new_tail
                    L = min(space, residual_num)
                    if self._valid_count != self._replay_buffer_size:
                        self._valid_count += L
                    elif self._enable_track_used_data:
                        for i in range(L):
                            self._used_data_remover.add_used_data(self._data[new_tail + i])
                    self._push_count += L
                    self._data[new_tail:new_tail + L] = data[data_start:data_start + L]
                    residual_num -= L
                    assert residual_num >= 0
                    if residual_num == 0:
                        break
                    else:
                        new_tail = 0
                        data_start += L
            # Update ``tail`` and ``next_unique_id`` after the whole list is pushed into buffer.
            self._tail = (self._tail + length) % self._replay_buffer_size

    def _sample_check(self, size: int) -> bool:
        r"""
        Overview:
            Check whether this buffer has more than `size` datas to sample.
        Arguments:
            - size (:obj:`int`): Number of data that will be sampled.
        Returns:
            - can_sample (:obj:`bool`): Whether this buffer can sample enough data.
        """
        if self._valid_count < size:
            print("No enough elements for sampling (expect: {} / current: {})".format(size, self._valid_count))
            return False
        else:
            return True

    def update(self, info: dict) -> None:
        r"""
        Overview:
            Naive Buffer does not need to update any info, but this method is preserved for compatibility.
        """
        print(
            '[BUFFER WARNING] Naive Buffer does not need to update any info, \
            but `update` method is preserved for compatibility.'
        )

    def clear(self) -> None:
        """
        Overview:
            Clear all the data and reset the related variables.
        """
        with self._lock:
            for i in range(len(self._data)):
                if self._data[i] is not None:
                    if self._enable_track_used_data:
                        self._used_data_remover.add_used_data(self._data[i])
                    self._data[i] = None
            self._valid_count = 0
            self._push_count = 0
            self._tail = 0

    def __del__(self) -> None:
        """
        Overview:
            Call ``close`` to delete the object.
        """
        self.close()

    def _get_indices(self, size: int) -> list:
        r"""
        Overview:
            Get the sample index list.
        Arguments:
            - size (:obj:`int`): The number of the data that will be sampled
        Returns:
            - index_list (:obj:`list`): A list including all the sample indices, whose length should equal to ``size``.
        """
        assert self._valid_count <= self._replay_buffer_size
        if self._valid_count == self._replay_buffer_size:
            tail = self._replay_buffer_size
        else:
            tail = self._tail
        indices = list(np.random.choice(a=tail, size=size, replace=False))
        return indices

    def _sample_with_indices(self, indices: List[int], cur_learner_iter: int) -> list:
        r"""
        Overview:
            Sample data with ``indices``.
        Arguments:
            - indices (:obj:`List[int]`): A list including all the sample indices.
            - cur_learner_iter (:obj:`int`): Not used in this method, but preserved for compatibility.
        Returns:
            - data (:obj:`list`) Sampled data.
        """
        data = []
        for idx in indices:
            assert self._data[idx] is not None, idx
            if self._deepcopy:
                copy_data = copy.deepcopy(self._data[idx])
            else:
                copy_data = self._data[idx]
            data.append(copy_data)
        return data

    def count(self) -> int:
        """
        Overview:
            Count how many valid datas there are in the buffer.
        Returns:
            - count (:obj:`int`): Number of valid data.
        """
        return self._valid_count

    def state_dict(self) -> dict:
        """
        Overview:
            Provide a state dict to keep a record of current buffer.
        Returns:
            - state_dict (:obj:`Dict[str, Any]`): A dict containing all important values in the buffer. \
                With the dict, one can easily reproduce the buffer.
        """
        return {
            'data': self._data,
            'tail': self._tail,
            'valid_count': self._valid_count,
            'push_count': self._push_count,
        }

    def load_state_dict(self, _state_dict: dict) -> None:
        """
        Overview:
            Load state dict to reproduce the buffer.
        Returns:
            - state_dict (:obj:`Dict[str, Any]`): A dict containing all important values in the buffer.
        """
        assert 'data' in _state_dict
        if set(_state_dict.keys()) == set(['data']):
            self._extend(_state_dict['data'])
        else:
            for k, v in _state_dict.items():
                setattr(self, '_{}'.format(k), v)

    @property
    def replay_buffer_size(self) -> int:
        return self._replay_buffer_size

    @property
    def push_count(self) -> int:
        return self._push_count
the-stack_106_17009
import angr
from angr.sim_type import SimTypeString, SimTypeLength, SimTypeInt

import logging
l = logging.getLogger("angr.procedures.libc.strncmp")

class strncmp(angr.SimProcedure):
    #pylint:disable=arguments-differ

    def run(self, a_addr, b_addr, limit, a_len=None, b_len=None, wchar=False, ignore_case=False): #pylint:disable=arguments-differ
        # TODO: smarter types here?
        self.argument_types = {0: self.ty_ptr(SimTypeString()),
                               1: self.ty_ptr(SimTypeString()),
                               2: SimTypeLength(self.state.arch)}
        self.return_type = SimTypeInt(32, True)

        strlen = angr.SIM_PROCEDURES['libc']['strlen']

        a_strlen = a_len if a_len is not None else self.inline_call(strlen, a_addr, wchar=wchar)
        b_strlen = b_len if b_len is not None else self.inline_call(strlen, b_addr, wchar=wchar)

        a_len = a_strlen.ret_expr
        b_len = b_strlen.ret_expr

        match_constraints = [ ]
        variables = a_len.variables | b_len.variables | limit.variables
        ret_expr = self.state.se.Unconstrained("strncmp_ret", self.state.arch.bits, key=('api', 'strncmp'))

        # determine the maximum number of bytes to compare
        concrete_run = False
        #if not self.state.se.symbolic(a_len) and not self.state.se.symbolic(b_len) and not self.state.se.symbolic(limit):
        if self.state.se.single_valued(a_len) and self.state.se.single_valued(b_len) and self.state.se.single_valued(limit):
            c_a_len = self.state.se.eval(a_len)
            c_b_len = self.state.se.eval(b_len)
            c_limit = self.state.se.eval(limit)

            l.debug("everything is concrete: a_len %d, b_len %d, limit %d", c_a_len, c_b_len, c_limit)

            if (c_a_len < c_limit or c_b_len < c_limit) and c_a_len != c_b_len:
                l.debug("lengths < limit and unmatched")

            concrete_run = True
            maxlen = min(c_a_len, c_b_len, c_limit)
        else:
            if self.state.se.single_valued(limit):
                c_limit = self.state.se.eval(limit)
                maxlen = min(a_strlen.max_null_index, b_strlen.max_null_index, c_limit)
            else:
                maxlen = max(a_strlen.max_null_index, b_strlen.max_null_index)

            match_constraints.append(self.state.se.Or(a_len == b_len,
                                                      self.state.se.And(self.state.se.UGE(a_len, limit),
                                                                        self.state.se.UGE(b_len, limit))))

        if maxlen == 0:
            # there is a corner case: if a or b are not both empty string, and limit is greater than 0, we should return
            # non-equal. Basically we only return equal when limit is 0, or a_len == b_len == 0
            if self.state.se.single_valued(limit) and self.state.se.eval(limit) == 0:
                # limit is 0
                l.debug("returning equal for 0-limit")
                return self.state.se.BVV(0, self.state.arch.bits, variables=variables)
            elif self.state.se.single_valued(a_len) and self.state.se.single_valued(b_len) and \
                    self.state.se.eval(a_len) == self.state.se.eval(b_len) == 0:
                # two empty strings
                l.debug("returning equal for two empty strings")
                return self.state.se.BVV(0, self.state.arch.bits, variables=variables)
            else:
                # all other cases fall into this branch
                l.debug("returning non-equal for comparison of an empty string and a non-empty string")
                if a_strlen.max_null_index == 0:
                    return self.state.se.BVV(-1, self.state.arch.bits, variables=variables)
                else:
                    return self.state.se.BVV(1, self.state.arch.bits, variables=variables)

        # the bytes
        a_bytes = self.state.memory.load(a_addr, maxlen, endness='Iend_BE')
        b_bytes = self.state.memory.load(b_addr, maxlen, endness='Iend_BE')

        # TODO: deps
        # all possible return values in static mode
        return_values = [ ]

        for i in range(maxlen):
            l.debug("Processing byte %d", i)
            maxbit = (maxlen-i)*8
            a_byte = a_bytes[maxbit-1:maxbit-8]
            b_byte = b_bytes[maxbit-1:maxbit-8]

            if concrete_run and self.state.se.single_valued(a_byte) and self.state.se.single_valued(b_byte):
                a_conc = self.state.se.eval(a_byte)
                b_conc = self.state.se.eval(b_byte)
                variables |= a_byte.variables
                variables |= b_byte.variables

                if ignore_case:
                    # convert both to lowercase
                    if ord('a') <= a_conc <= ord('z'):
                        a_conc -= ord(' ')
                    if ord('a') <= b_conc <= ord('z'):
                        b_conc -= ord(' ')

                if a_conc != b_conc:
                    l.debug("... found mis-matching concrete bytes 0x%x and 0x%x", a_conc, b_conc)
                    if a_conc < b_conc:
                        return self.state.se.BVV(-1, self.state.arch.bits, variables=variables)
                    else:
                        return self.state.se.BVV(1, self.state.arch.bits, variables=variables)
            else:
                if self.state.mode == 'static':
                    return_values.append(a_byte - b_byte)

                concrete_run = False

            if self.state.mode != 'static':
                if ignore_case:
                    byte_constraint = self.state.se.Or(
                        self.state.se.Or(
                            a_byte == b_byte,
                            self.state.se.And(
                                ord('A') <= a_byte, a_byte <= ord('Z'),
                                ord('a') <= b_byte, b_byte <= ord('z'),
                                b_byte - ord(' ') == a_byte,
                            ),
                            self.state.se.And(
                                ord('A') <= b_byte, b_byte <= ord('Z'),
                                ord('a') <= a_byte, a_byte <= ord('z'),
                                a_byte - ord(' ') == b_byte,
                            ),
                        ),
                        self.state.se.ULT(a_len, i),
                        self.state.se.ULT(limit, i)
                    )
                else:
                    byte_constraint = self.state.se.Or(a_byte == b_byte,
                                                       self.state.se.ULT(a_len, i),
                                                       self.state.se.ULT(limit, i))
                match_constraints.append(byte_constraint)

        if concrete_run:
            l.debug("concrete run made it to the end!")
            return self.state.se.BVV(0, self.state.arch.bits, variables=variables)

        if self.state.mode == 'static':
            ret_expr = self.state.se.ESI(8)
            for expr in return_values:
                ret_expr = ret_expr.union(expr)

            ret_expr = ret_expr.sign_extend(self.state.arch.bits - 8)
        else:
            # make the constraints

            l.debug("returning symbolic")
            match_constraint = self.state.se.And(*match_constraints)
            nomatch_constraint = self.state.se.Not(match_constraint)

            #l.debug("match constraints: %s", match_constraint)
            #l.debug("nomatch constraints: %s", nomatch_constraint)

            match_case = self.state.se.And(limit != 0, match_constraint, ret_expr == 0)
            nomatch_case = self.state.se.And(limit != 0, nomatch_constraint, ret_expr == 1)
            l0_case = self.state.se.And(limit == 0, ret_expr == 0)
            empty_case = self.state.se.And(a_strlen.ret_expr == 0, b_strlen.ret_expr == 0, ret_expr == 0)

            self.state.add_constraints(self.state.se.Or(match_case, nomatch_case, l0_case, empty_case))

        return ret_expr
the-stack_106_17011
# coding: utf-8

import pprint
import re

import six


class CreateConnectorRequest:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'instance_id': 'str',
        'body': 'CreateConnectorReq'
    }

    attribute_map = {
        'instance_id': 'instance_id',
        'body': 'body'
    }

    def __init__(self, instance_id=None, body=None):
        """CreateConnectorRequest - a model defined in huaweicloud sdk"""

        self._instance_id = None
        self._body = None
        self.discriminator = None

        self.instance_id = instance_id
        if body is not None:
            self.body = body

    @property
    def instance_id(self):
        """Gets the instance_id of this CreateConnectorRequest.

        :return: The instance_id of this CreateConnectorRequest.
        :rtype: str
        """
        return self._instance_id

    @instance_id.setter
    def instance_id(self, instance_id):
        """Sets the instance_id of this CreateConnectorRequest.

        :param instance_id: The instance_id of this CreateConnectorRequest.
        :type: str
        """
        self._instance_id = instance_id

    @property
    def body(self):
        """Gets the body of this CreateConnectorRequest.

        :return: The body of this CreateConnectorRequest.
        :rtype: CreateConnectorReq
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this CreateConnectorRequest.

        :param body: The body of this CreateConnectorRequest.
        :type: CreateConnectorReq
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateConnectorRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
the-stack_106_17013
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. """ Contains a class and method for porting ROS1 python code to ROS2 """ import sys from .utils import find_warnings, get_functions_with class PythonSourcePorter(): """ Contains rules for automatically changing ros1 python code to ros2 and warnings for elements that may need to be changed but aren't be done automatically. """ @staticmethod def port(source, extra_rules=[], extra_warnings=[]): """ Makes some automatic changes to ROS1 python source code to port to ROS2 and prints warnings for more complex cases. Arguments: source - A string of the python source code extra_rules - a list of functions to apply changes to the source code extra_warnings - a list of functions that return warnings about the source code Returns: The new source code """ #Pulls out all methods in this class with name starting with "rule" rules = get_functions_with(criteria=lambda name: name.startswith("rule"), from_class=PythonSourcePorter) for rule in rules + extra_rules: source = rule(source) #Pulls out all methods in this class with name starting with "warn" warning_checks = get_functions_with(criteria=lambda name: name.startswith("warn"), from_class=PythonSourcePorter) warnings = [] source_lines = source.split("\n") for warning_check in warning_checks + extra_warnings: warnings.extend(warning_check(source_lines)) for warning in sorted(warnings, key=lambda warning: warning.line_number): print(str(warning)) return source ######################### # WARNINGS # ######################### @staticmethod def warn_rospy(source_lines): warning_str = "rospy is the ros1 python library, use the rclpy equivalent" return find_warnings(source_lines=source_lines, warning_condition=lambda line: "rospy" in line, warning_msg=lambda line: warning_str) @staticmethod def warn_publisher(source_lines): warning_str = ("Publishers are created with the <node>.create_publisher method.\n" "See: https://github.com/ros2/rclpy/blob/master/rclpy/rclpy/node.py") return find_warnings(source_lines=source_lines, warning_condition=lambda line: "rospy.Publisher" in line, warning_msg=lambda line: warning_str) @staticmethod def warn_subscriber(source_lines): warning_str = ("Subscribers are created with the <node>.create_subscription method.\n" "See: https://github.com/ros2/rclpy/blob/master/rclpy/rclpy/node.py") return find_warnings(source_lines=source_lines, warning_condition=lambda line: "rospy.Subscriber" in line, warning_msg=lambda line: warning_str) @staticmethod def warn_logging(source_lines): warning_str = "rospy.log<sev> has been replaced with <node>.get_logger().<sev>" return find_warnings(source_lines=source_lines, warning_condition=lambda line: "rospy.log" in line, warning_msg=lambda line: warning_str) @staticmethod def warn_time(source_lines): warning_str = ("rospy.time has been replaced, try rclpy.clock. 
Not all features currently exist.\n"
                       "See: https://github.com/ros2/rclpy/issues/186")
        return find_warnings(source_lines=source_lines,
                             warning_condition=lambda line: "rospy.time" in line,
                             warning_msg=lambda line: warning_str)

    @staticmethod
    def warn_node_creation(source_lines):
        warning_str = ("In ROS1 init_node initialized ros and created a node for that process.\n"
                       "In ROS2 you need to initialize ros and create the node separately:\n"
                       "rclpy.init(args)\n"
                       "node = rclpy.create_node(<name>)")
        return find_warnings(source_lines=source_lines,
                             warning_condition=lambda line: "rospy.init_node" in line,
                             warning_msg=lambda line: warning_str)

    @staticmethod
    def warn_shutdown(source_lines):
        warning_str = ("is_shutdown isn't implemented in ros2 yet\n"
                       "See: https://github.com/ros2/rclpy/issues/190")
        return find_warnings(source_lines=source_lines,
                             warning_condition=lambda line: "rospy.is_shutdown" in line,
                             warning_msg=lambda line: warning_str)

    @staticmethod
    def warn_exceptions(source_lines):
        warning_str = ("Some exceptions have changed names and some have not been implemented in ROS2\n"
                       "See https://github.com/ros2/rclpy/blob/master/rclpy/rclpy/exceptions.py for existing exceptions")
        return find_warnings(source_lines=source_lines,
                             warning_condition=lambda line: "Exception" in line,
                             warning_msg=lambda line: warning_str)


def port_python(src, dst):
    """
    Makes some automatic changes to ROS1 python source code to port it to ROS2 and prints warnings

    Arguments:
        src - the source file for the ROS1 python code
        dst - the destination file for ROS2 python code
    """
    with open(src, 'r') as src_file:
        source = src_file.read()
    with open(dst, 'w') as dst_file:
        dst_file.write(PythonSourcePorter.port(source=source))


def main():
    with open(sys.argv[1], 'r') as file:
        print(PythonSourcePorter.port(source=file.read()))


if __name__ == '__main__':
    main()
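
# --- Extension sketch (illustrative, not part of the original tool) ---
# port() accepts extra rules and warning checks: a rule maps the whole source string to a
# new source string, a warning check maps the list of source lines to a list of warnings.
# Both examples below are deliberately simple and only meant to show the expected shape.
def _example_rule_rename_shebang(source):
    """Toy extra rule: rewrite a python2 shebang to python3."""
    return source.replace("#!/usr/bin/env python2", "#!/usr/bin/env python3")


def _example_warn_sleep(source_lines):
    """Toy extra warning: flag rospy.sleep calls, which need rethinking with rclpy."""
    return find_warnings(source_lines=source_lines,
                         warning_condition=lambda line: "rospy.sleep" in line,
                         warning_msg=lambda line: "rospy.sleep needs a rate/clock based replacement in ROS2")


# Example call:
#   ported = PythonSourcePorter.port(source=ros1_source,
#                                    extra_rules=[_example_rule_rename_shebang],
#                                    extra_warnings=[_example_warn_sleep])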
the-stack_106_17016
from pydub import AudioSegment from pydub.playback import play from musicBoxMaker import * partition = parsePartitionFile("listNotes.txt")#put your partition here notes = AudioSegment.from_mp3("recording_notes.mp3")#put your recording here #put the start time (in ms) of each note here startTimes = [4984, 5538, 6071, 6647, 7249, 7736, 8288, 8910, 9485, 9984, 10705, 11322, 11953, 12615, 13210, 13825, 14553, 15237] noteLength = 200 #length of each note (in ms) song = AudioSegment.empty() nbNotes = partition.shape[0] for i in range(partition.shape[1]):#loop over the partition note = AudioSegment.silent(duration=noteLength) for j in range(nbNotes):#loop over the notes if partition[nbNotes-j-1,i]: note = note.overlay(notes[startTimes[j]:startTimes[j]+300]) song += note play(song) #play the partition
the-stack_106_17017
#! /usr/bin/env python # -*- coding: utf-8 -*- """This module contains some variables settings for COCO. These variables are used for producing figures and tables in rungeneric1, -2, and -many. For setting variables dynamically see config.py, where some of the variables here and some """ import os import warnings import numpy as np test = False # debug/test flag, set to False for committing the final version if 1 < 3 and test: np.seterr(all='raise') np.seterr(under='ignore') # ignore underflow #global instancesOfInterest, tabDimsOfInterest, tabValsOfInterest, figValsOfInterest, rldDimsOfInterest, rldValsOfInterest #set_trace() force_assertions = False # another debug flag for time-consuming assertions in_a_hurry = 1000 # [0, 1000] lower resolution, no eps, saves 30% time maxevals_fix_display = None # 3e2 is the expensive setting only used in config, yet to be improved!? runlength_based_targets = 'auto' # 'auto' means automatic choice, otherwise True or False dimensions_to_display = (2, 3, 5, 10, 20, 40) # this could be used to set the dimensions in respective modules generate_svg_files = False # generate the svg figures scaling_figures_with_boxes = True # should replace ppfigdim.dimsBBOB, ppfig2.dimensions, ppfigparam.dimsBBOB? # Variables used in the routines defining desired output for BBOB. tabDimsOfInterest = (5, 20) # dimension which are displayed in the tables target_runlengths_in_scaling_figs = [0.5, 1.2, 3, 10, 50] # used in config target_runlengths_in_table = [0.5, 1.2, 3, 10, 50] # [0.5, 2, 10, 50] # used in config target_runlengths_in_single_rldistr = [0.5, 2, 10, 50] # used in config xlimit_expensive = 1e3 # used in tableconstant_target_function_values = (1e1, 1e0, 1e-1, 1e-3, 1e-5, 1e-7) # used as input for pptables.main in rungenericmany # tableconstant_target_function_values = (1e3, 1e2, 1e1, 1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-7) # for post-workshop landscape tables rldValsOfInterest = (10, 1e-1, 1e-4, 1e-8) tabValsOfInterest = (1.0, 1.0e-2, 1.0e-4, 1.0e-6, 1.0e-8) #tabValsOfInterest = (10, 1.0, 1e-1, 1e-3, 1e-5, 1.0e-8) rldDimsOfInterest = (5, 20) figValsOfInterest = (10, 1e-1, 1e-4, 1e-8) # this is a bad name that should improve, which fig, what vals??? # figValsOfInterest = (10, 1, 1e-1, 1e-2, 1e-3, 1e-5, 1e-8) #in/outcomment if desired ##Put backward to have the legend in the same order as the lines. simulated_runlength_bootstrap_sample_size = 10 + 990 / (1 + 10 * max((0, in_a_hurry))) simulated_runlength_bootstrap_sample_size_rld = 10 + 90 / (1 + 10 * max((0, in_a_hurry))) # single_target_pprldistr_values = (10., 1e-1, 1e-4, 1e-8) # used as default in pprldistr.plot method, on graph for each # single_target_function_values = (1e1, 1e0, 1e-1, 1e-2, 1e-4, 1e-6, 1e-8) # one figure for each, seems not in use # summarized_target_function_values = (1e0, 1e-1, 1e-3, 1e-5, 1e-7) # currently not in use, all in one figure (graph?) # summarized_target_function_values = (100, 10, 1, 1e-1, 1e-2, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8) # summarized_target_function_values = tuple(10**np.r_[-8:2:0.2]) # 1e2 and 1e-8 # summarized_target_function_values = tuple(10**numpy.r_[-7:-1:0.2]) # 1e2 and 1e-1 # summarized_target_function_values = [-1, 3] # easy easy # summarized_target_function_values = (10, 1e0, 1e-1) # all in one figure (means what?) 
# not (yet) in use: pprldmany_target_values = pproc.TargetValues(10**np.arange(-8, 2, 0.2)) # might not work because of cyclic import instancesOfInterest2009 = {1:3, 2:3, 3:3, 4:3, 5:3} # 2009 instances instancesOfInterest2010 = {1:1, 2:1, 3:1, 4:1, 5:1, 6:1, 7:1, 8:1, 9:1, 10:1, 11:1, 12:1, 13:1, 14:1, 15:1} # 2010 instances instancesOfInterest2012 = {1:1, 2:1, 3:1, 4:1, 5:1, 21:1, 22:1, 23:1, 24:1, 25:1, 26:1, 27:1, 28:1, 29:1, 30:1} # 2012 instances instancesOfInterest2013 = {1:1, 2:1, 3:1, 4:1, 5:1, 31:1, 32:1, 33:1, 34:1, 35:1, 36:1, 37:1, 38:1, 39:1, 40:1} # 2013 instances instancesOfInterest = {1:1, 2:1, 3:1, 4:1, 5:1, 41:1, 42:1, 43:1, 44:1, 45:1, 46:1, 47:1, 48:1, 49:1, 50:1} # 2015 instances; only for consistency checking line_styles = [ # used by ppfigs and pprlmany {'marker': 'o', 'markersize': 31, 'linestyle': '-', 'color': '#000080'}, # 'NavyBlue' {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': '#ff00ff'}, # 'Magenta' {'marker': '*', 'markersize': 33, 'linestyle': '-', 'color': '#ffa500'}, # 'Orange' {'marker': 'v', 'markersize': 28, 'linestyle': '-', 'color': '#6495ed'}, # 'CornflowerBlue' {'marker': 'h', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, # 'Red' {'marker': '^', 'markersize': 25, 'linestyle': '-', 'color': '#9acd32'}, # 'YellowGreen' # {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'g'}, # 'green' avoid green because of # {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': '#ffd700'}, # 'Goldenrod' seems too light # {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, # 'Black' is too close to NavyBlue # {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': '#d02090'}, # square, 'VioletRed' seems too close to red {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': 'c'}, {'marker': 'H', 'markersize': 23, 'linestyle': '-', 'color': '#bebebe'}, # 'Gray' # {'marker': 'o', 'markersize': 23, 'linestyle': '-', 'color': '#ffff00'}, # 'Yellow' {'marker': '3', 'markersize': 23, 'linestyle': '-', 'color': '#adff2f'}, # 'GreenYellow' {'marker': '1', 'markersize': 23, 'linestyle': '-', 'color': '#228b22'}, # 'ForestGreen' {'marker': 'D', 'markersize': 23, 'linestyle': '-', 'color': '#ffc0cb'}, # 'Lavender' {'marker': '<', 'markersize': 23, 'linestyle': '-', 'color': '#87ceeb'}, # 'SkyBlue' close to CornflowerBlue {'marker': 'v', 'markersize': 23, 'linestyle': '--', 'color': '#000080'}, # 'NavyBlue' {'marker': '*', 'markersize': 23, 'linestyle': '--', 'color': 'r'}, # 'Red' {'marker': 's', 'markersize': 23, 'linestyle': '--', 'color': '#ffd700'}, # 'Goldenrod' {'marker': 'd', 'markersize': 23, 'linestyle': '--', 'color': '#d02090'}, # square, 'VioletRed' {'marker': '^', 'markersize': 23, 'linestyle': '--', 'color': '#6495ed'}, # 'CornflowerBlue' {'marker': '<', 'markersize': 23, 'linestyle': '--', 'color': '#ffa500'}, # 'Orange' {'marker': 'h', 'markersize': 23, 'linestyle': '--', 'color': '#ff00ff'}, # 'Magenta' # {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'm'}, # square, magenta {'marker': 'p', 'markersize': 23, 'linestyle': '--', 'color': '#bebebe'}, # 'Gray' {'marker': 'H', 'markersize': 23, 'linestyle': '--', 'color': '#87ceeb'}, # 'SkyBlue' {'marker': '1', 'markersize': 23, 'linestyle': '--', 'color': '#ffc0cb'}, # 'Lavender' {'marker': '2', 'markersize': 23, 'linestyle': '--', 'color': '#228b22'}, # 'ForestGreen' {'marker': '4', 'markersize': 23, 'linestyle': '--', 'color': '#32cd32'}, # 'LimeGreen' {'marker': '3', 'markersize': 23, 'linestyle': '--', 'color': '#9acd32'}, # 
'YellowGreen' {'marker': 'D', 'markersize': 23, 'linestyle': '--', 'color': '#adff2f'}, # 'GreenYellow' ] line_styles_old = [ # used by ppfigs and pprlmany {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'b'}, {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'c'}, {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'm'}, # square {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': 'y'}, {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'g'}, {'marker': 's', 'markersize': 24, 'linestyle': '-', 'color': 'b'}, {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'r'}, {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'c'}, {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'm'}, {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'k'}, {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'y'}, {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'r'}, {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'b'}, {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'm'}, {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'c'}, # square {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'y'}, {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': 'k'}, {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'b'}, {'marker': 's', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'c'}, {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'r'}, {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'k'}, {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'm'}, {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'y'}, {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'r'} ] more_old_line_styles = [ # used by ppfigs and pprlmany {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': '#000080'}, # 'NavyBlue' {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, # 'Red' {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': '#ffd700'}, # 'Goldenrod' seems too light {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': '#d02090'}, # square, 'VioletRed' {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, # 'Black' is too close to NavyBlue {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': '#6495ed'}, # 'CornflowerBlue' {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': '#ffa500'}, # 'Orange' {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': '#ff00ff'}, # 'Magenta' {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': '#bebebe'}, # 'Gray' {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': '#87ceeb'}, # 'SkyBlue' {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': '#ffc0cb'}, # 'Lavender' {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': '#228b22'}, # 'ForestGreen' {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': '#32cd32'}, # 'LimeGreen' {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': '#9acd32'}, # 'YellowGreen' {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': '#adff2f'}, # 
'GreenYellow' #{'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': '#ffff00'}, # 'Yellow' {'marker': 'v', 'markersize': 30, 'linestyle': '--', 'color': '#000080'}, # 'NavyBlue' {'marker': '*', 'markersize': 31, 'linestyle': '--', 'color': 'r'}, # 'Red' {'marker': 's', 'markersize': 20, 'linestyle': '--', 'color': '#ffd700'}, # 'Goldenrod' {'marker': 'd', 'markersize': 27, 'linestyle': '--', 'color': '#d02090'}, # square, 'VioletRed' {'marker': '^', 'markersize': 26, 'linestyle': '--', 'color': '#6495ed'}, # 'CornflowerBlue' {'marker': '<', 'markersize': 25, 'linestyle': '--', 'color': '#ffa500'}, # 'Orange' {'marker': 'h', 'markersize': 24, 'linestyle': '--', 'color': '#ff00ff'}, # 'Magenta' {'marker': 'p', 'markersize': 24, 'linestyle': '--', 'color': '#bebebe'}, # 'Gray' {'marker': 'H', 'markersize': 24, 'linestyle': '--', 'color': '#87ceeb'}, # 'SkyBlue' {'marker': '1', 'markersize': 24, 'linestyle': '--', 'color': '#ffc0cb'}, # 'Lavender' {'marker': '2', 'markersize': 24, 'linestyle': '--', 'color': '#228b22'}, # 'ForestGreen' {'marker': '4', 'markersize': 24, 'linestyle': '--', 'color': '#32cd32'}, # 'LimeGreen' {'marker': '3', 'markersize': 24, 'linestyle': '--', 'color': '#9acd32'}, # 'YellowGreen' {'marker': 'D', 'markersize': 24, 'linestyle': '--', 'color': '#adff2f'}, # 'GreenYellow' ] if 11 < 3: # in case using my own linestyles line_styles = [ # used by ppfigs and pprlmany, to be modified {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'b'}, {'marker': 'o', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'b'}, {'marker': '*', 'markersize': 20, 'linestyle': '-', 'color': 'r'}, {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'b'}, {'marker': '^', 'markersize': 26, 'linestyle': '-', 'color': 'r'}, {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'g'}, {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': 'b'}, {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'r'}, {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'c'}, {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'm'}, {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'k'}, {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'y'}, {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'g'} ] minmax_algorithm_fontsize = [10, 15] # depending on the number of algorithms rcaxeslarger = {"labelsize": 24, "titlesize": 28.8} rcticklarger = {"labelsize": 24} rcfontlarger = {"size": 24} rclegendlarger = {"fontsize": 24} rcaxes = {"labelsize": 20, "titlesize": 24} rctick = {"labelsize": 20} rcfont = {"size": 20} rclegend = {"fontsize": 20} single_algorithm_file_name = 'templateBBOBarticle' two_algorithm_file_name = 'templateBBOBcmp' many_algorithm_file_name = 'templateBBOBmany' latex_commands_for_html = 'latex_commands_for_html' extraction_folder_prefix = '_extracted_' # default settings for rungeneric, rungeneric1, rungeneric2, and rungenericmany inputCrE = 0. 
isFig = True isTab = True isNoisy = False isNoiseFree = False isConv = False verbose = False outputdir = 'ppdata' inputsettings = 'color' isExpensive = None isRldOnSingleFcts = False isRLDistr = True ## isLogLoss = True # only affects rungeneric1 isPickled = False # only affects rungeneric1 ## isScatter = True # only affects rungeneric2 isScaleUp = True # only affects rungeneric2, only set here and not altered by any command line argument for now # Used by getopt: shortoptlist = "hvpo:" longoptlist = ["help", "output-dir=", "noisy", "noise-free", "tab-only", "fig-only", "rld-only", "rld-single-fcts", "verbose", "settings=", "conv", "expensive", "not-expensive", "runlength-based", "los-only", "crafting-effort=", "pickle", "sca-only", "svg"] # thereby, "los-only", "crafting-effort=", and "pickle" affect only rungeneric1 # and "sca-only" only affects rungeneric2 def getFigFormats(): if in_a_hurry: fig_formats = ('pdf', 'svg') if generate_svg_files else ('pdf',) else: fig_formats = ('eps', 'pdf', 'svg') if generate_svg_files else ('eps', 'pdf') # fig_formats = ('eps', 'pdf', 'pdf', 'png', 'svg') return fig_formats class Testbed(object): """this might become the future way to have settings related to testbeds TODO: should go somewhere else than genericsettings.py TODO: how do we pass information from the benchmark to the post-processing? """ def info(self, fun_number=None): """info on the testbed if ``fun_number is None`` or one-line info for function with number ``fun_number``. """ if fun_number is None: return self.__doc__ for line in open(os.path.join(os.path.abspath(os.path.split(__file__)[0]), self.info_filename)).readlines(): if line.split(): # ie if not empty try: # empty lines are ignored fun = int(line.split()[0]) if fun == fun_number: return 'F'+str(fun) + ' ' + ' '.join(line.split()[1:]) except ValueError: continue # ignore annotations class GECCOBBOBTestbed(Testbed): """Testbed used in the GECCO BBOB workshops 2009, 2010, 2012, 2013, 2015. """ def __init__(self): # TODO: should become a function, as low_budget is a display setting # not a testbed setting # only the short info, how to deal with both infos? self.info_filename = 'GECCOBBOBbenchmarkinfos.txt' # 'benchmarkshortinfos.txt' self.short_names = {} try: info_list = open(os.path.join(os.path.dirname(__file__), 'benchmarkshortinfos.txt'), 'r').read().split('\n') info_dict = {} for info in info_list: key_val = info.split(' ', 1) if len(key_val) > 1: info_dict[int(key_val[0])] = key_val[1] self.short_names = info_dict except: warnings.warn('benchmark infos not found') class GECCOBBOBNoisefreeTestbed(GECCOBBOBTestbed): __doc__ = GECCOBBOBTestbed.__doc__ # TODO: this needs to be set somewhere, e.g. in rungeneric* # or even better by investigating in the data attributes current_testbed = GECCOBBOBNoisefreeTestbed()
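
# --- Illustrative usage (added sketch, not part of the original BBOB code) ---
# These settings are imported by the post-processing scripts; the two helpers defined
# above can also be exercised directly (module name assumed to be genericsettings):
#
#     import genericsettings as gs
#     gs.getFigFormats()          # -> ('pdf',) with the defaults above (in_a_hurry, no SVG)
#     gs.current_testbed.info()   # -> short description of the GECCO BBOB testbed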
the-stack_106_17020
#! /usr/bin/env python2 # -*- coding: utf-8 -*- import sys # Enable dynamic imports sys.path.append(".") from argparse import ArgumentParser from syncdirector import SyncDirector # Publish rdf patch files as resource dumps. # Bundle up to max_files_compressed rdf patch files as successive definitely published resources; # bundle the remainder of rdf patch files as temporary bundled resources. parser = ArgumentParser() # parser arguments: # --source_dir: directory containing files to be synced # --sink_dir: directory where files will be published # --publish_url: public url pointing to sink dir # --builder_class: class to handle the publishing of resources # --max_files_compressed: the maximum number of resource files that should be compressed in one file # --write_separate_manifest: 'y' to write manifest included in published dump also in sink_dir as a separate file # --move_resources: 'y' to move definitely published resources from source_dir to sink_dir, # otherwise simply remove them from resource_dir. parser.add_argument('--source_dir', required=True) parser.add_argument('--sink_dir', required=True) parser.add_argument('--publish_url', required=True) parser.add_argument('--builder_class', default="zipsynchronizer.ZipSynchronizer") parser.add_argument('--max_files_compressed', type=int, default=50000) parser.add_argument('--write_separate_manifest', default="y") parser.add_argument('--move_resources', default="n") args = parser.parse_args() write_separate_manifest = args.write_separate_manifest == "y" move_resources = args.move_resources == "y" director = SyncDirector(args.source_dir, args.sink_dir, args.publish_url, args.builder_class, args.max_files_compressed, write_separate_manifest, move_resources) director.synchronize()
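
# --- Example invocation (illustrative; the script filename below is an assumption) ---
# python2 publish_patches.py \
#     --source_dir /data/rdfpatches \
#     --sink_dir /var/www/resourcesync \
#     --publish_url http://example.com/resourcesync/ \
#     --max_files_compressed 50000 \
#     --write_separate_manifest y --move_resources n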
the-stack_106_17022
import math from flask import current_app, request class PagedResult: def __init__(self, items=None, total=None, page_size=None, page_number=None): self.items = items or [] self.total = total or len(self.items) self.page_size = page_size self.page_number = page_number @property def total_pages(self): if self.page_size == 0: return 1 return math.ceil(self.total / self.page_size) @property def has_previous_page(self): return self.page_number > 1 @property def has_next_page(self): return self.page_number < self.total_pages def page_url(self, page_number): if page_number < 1 or page_number > self.total_pages: return None qs = dict(request.args) qs.update({"page_size": self.page_size, "page_number": page_number}) qs = "&".join(f"{k}={v}" for k, v in qs.items()) return f"{request.path}?{qs}" def __repr__(self): return f"<PagedResult items=[{len(self.items)} items] page={self.page_number}>" def __eq__(self, other): if not isinstance(other, self.__class__): raise ValueError("Unsupported operation") return all( [ getattr(self, attr) == getattr(other, attr) for attr in ["items", "total", "page_size", "page_number"] ] ) def paginated_find(ipa, representation, *args, **kwargs): pkey_name = representation.get_ipa_pkey() object_name = representation.ipa_object find_method = getattr(ipa, f"{object_name}_find") # Get parameters from the query string try: page_number = int(request.args.get('page_number')) except (TypeError, ValueError): page_number = 1 try: page_size = int(request.args.get('page_size')) except (TypeError, ValueError): page_size = current_app.config["PAGE_SIZE"] # If we don't want pagination, take a shortcut if page_size == 0: results = find_method(*args, **kwargs, all=True)["result"] return PagedResult( items=[representation(result) for result in results], page_size=page_size, page_number=page_number, ) # Get all primary keys regardless of paging pkeys = find_method(pkey_only=True, *args, **kwargs)["result"] total = len(pkeys) # Find out which items we need for this page first = (page_number - 1) * page_size last = first + page_size pkeys_page = [item[pkey_name][0] for item in pkeys[first:last]] # Batch-request the itemps in the page batch_methods = [ { "method": f"{object_name}_show", "params": [args, {pkey_name: pkey, 'all': True}], } for pkey in pkeys_page ] items = [ representation(result['result']) for result in ipa.batch(methods=batch_methods)['results'] ] return PagedResult( items=items, page_size=page_size, page_number=page_number, total=total, )
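
# --- Usage sketch (illustrative, not part of the original module) ---
# PagedResult can be exercised on its own outside a Flask request context,
# as long as page_url() (which reads flask.request) is not called:
#
#     page = PagedResult(items=list(range(10)), total=95, page_size=10, page_number=3)
#     page.total_pages        # 10  (math.ceil(95 / 10))
#     page.has_previous_page  # True (page 3 > 1)
#     page.has_next_page      # True (page 3 < 10)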
the-stack_106_17023
from __future__ import print_function import os try: import io except ImportError: import cStringIO as io import token import tokenize def do_path(pathname): if os.path.isdir(pathname): for rootdir, dirs, files in os.walk(pathname): for file in files: if file.endswith('.py') or file.endswith('.pyx') or file.endswith('.pxd'): path = os.path.join(rootdir, file) print('Doing %s' % (path,)) do_file(path) else: do_file(pathname) def do_file(fname): """ Run on just one file. """ mod = io.StringIO() with open(fname, "r") as source: prev_toktype = token.INDENT first_line = None last_lineno = -1 last_col = 0 tokgen = tokenize.generate_tokens(source.readline) for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen: if 0: # Change to if 1 to see the tokens fly by. print(u'%10s %-14s %-20r %r' % ( tokenize.tok_name.get(toktype, toktype), '%d.%d-%d.%d' % (slineno, scol, elineno, ecol), ttext, ltext )) if isinstance(ttext, bytes): # for Python 2.7 ttext = ttext.decode('utf-8') if slineno > last_lineno: last_col = 0 if scol > last_col: mod.write(u" " * (scol - last_col)) if toktype == token.STRING and prev_toktype == token.INDENT: # Docstring mod.write(u"#--") elif toktype == tokenize.COMMENT: # Comment mod.write(u"##\n") else: mod.write(ttext) prev_toktype = toktype last_col = ecol last_lineno = elineno with open(fname, 'w') as f_out: f_out.write(mod.getvalue())
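

# --- Example entry point (added sketch; the original module defines no CLI) ---
# Running this file with one or more paths strips comments and docstrings in place,
# so only run it on a copy of the code you care about.
if __name__ == '__main__':
    import sys
    for pathname in sys.argv[1:]:
        do_path(pathname)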
the-stack_106_17024
# Django settings for stati_project project. import os.path import pinax PINAX_ROOT = os.path.abspath(os.path.dirname(pinax.__file__)) PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__)) DEBUG = True TEMPLATE_DEBUG = DEBUG # tells Pinax to serve media through django.views.static.serve. SERVE_MEDIA = DEBUG ADMINS = ( # ('Your Name', '[email protected]'), ) MANAGERS = ADMINS DATABASE_USER = '' # Not used with sqlite3. DATABASE_PASSWORD = '' # Not used with sqlite3. DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3. DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3. # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds media. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there is a path component (optional in other cases). # Examples: "http://media.lawrence.com", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory that holds static files like app media. # Example: "/home/media/media.lawrence.com/apps/" STATIC_ROOT = os.path.join(PROJECT_ROOT, 'site_media', 'static') # URL that handles the static files like app media. # Example: "http://media.lawrence.com" STATIC_URL = '/site_media/static/' # Additional directories which hold static files STATICFILES_DIRS = ( ('static_project', os.path.join(PROJECT_ROOT, 'media')), ) # URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a # trailing slash. # Examples: "http://foo.com/media/", "/media/". ADMIN_MEDIA_PREFIX = '/media/' # Make this unique, and don't share it with anybody. SECRET_KEY = '0=b6e8#n2*dn9o)%f(h4go)_onbswji9*a#2tj+st^o3x-ak&b' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.load_template_source', 'django.template.loaders.app_directories.load_template_source', # 'django.template.loaders.eggs.load_template_source', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', ) ROOT_URLCONF = 'static_project.urls' TEMPLATE_DIRS = ( os.path.join(PROJECT_ROOT, "templates"), ) TEMPLATE_CONTEXT_PROCESSORS = ( "django.core.context_processors.auth", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.request", "pinax.core.context_processors.pinax_settings", ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'staticfiles', ) # @@@ this shouldn't beed need but are :-( CONTACT_EMAIL = "" SITE_NAME = ""
the-stack_106_17025
#!/usr/bin/env python3
try:
    import polyinterface
except ImportError:
    import pgc_interface as polyinterface
import sys
import socket    # referenced in call_api()'s except clauses
import requests
import urllib3   # referenced in call_api()'s except clauses

LOGGER = polyinterface.LOGGER


class LinkTap:
    def __init__(self, username, apiKey):
        self.base_url = 'https://www.link-tap.com/api/'
        self.username = username
        self.apiKey = apiKey

    def call_api(self, url, payload):
        try:
            r = requests.post(url, data=payload)
            if r.status_code == requests.codes.ok:
                data = r.json()
                if data['result'] == 'error':
                    return 'error'
                elif data is None:
                    return 'error'
                else:
                    return data
            else:
                return 'error'
        except requests.exceptions.RequestException:
            LOGGER.info("Request failed: RequestException")
            pass
        except socket.gaierror:
            LOGGER.info("Request failed: gaierror Name does not resolve")
            pass
        except urllib3.exceptions.NewConnectionError:
            LOGGER.info("Request failed: NewConnectionError")
            pass
        except urllib3.exceptions.MaxRetryError:
            LOGGER.info("Request failed: MaxRetryError")
            pass
        except requests.exceptions.ConnectionError:
            LOGGER.info("Request failed: ConnectionError")
            pass

    def activate_instant_mode(self, gatewayId, taplinkerId, action, duration, eco):
        url = self.base_url + 'activateInstantMode'
        # autoBack: Re-activate watering plan after Instant Mode
        auto_back = "true"
        if action:
            action = "true"
        else:
            action = "false"
        if eco:
            eco = "true"
        else:
            eco = "false"
        payload = {'username': self.username,
                   'apiKey': self.apiKey,
                   'gatewayId': gatewayId,
                   'taplinkerId': taplinkerId,
                   'action': action,
                   'duration': duration,
                   'eco': eco,
                   'autoBack': auto_back,
                   }
        ret = self.call_api(url, payload)
        return ret

    def activate_interval_mode(self, gatewayId, taplinkerId):
        url = self.base_url + 'activateIntervalMode'
        payload = {'username': self.username,
                   'apiKey': self.apiKey,
                   'gatewayId': gatewayId,
                   'taplinkerId': taplinkerId
                   }
        ret = self.call_api(url, payload)
        return ret

    def activate_odd_even_mode(self, gatewayId, taplinkerId):
        url = self.base_url + 'activateOddEvenMode'
        payload = {'username': self.username,
                   'apiKey': self.apiKey,
                   'gatewayId': gatewayId,
                   'taplinkerId': taplinkerId
                   }
        ret = self.call_api(url, payload)
        return ret

    def activate_seven_day_mode(self, gatewayId, taplinkerId):
        url = self.base_url + 'activateSevenDayMode'
        payload = {'username': self.username,
                   'apiKey': self.apiKey,
                   'gatewayId': gatewayId,
                   'taplinkerId': taplinkerId
                   }
        ret = self.call_api(url, payload)
        return ret

    def activate_month_mode(self, gatewayId, taplinkerId):
        url = self.base_url + 'activateMonthMode'
        payload = {'username': self.username,
                   'apiKey': self.apiKey,
                   'gatewayId': gatewayId,
                   'taplinkerId': taplinkerId
                   }
        ret = self.call_api(url, payload)
        return ret

    def get_all_devices(self):
        url = self.base_url + 'getAllDevices'
        payload = {'username': self.username, 'apiKey': self.apiKey}
        ret = self.call_api(url, payload)
        return ret

    def get_watering_status(self, taplinkerId):
        url = self.base_url + 'getWateringStatus'
        payload = {'username': self.username,
                   'apiKey': self.apiKey,
                   'taplinkerId': taplinkerId
                   }
        ret = self.call_api(url, payload)
        return ret


if __name__ == "__main__":
    try:
        import json
        with open('test_data.json') as json_file:
            all_devices = json.load(json_file)
        for ctl in all_devices['devices']:
            print('Name: ' + ctl['name'])
            print('Gateway ID: ' + ctl['gatewayId'])
            print('ISY GW ID: ' + ctl['gatewayId'][0:8].lower())
            for tl in ctl['taplinker']:
                print('TL Name: ' + tl['taplinkerName'])
                print('TL ID: ' + tl['taplinkerId'][0:8].lower())

        for gw in all_devices['devices']:
            for tl in gw['taplinker']:
                if tl['taplinkerId'][0:8].lower():
                    if tl['status'] == 'Connected':
                        print("setting driver ON")
else: print("setting driver OFF") if tl['watering'] is not None: for i in tl['watering']: print(i) print(tl['watering'][i]) except (KeyboardInterrupt, SystemExit): sys.exit(0)
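
# --- Usage sketch (illustrative; credentials and IDs below are placeholders) ---
# The LinkTap web API is keyed by the account username and an API key generated on
# link-tap.com; gateway and taplinker IDs come from get_all_devices(), and call_api()
# returns the string 'error' on any failure. Duration units follow the LinkTap API
# documentation (not restated here).
#
#     lt = LinkTap('user@example.com', '0123456789abcdef')
#     devices = lt.get_all_devices()
#     if devices != 'error':
#         gw = devices['devices'][0]
#         tl = gw['taplinker'][0]
#         lt.activate_instant_mode(gw['gatewayId'], tl['taplinkerId'],
#                                  action=True, duration=5, eco=False)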
the-stack_106_17026
# -*- encoding: UTF-8 - import logging import filecmp import os import re import shutil from time import sleep from zipfile import ZipFile from django.conf import settings from django.contrib.auth.models import AnonymousUser from django.core.management.base import BaseCommand, CommandError from django.db.models import Q from django.http import StreamingHttpResponse from django.test.client import RequestFactory from django.utils import translation, timezone from django.utils.translation import ugettext as _ from landez import TilesManager from landez.sources import DownloadError from geotrek.common.models import FileType # NOQA from geotrek.altimetry.views import ElevationProfile, ElevationArea, serve_elevation_chart from geotrek.common import models as common_models from geotrek.common.views import ThemeViewSet from geotrek.core.views import ParametersView from geotrek.feedback.views import CategoryList as FeedbackCategoryList from geotrek.flatpages.models import FlatPage from geotrek.flatpages.views import FlatPageViewSet, FlatPageMeta from geotrek.infrastructure import models as infrastructure_models from geotrek.infrastructure.views import InfrastructureViewSet from geotrek.signage.views import SignageViewSet from geotrek.tourism import models as tourism_models from geotrek.tourism import views as tourism_views from geotrek.trekking import models as trekking_models from geotrek.trekking.views import (TrekViewSet, POIViewSet, TrekPOIViewSet, TrekGPXDetail, TrekKMLDetail, TrekServiceViewSet, ServiceViewSet, TrekDocumentPublic, TrekMeta, Meta, TrekInfrastructureViewSet, TrekSignageViewSet,) if 'geotrek.sensitivity' in settings.INSTALLED_APPS: from geotrek.sensitivity import models as sensitivity_models from geotrek.sensitivity import views as sensitivity_views # Register mapentity models from geotrek.trekking import urls # NOQA from geotrek.tourism import urls # NOQA logger = logging.getLogger(__name__) class ZipTilesBuilder(object): def __init__(self, filepath, close_zip, **builder_args): builder_args['tile_format'] = self.format_from_url(builder_args['tiles_url']) self.close_zip = close_zip self.zipfile = ZipFile(filepath, 'w') self.tm = TilesManager(**builder_args) if not isinstance(settings.MOBILE_TILES_URL, str) and len(settings.MOBILE_TILES_URL) > 1: for url in settings.MOBILE_TILES_URL[1:]: args = builder_args args['tiles_url'] = url args['tile_format'] = self.format_from_url(args['tiles_url']) self.tm.add_layer(TilesManager(**args), opacity=1) self.tiles = set() def format_from_url(self, url): """ Try to guess the tile mime type from the tiles URL. Should work with basic stuff like `http://osm.org/{z}/{x}/{y}.png` or funky stuff like WMTS (`http://server/wmts?LAYER=...FORMAT=image/jpeg...) 
""" m = re.search(r'FORMAT=([a-zA-Z/]+)&', url) if m: return m.group(1) return url.rsplit('.')[-1] def add_coverage(self, bbox, zoomlevels): self.tiles |= set(self.tm.tileslist(bbox, zoomlevels)) def run(self): for tile in self.tiles: name = '{0}/{1}/{2}{ext}'.format(*tile, ext=settings.MOBILE_TILES_EXTENSION or self.tm._tile_extension) try: data = self.tm.tile(tile) except DownloadError: logger.warning("Failed to download tile %s" % name) else: self.zipfile.writestr(name, data) self.close_zip(self.zipfile) class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('path') parser.add_argument('--url', '-u', dest='url', default='http://localhost', help='Base url') parser.add_argument('--rando-url', '-r', dest='rando_url', default='http://localhost', help='Base url of public rando site') parser.add_argument('--source', '-s', dest='source', default=None, help='Filter by source(s)') parser.add_argument('--portal', '-P', dest='portal', default=None, help='Filter by portal(s)') parser.add_argument('--skip-pdf', '-p', action='store_true', dest='skip_pdf', default=False, help='Skip generation of PDF files') parser.add_argument('--skip-tiles', '-t', action='store_true', dest='skip_tiles', default=False, help='Skip generation of zip tiles files') parser.add_argument('--skip-dem', '-d', action='store_true', dest='skip_dem', default=False, help='Skip generation of DEM files for 3D') parser.add_argument('--skip-profile-png', '-e', action='store_true', dest='skip_profile_png', default=False, help='Skip generation of PNG elevation profile'), parser.add_argument('--languages', '-l', dest='languages', default='', help='Languages to sync') parser.add_argument('--with-touristicevents', '-w', action='store_true', dest='with_events', default=False, help='include touristic events') parser.add_argument('--with-touristiccontent-categories', '-c', dest='content_categories', default=None, help='include touristic contents ' '(filtered by category ID ex: --with-touristiccontent-categories="1,2,3")'), parser.add_argument('--with-signages', '-g', action='store_true', dest='with_signages', default=False, help='include signages') parser.add_argument('--with-infrastructures', '-i', action='store_true', dest='with_infrastructures', default=False, help='include infrastructures') def mkdirs(self, name): dirname = os.path.dirname(name) if not os.path.exists(dirname): os.makedirs(dirname) def sync_global_tiles(self): """ Creates a tiles file on the global extent. """ zipname = os.path.join('zip', 'tiles', 'global.zip') if self.verbosity == 2: self.stdout.write(u"\x1b[36m**\x1b[0m \x1b[1m{name}\x1b[0m ...".format(name=zipname), ending="") self.stdout.flush() global_extent = settings.LEAFLET_CONFIG['SPATIAL_EXTENT'] logger.info("Global extent is %s" % unicode(global_extent)) global_file = os.path.join(self.tmp_root, zipname) logger.info("Build global tiles file...") self.mkdirs(global_file) def close_zip(zipfile): return self.close_zip(zipfile, zipname) tiles = ZipTilesBuilder(global_file, close_zip, **self.builder_args) tiles.add_coverage(bbox=global_extent, zoomlevels=settings.MOBILE_TILES_GLOBAL_ZOOMS) tiles.run() def sync_trek_tiles(self, trek): """ Creates a tiles file for the specified Trek object. 
""" zipname = os.path.join('zip', 'tiles', '{pk}.zip'.format(pk=trek.pk)) if self.verbosity == 2: self.stdout.write(u"\x1b[36m**\x1b[0m \x1b[1m{name}\x1b[0m ...".format(name=zipname), ending="") self.stdout.flush() trek_file = os.path.join(self.tmp_root, zipname) def _radius2bbox(lng, lat, radius): return (lng - radius, lat - radius, lng + radius, lat + radius) self.mkdirs(trek_file) def close_zip(zipfile): return self.close_zip(zipfile, zipname) tiles = ZipTilesBuilder(trek_file, close_zip, **self.builder_args) geom = trek.geom if geom.geom_type == 'MultiLineString': geom = geom[0] # FIXME geom.transform(4326) for (lng, lat) in geom.coords: large = _radius2bbox(lng, lat, settings.MOBILE_TILES_RADIUS_LARGE) small = _radius2bbox(lng, lat, settings.MOBILE_TILES_RADIUS_SMALL) tiles.add_coverage(bbox=large, zoomlevels=settings.MOBILE_TILES_LOW_ZOOMS) tiles.add_coverage(bbox=small, zoomlevels=settings.MOBILE_TILES_HIGH_ZOOMS) tiles.run() def sync_view(self, lang, view, name, url='/', params={}, zipfile=None, fix2028=False, **kwargs): if self.verbosity == 2: self.stdout.write(u"\x1b[36m{lang}\x1b[0m \x1b[1m{name}\x1b[0m ...".format(lang=lang, name=name), ending="") self.stdout.flush() fullname = os.path.join(self.tmp_root, name) self.mkdirs(fullname) request = self.factory.get(url, params, HTTP_HOST=self.host) request.LANGUAGE_CODE = lang request.user = AnonymousUser() try: response = view(request, **kwargs) if hasattr(response, 'render'): response.render() except Exception as e: self.successfull = False if self.verbosity == 2: self.stdout.write(u"\x1b[3D\x1b[31mfailed ({})\x1b[0m".format(e)) return if response.status_code != 200: self.successfull = False if self.verbosity == 2: self.stdout.write(u"\x1b[3D\x1b[31;1mfailed (HTTP {code})\x1b[0m".format(code=response.status_code)) return f = open(fullname, 'w') if isinstance(response, StreamingHttpResponse): content = b''.join(response.streaming_content) else: content = response.content # Fix strange unicode characters 2028 and 2029 that make Geotrek-mobile crash if fix2028: content = content.replace('\\u2028', '\\n') content = content.replace('\\u2029', '\\n') f.write(content) f.close() oldfilename = os.path.join(self.dst_root, name) # If new file is identical to old one, don't recreate it. This will help backup if os.path.isfile(oldfilename) and filecmp.cmp(fullname, oldfilename): os.unlink(fullname) os.link(oldfilename, fullname) if self.verbosity == 2: self.stdout.write(u"\x1b[3D\x1b[32munchanged\x1b[0m") else: if self.verbosity == 2: self.stdout.write(u"\x1b[3D\x1b[32mgenerated\x1b[0m") # FixMe: Find why there are duplicate files. 
if zipfile: if name not in zipfile.namelist(): zipfile.write(fullname, name) def sync_json(self, lang, viewset, name, zipfile=None, params={}, as_view_args=[], **kwargs): view = viewset.as_view(*as_view_args) name = os.path.join('api', lang, '{name}.json'.format(name=name)) if self.source: params['source'] = ','.join(self.source) if self.portal: params['portal'] = ','.join(self.portal) self.sync_view(lang, view, name, params=params, zipfile=zipfile, fix2028=True, **kwargs) def sync_geojson(self, lang, viewset, name, zipfile=None, params={}, **kwargs): view = viewset.as_view({'get': 'list'}) name = os.path.join('api', lang, name) params.update({'format': 'geojson'}) if self.source: params['source'] = ','.join(self.source) elif 'source' in params.keys(): # bug source is still in cache when executing command del params['source'] if self.portal: params['portal'] = ','.join(self.portal) elif 'portal' in params.keys(): del params['portal'] self.sync_view(lang, view, name, params=params, zipfile=zipfile, fix2028=True, **kwargs) def sync_trek_infrastructures(self, lang, trek, zipfile=None): params = {'format': 'geojson'} view = TrekInfrastructureViewSet.as_view({'get': 'list'}) name = os.path.join('api', lang, 'treks', str(trek.pk), 'infrastructures.geojson') self.sync_view(lang, view, name, params=params, zipfile=zipfile, pk=trek.pk) def sync_trek_signages(self, lang, trek, zipfile=None): params = {'format': 'geojson'} view = TrekSignageViewSet.as_view({'get': 'list'}) name = os.path.join('api', lang, 'treks', str(trek.pk), 'signages.geojson') self.sync_view(lang, view, name, params=params, zipfile=zipfile, pk=trek.pk) def sync_trek_pois(self, lang, trek, zipfile=None): params = {'format': 'geojson'} if settings.ZIP_TOURISTIC_CONTENTS_AS_POI: view = tourism_views.TrekTouristicContentAndPOIViewSet.as_view({'get': 'list'}) name = os.path.join('api', lang, 'treks', str(trek.pk), 'pois.geojson') self.sync_view(lang, view, name, params=params, zipfile=zipfile, pk=trek.pk) view = TrekPOIViewSet.as_view({'get': 'list'}) self.sync_view(lang, view, name, params=params, zipfile=None, pk=trek.pk) else: view = TrekPOIViewSet.as_view({'get': 'list'}) name = os.path.join('api', lang, 'treks', str(trek.pk), 'pois.geojson') self.sync_view(lang, view, name, params=params, zipfile=zipfile, pk=trek.pk) def sync_trek_services(self, lang, trek, zipfile=None): view = TrekServiceViewSet.as_view({'get': 'list'}) name = os.path.join('api', lang, 'treks', str(trek.pk), 'services.geojson') self.sync_view(lang, view, name, params={'format': 'geojson'}, zipfile=zipfile, pk=trek.pk) def sync_object_view(self, lang, obj, view, basename_fmt, zipfile=None, params={}, **kwargs): modelname = obj._meta.model_name name = os.path.join('api', lang, '{modelname}s'.format(modelname=modelname), str(obj.pk), basename_fmt.format(obj=obj)) self.sync_view(lang, view, name, params=params, zipfile=zipfile, pk=obj.pk, **kwargs) def sync_trek_pdf(self, lang, obj): if self.skip_pdf: return view = TrekDocumentPublic.as_view(model=type(obj)) params = {} if self.source: params['source'] = self.source[0] if self.portal: params['portal'] = ','.join(self.portal) self.sync_object_view(lang, obj, view, '{obj.slug}.pdf', params=params) def sync_profile_json(self, lang, obj, zipfile=None): view = ElevationProfile.as_view(model=type(obj)) self.sync_object_view(lang, obj, view, 'profile.json', zipfile=zipfile) def sync_profile_png(self, lang, obj, zipfile=None): view = serve_elevation_chart model_name = type(obj)._meta.model_name self.sync_object_view(lang, 
obj, view, 'profile.png', zipfile=zipfile, model_name=model_name, from_command=True) def sync_dem(self, lang, obj): if self.skip_dem: return view = ElevationArea.as_view(model=type(obj)) self.sync_object_view(lang, obj, view, 'dem.json') def sync_gpx(self, lang, obj): self.sync_object_view(lang, obj, TrekGPXDetail.as_view(), '{obj.slug}.gpx') def sync_kml(self, lang, obj): self.sync_object_view(lang, obj, TrekKMLDetail.as_view(), '{obj.slug}.kml') def sync_meta(self, lang): name = os.path.join('meta', lang, 'index.html') self.sync_view(lang, Meta.as_view(), name, params={'rando_url': self.rando_url, 'lang': lang}) def sync_trek_meta(self, lang, obj): name = os.path.join('meta', lang, obj.rando_url, 'index.html') self.sync_view(lang, TrekMeta.as_view(), name, pk=obj.pk, params={'rando_url': self.rando_url}) def sync_touristiccontent_meta(self, lang, obj): name = os.path.join('meta', lang, obj.rando_url, 'index.html') self.sync_view(lang, tourism_views.TouristicContentMeta.as_view(), name, pk=obj.pk, params={'rando_url': self.rando_url}) def sync_touristicevent_meta(self, lang, obj): name = os.path.join('meta', lang, obj.rando_url, 'index.html') self.sync_view(lang, tourism_views.TouristicEventMeta.as_view(), name, pk=obj.pk, params={'rando_url': self.rando_url}) def sync_file(self, lang, name, src_root, url, zipfile=None): url = url.strip('/') src = os.path.join(src_root, name) dst = os.path.join(self.tmp_root, url, name) self.mkdirs(dst) if not os.path.isfile(dst): os.link(src, dst) if zipfile: zipfile.write(dst, os.path.join(url, name)) if self.verbosity == 2: self.stdout.write(u"\x1b[36m{lang}\x1b[0m \x1b[1m{url}/{name}\x1b[0m \x1b[32mcopied\x1b[0m".format(lang=lang, url=url, name=name)) def sync_static_file(self, lang, name): self.sync_file(lang, name, settings.STATIC_ROOT, settings.STATIC_URL) def sync_media_file(self, lang, field, zipfile=None): if field and field.name: self.sync_file(lang, field.name, settings.MEDIA_ROOT, settings.MEDIA_URL, zipfile=zipfile) def sync_pictograms(self, lang, model, zipfile=None): for obj in model.objects.all(): self.sync_media_file(lang, obj.pictogram, zipfile=zipfile) def sync_poi_media(self, lang, poi): if poi.resized_pictures: self.sync_media_file(lang, poi.resized_pictures[0][1], zipfile=self.trek_zipfile) for picture, resized in poi.resized_pictures[1:]: self.sync_media_file(lang, resized) for other_file in poi.files: self.sync_media_file(lang, other_file.attachment_file) def sync_trek(self, lang, trek): zipname = os.path.join('zip', 'treks', lang, '{pk}.zip'.format(pk=trek.pk)) zipfullname = os.path.join(self.tmp_root, zipname) self.mkdirs(zipfullname) self.trek_zipfile = ZipFile(zipfullname, 'w') self.sync_json(lang, ParametersView, 'parameters', zipfile=self.zipfile) self.sync_json(lang, ThemeViewSet, 'themes', as_view_args=[{'get': 'list'}], zipfile=self.zipfile) self.sync_trek_pois(lang, trek, zipfile=self.zipfile) if self.with_infrastructures: self.sync_trek_infrastructures(lang, trek) if self.with_signages: self.sync_trek_signages(lang, trek) self.sync_trek_services(lang, trek, zipfile=self.zipfile) self.sync_gpx(lang, trek) self.sync_kml(lang, trek) self.sync_trek_meta(lang, trek) self.sync_trek_pdf(lang, trek) self.sync_profile_json(lang, trek) if not self.skip_profile_png: self.sync_profile_png(lang, trek, zipfile=self.zipfile) self.sync_dem(lang, trek) for desk in trek.information_desks.all(): self.sync_media_file(lang, desk.thumbnail, zipfile=self.trek_zipfile) for poi in trek.published_pois: self.sync_poi_media(lang, poi) if 
settings.ZIP_TOURISTIC_CONTENTS_AS_POI: for content in trek.published_touristic_contents: if content.resized_pictures: self.sync_media_file(lang, content.resized_pictures[0][1], zipfile=self.trek_zipfile) self.sync_media_file(lang, trek.thumbnail, zipfile=self.zipfile) for picture, resized in trek.resized_pictures: self.sync_media_file(lang, resized, zipfile=self.trek_zipfile) if self.with_events: self.sync_trek_touristicevents(lang, trek, zipfile=self.zipfile) if self.categories: self.sync_trek_touristiccontents(lang, trek, zipfile=self.zipfile) if 'geotrek.sensitivity' in settings.INSTALLED_APPS: self.sync_trek_sensitiveareas(lang, trek) if self.verbosity == 2: self.stdout.write(u"\x1b[36m{lang}\x1b[0m \x1b[1m{name}\x1b[0m ...".format(lang=lang, name=zipname), ending="") self.close_zip(self.trek_zipfile, zipname) def close_zip(self, zipfile, name): oldzipfilename = os.path.join(self.dst_root, name) zipfilename = os.path.join(self.tmp_root, name) try: oldzipfile = ZipFile(oldzipfilename, 'r') except IOError: uptodate = False else: old = set([(zi.filename, zi.CRC) for zi in oldzipfile.infolist()]) new = set([(zi.filename, zi.CRC) for zi in zipfile.infolist()]) uptodate = (old == new) oldzipfile.close() zipfile.close() if uptodate: stat = os.stat(oldzipfilename) os.utime(zipfilename, (stat.st_atime, stat.st_mtime)) if self.verbosity == 2: if uptodate: self.stdout.write(u"\x1b[3D\x1b[32munchanged\x1b[0m") else: self.stdout.write(u"\x1b[3D\x1b[32mzipped\x1b[0m") def sync_flatpages(self, lang): self.sync_geojson(lang, FlatPageViewSet, 'flatpages.geojson', zipfile=self.zipfile) flatpages = FlatPage.objects.filter(published=True) if self.source: flatpages = flatpages.filter(source__name__in=self.source) if self.portal: flatpages = flatpages.filter(Q(portal__name__in=self.portal) | Q(portal=None)) for flatpage in flatpages: name = os.path.join('meta', lang, flatpage.rando_url, 'index.html') self.sync_view(lang, FlatPageMeta.as_view(), name, pk=flatpage.pk, params={'rando_url': self.rando_url}) def sync_trekking(self, lang): zipname = os.path.join('zip', 'treks', lang, 'global.zip') zipfullname = os.path.join(self.tmp_root, zipname) self.mkdirs(zipfullname) self.zipfile = ZipFile(zipfullname, 'w') self.sync_geojson(lang, TrekViewSet, 'treks.geojson', zipfile=self.zipfile) self.sync_geojson(lang, POIViewSet, 'pois.geojson') if self.with_infrastructures: self.sync_geojson(lang, InfrastructureViewSet, 'infrastructures.geojson') self.sync_static_file(lang, 'infrastructure/picto-infrastructure.png') if self.with_signages: self.sync_geojson(lang, SignageViewSet, 'signages.geojson') self.sync_static_file(lang, 'signage/picto-signage.png') if 'geotrek.flatpages' in settings.INSTALLED_APPS: self.sync_flatpages(lang) self.sync_geojson(lang, ServiceViewSet, 'services.geojson', zipfile=self.zipfile) self.sync_view(lang, FeedbackCategoryList.as_view(), os.path.join('api', lang, 'feedback', 'categories.json'), zipfile=self.zipfile) self.sync_static_file(lang, 'trekking/trek.svg') self.sync_static_file(lang, 'trekking/itinerancy.svg') self.sync_pictograms(lang, common_models.Theme, zipfile=self.zipfile) self.sync_pictograms(lang, common_models.RecordSource, zipfile=self.zipfile) if self.with_signages or self.with_infrastructures: self.sync_pictograms(lang, infrastructure_models.InfrastructureType) self.sync_pictograms(lang, trekking_models.TrekNetwork, zipfile=self.zipfile) self.sync_pictograms(lang, trekking_models.Practice, zipfile=self.zipfile) self.sync_pictograms(lang, trekking_models.Accessibility, 
zipfile=self.zipfile) self.sync_pictograms(lang, trekking_models.DifficultyLevel, zipfile=self.zipfile) self.sync_pictograms(lang, trekking_models.POIType, zipfile=self.zipfile) self.sync_pictograms(lang, trekking_models.ServiceType, zipfile=self.zipfile) self.sync_pictograms(lang, trekking_models.Route, zipfile=self.zipfile) self.sync_pictograms(lang, trekking_models.WebLinkCategory) if settings.ZIP_TOURISTIC_CONTENTS_AS_POI: self.sync_pictograms('**', tourism_models.TouristicContentCategory, zipfile=self.zipfile) treks = trekking_models.Trek.objects.existing().order_by('pk') treks = treks.filter( Q(**{'published_{lang}'.format(lang=lang): True}) | Q(**{'trek_parents__parent__published_{lang}'.format(lang=lang): True, 'trek_parents__parent__deleted': False}) ) if self.source: treks = treks.filter(source__name__in=self.source) if self.portal: treks = treks.filter(Q(portal__name__in=self.portal) | Q(portal=None)) for trek in treks: self.sync_trek(lang, trek) self.sync_tourism(lang) self.sync_meta(lang) if 'geotrek.sensitivity' in settings.INSTALLED_APPS: self.sync_sensitiveareas(lang) if self.verbosity == 2: self.stdout.write(u"\x1b[36m{lang}\x1b[0m \x1b[1m{name}\x1b[0m ...".format(lang=lang, name=zipname), ending="") self.close_zip(self.zipfile, zipname) def sync_tiles(self): if not self.skip_tiles: if self.celery_task: self.celery_task.update_state( state='PROGRESS', meta={ 'name': self.celery_task.name, 'current': 10, 'total': 100, 'infos': u"{}".format(_(u"Global tiles syncing ...")) } ) self.sync_global_tiles() if self.celery_task: self.celery_task.update_state( state='PROGRESS', meta={ 'name': self.celery_task.name, 'current': 20, 'total': 100, 'infos': u"{}".format(_(u"Trek tiles syncing ...")) } ) treks = trekking_models.Trek.objects.existing().order_by('pk') if self.source: treks = treks.filter(source__name__in=self.source) if self.portal: treks = treks.filter(Q(portal__name__in=self.portal) | Q(portal=None)) for trek in treks: if trek.any_published or any([parent.any_published for parent in trek.parents]): self.sync_trek_tiles(trek) if self.celery_task: self.celery_task.update_state( state='PROGRESS', meta={ 'name': self.celery_task.name, 'current': 30, 'total': 100, 'infos': u"{}".format(_(u"Tiles synced ...")) } ) def sync_content(self, lang, content): self.sync_touristiccontent_meta(lang, content) if not self.skip_pdf: params = {} if self.source: params['source'] = self.source[0] view = tourism_views.TouristicContentDocumentPublic.as_view(model=type(content)) self.sync_object_view(lang, content, view, '{obj.slug}.pdf', params=params) for picture, resized in content.resized_pictures: self.sync_media_file(lang, resized) def sync_event(self, lang, event): self.sync_touristicevent_meta(lang, event) if not self.skip_pdf: params = {} if self.source: params['source'] = self.source[0] if self.portal: params['portal'] = self.portal[0] view = tourism_views.TouristicEventDocumentPublic.as_view(model=type(event)) self.sync_object_view(lang, event, view, '{obj.slug}.pdf', params=params) for picture, resized in event.resized_pictures: self.sync_media_file(lang, resized) def sync_sensitiveareas(self, lang): self.sync_geojson(lang, sensitivity_views.SensitiveAreaViewSet, 'sensitiveareas.geojson', params={'practices': 'Terrestre'}) for area in sensitivity_models.SensitiveArea.objects.existing().filter(published=True): name = os.path.join('api', lang, 'sensitiveareas', '{obj.pk}.kml'.format(obj=area)) self.sync_view(lang, sensitivity_views.SensitiveAreaKMLDetail.as_view(), name, pk=area.pk) 
self.sync_media_file(lang, area.species.pictogram) def sync_trek_sensitiveareas(self, lang, trek): params = {'format': 'geojson', 'practices': 'Terrestre'} view = sensitivity_views.TrekSensitiveAreaViewSet.as_view({'get': 'list'}) name = os.path.join('api', lang, 'treks', str(trek.pk), 'sensitiveareas.geojson') self.sync_view(lang, view, name, params=params, pk=trek.pk) def sync_tourism(self, lang): self.sync_geojson(lang, tourism_views.TouristicContentViewSet, 'touristiccontents.geojson') self.sync_geojson(lang, tourism_views.TouristicEventViewSet, 'touristicevents.geojson', params={'ends_after': timezone.now().strftime('%Y-%m-%d')}) # picto touristic events self.sync_file(lang, os.path.join('tourism', 'touristicevent.svg'), settings.STATIC_ROOT, settings.STATIC_URL, zipfile=self.zipfile) # json with params = {} if self.categories: params.update({'categories': ','.join(category for category in self.categories), }) if self.with_events: params.update({'events': '1'}) self.sync_json(lang, tourism_views.TouristicCategoryView, 'touristiccategories', zipfile=self.zipfile, params=params) # pictos touristic content catgories for category in tourism_models.TouristicContentCategory.objects.all(): self.sync_media_file(lang, category.pictogram, zipfile=self.zipfile) contents = tourism_models.TouristicContent.objects.existing().order_by('pk') contents = contents.filter(**{'published_{lang}'.format(lang=lang): True}) if self.source: contents = contents.filter(source__name__in=self.source) if self.portal: contents = contents.filter(Q(portal__name__in=self.portal) | Q(portal=None)) for content in contents: self.sync_content(lang, content) events = tourism_models.TouristicEvent.objects.existing().order_by('pk') events = events.filter(**{'published_{lang}'.format(lang=lang): True}) if self.source: events = events.filter(source__name__in=self.source) if self.portal: events = events.filter(Q(portal__name__in=self.portal) | Q(portal=None)) for event in events: self.sync_event(lang, event) # Information desks self.sync_geojson(lang, tourism_views.InformationDeskViewSet, 'information_desks.geojson') for pk in tourism_models.InformationDeskType.objects.values_list('pk', flat=True): name = 'information_desks-{}.geojson'.format(pk) self.sync_geojson(lang, tourism_views.InformationDeskViewSet, name, type=pk) for desk in tourism_models.InformationDesk.objects.all(): self.sync_media_file(lang, desk.thumbnail) def sync_trek_touristiccontents(self, lang, trek, zipfile=None): params = {'format': 'geojson', 'categories': ','.join(category for category in self.categories)} if self.portal: params['portal'] = ','.join(portal for portal in self.portal) view = tourism_views.TrekTouristicContentViewSet.as_view({'get': 'list'}) name = os.path.join('api', lang, 'treks', str(trek.pk), 'touristiccontents.geojson') self.sync_view(lang, view, name, params=params, zipfile=zipfile, pk=trek.pk) for content in trek.touristic_contents.all(): self.sync_touristiccontent_media(lang, content, zipfile=self.trek_zipfile) def sync_trek_touristicevents(self, lang, trek, zipfile=None): params = {'format': 'geojson', 'portal': ','.join(portal for portal in self.portal)} view = tourism_views.TrekTouristicEventViewSet.as_view({'get': 'list'}) name = os.path.join('api', lang, 'treks', str(trek.pk), 'touristicevents.geojson') self.sync_view(lang, view, name, params=params, zipfile=zipfile, pk=trek.pk) for event in trek.touristic_events.all(): self.sync_touristicevent_media(lang, event, zipfile=self.trek_zipfile) def sync_touristicevent_media(self, 
lang, event, zipfile=None): if event.resized_pictures: self.sync_media_file(lang, event.resized_pictures[0][1], zipfile=zipfile) for picture, resized in event.resized_pictures[1:]: self.sync_media_file(lang, resized) def sync_touristiccontent_media(self, lang, content, zipfile=None): if content.resized_pictures: self.sync_media_file(lang, content.resized_pictures[0][1], zipfile=zipfile) for picture, resized in content.resized_pictures[1:]: self.sync_media_file(lang, resized) def sync(self): self.sync_tiles() step_value = int(50 / len(settings.MODELTRANSLATION_LANGUAGES)) current_value = 30 for lang in self.languages: if self.celery_task: self.celery_task.update_state( state='PROGRESS', meta={ 'name': self.celery_task.name, 'current': current_value + step_value, 'total': 100, 'infos': u"{} : {} ...".format(_(u"Language"), lang) } ) current_value = current_value + step_value translation.activate(lang) self.sync_trekking(lang) translation.deactivate() self.sync_static_file('**', 'tourism/touristicevent.svg') self.sync_pictograms('**', tourism_models.InformationDeskType) self.sync_pictograms('**', tourism_models.TouristicContentCategory) self.sync_pictograms('**', tourism_models.TouristicContentType) self.sync_pictograms('**', tourism_models.TouristicEventType) def check_dst_root_is_empty(self): if not os.path.exists(self.dst_root): return existing = set([os.path.basename(p) for p in os.listdir(self.dst_root)]) remaining = existing - set(('api', 'media', 'meta', 'static', 'zip')) if remaining: raise CommandError(u"Destination directory contains extra data") def rename_root(self): if os.path.exists(self.dst_root): tmp_root2 = os.path.join(os.path.dirname(self.dst_root), 'deprecated_sync_rando') os.rename(self.dst_root, tmp_root2) os.rename(self.tmp_root, self.dst_root) shutil.rmtree(tmp_root2) else: os.rename(self.tmp_root, self.dst_root) def handle(self, *args, **options): self.successfull = True self.verbosity = options['verbosity'] self.dst_root = options["path"].rstrip('/') self.check_dst_root_is_empty() if options['url'][:7] not in ('http://', 'https://'): raise CommandError('url parameter should start with http:// or https://') self.referer = options['url'] self.host = self.referer.split('://')[1] self.rando_url = options['rando_url'] if self.rando_url.endswith('/'): self.rando_url = self.rando_url[:-1] self.factory = RequestFactory() self.tmp_root = os.path.join(os.path.dirname(self.dst_root), 'tmp_sync_rando') os.mkdir(self.tmp_root) self.skip_pdf = options['skip_pdf'] self.skip_tiles = options['skip_tiles'] self.skip_dem = options['skip_dem'] self.skip_profile_png = options['skip_profile_png'] self.source = options['source'] if options['languages']: self.languages = options['languages'].split(',') else: self.languages = settings.MODELTRANSLATION_LANGUAGES self.with_events = options.get('with_events', False) self.categories = None if options.get('content_categories', u""): self.categories = options.get('content_categories', u"").split(',') self.with_signages = options.get('with_signages', False) self.with_infrastructures = options.get('with_infrastructures', False) self.celery_task = options.get('task', None) if self.source is not None: self.source = self.source.split(',') if options['portal'] is not None: self.portal = options['portal'].split(',') else: self.portal = [] if isinstance(settings.MOBILE_TILES_URL, str): tiles_url = settings.MOBILE_TILES_URL else: tiles_url = settings.MOBILE_TILES_URL[0] self.builder_args = { 'tiles_url': tiles_url, 'tiles_headers': {"Referer": 
self.referer}, 'ignore_errors': True, 'tiles_dir': os.path.join(settings.DEPLOY_ROOT, 'var', 'tiles'), } try: self.sync() if self.celery_task: self.celery_task.update_state( state='PROGRESS', meta={ 'name': self.celery_task.name, 'current': 100, 'total': 100, 'infos': u"{}".format(_(u"Sync ended")) } ) except Exception: shutil.rmtree(self.tmp_root) raise self.rename_root() done_message = 'Done' if self.successfull: done_message = self.style.SUCCESS(done_message) if self.verbosity >= 1: self.stdout.write(done_message) if not self.successfull: raise CommandError('Some errors raised during synchronization.') sleep(2) # end sleep to ensure sync page get result
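# A minimal sketch of invoking the command above programmatically, assuming it is
# registered as the "sync_rando" management command (the name is inferred from the
# tmp_sync_rando / deprecated_sync_rando directory names and is an assumption, as is
# the destination path). Keyword arguments map onto the options read in handle().
from django.core.management import call_command

call_command(
    'sync_rando',                         # hypothetical command name
    '/srv/rando/data',                    # becomes options["path"]
    url='https://geotrek.example.com',    # must start with http:// or https://
    skip_tiles=True,
    skip_pdf=True,
    languages='en,fr',
    verbosity=2,
)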
the-stack_106_17027
#!/usr/bin/env python """ Copyright (C) 2014-2016 Twitter Inc and other contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import argparse import csv import re import hashlib import sys def setup(args, flags): """Sets up arguments and flags for processing hashes. Args: args: named to setup type, infile and outfile Returns: boolean: true or false """ if args.type == 'MOBILEDEVICEID': # mobile device IDs can be a mixture of IDFA, ADID and ANDROID in a single file flags['regex'] = re.compile('^[a-z0-9][a-z0-9\-]+[a-z0-9]$') elif args.type == 'IDFA': # flags['uppercase'] = True flags['regex'] = re.compile('^[a-z0-9][a-z0-9\-]+[a-z0-9]$') elif args.type == 'ADID': flags['regex'] = re.compile('^[a-z0-9][a-z0-9\-]+[a-z0-9]$') elif args.type == 'ANDROID': flags['regex'] = re.compile('^[a-z0-9]+$') elif args.type == 'EMAIL': flags['regex'] = re.compile('^[a-z0-9][a-z0-9_\-\.\+]+\@[a-z0-9][a-z0-9\.]+[a-z]$') elif args.type == 'PHONE' or args.type == 'TWITTERID': flags['dropleadingzeros'] = True flags['regex'] = re.compile('^\d+$') elif args.type == 'TWITTERSCREENNAME': flags['dropleadingat'] = True flags['regex'] = re.compile('^[a-z0-9_]+$') else: # There is an invalid type. print ("ERROR: invalid type") return False # Flags should be correctly set if so return a true value return True def hashFile(args, flags): """Hashes the file based on the params setup in args. Args: args: named to setup type, infile and outfile Returns: dict: {"written": N, "skipped": N} """ skipped = 0 written = 0 if args.infile.name.endswith(".csv"): csv_file = True reader = csv.reader(args.infile, dialect='excel') else: csv_file = False reader = args.infile for text in reader: if not csv_file: text = [text] for line in text: if not line: break line = line.rstrip() # Remove whitespace line = ''.join(line.split()) # Set case if flags['uppercase']: line = line.upper() else: line = line.lower() # Drop leading '@' if flags['dropleadingat']: line = line.lstrip('@') # Drop leading zeros if flags['dropleadingzeros']: line = line.lstrip('0') if flags['regex'].match(line) is None: skipped += 1 continue if debug: print ("\t" + line) hashed = hashlib.sha256(line).hexdigest() args.outfile.write(hashed + "\n") written += 1 # Close --infile and --outfile args.infile.close() args.outfile.close() hash_info = {"written": written, "skipped": skipped} return hash_info if __name__ == "__main__": debug = False parser = argparse.ArgumentParser(description='Hash the contents of a file for TA upload.') aud_types = ['MOBILEDEVICEID', 'IDFA', 'ADID', 'ANDROID', 'EMAIL', 'PHONE', 'TWITTERID', 'TWITTERSCREENNAME'] # Set the type. parser.add_argument('--type', required=True, metavar='TWITTERID', help='source data type.', choices=aud_types) # parse --infile e.g. the in location of the file. parser.add_argument('--infile', required=True, type=argparse.FileType('rU'), metavar='/path/to/source.txt', help='input file to parse.') # parse --outfile e.g. 
the location of the file parser.add_argument('--outfile', required=True, type=argparse.FileType('w'), metavar='/path/to/output.txt', help='file to write output.') # Parse the arguments from the command to the variable args. args = parser.parse_args() # Setup a dictionary with Flags flags = {'uppercase': False, 'dropleadingzeros': False, 'dropleadingat': False} # If setup is correctly configured.. if setup(args, flags) is True: # Run the hashFile function with the variables hashed_info = hashFile(args, flags) print ("Written:\t" + str(hashed_info['written'])) print ("Skipped:\t" + str(hashed_info['skipped'])) # Exit sys.exit()
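# Hedged illustration of what the script above does to a single EMAIL record: strip
# whitespace, lowercase, check the pattern, then SHA-256 the normalised value. The
# sample address is made up; the real script is run from the command line, e.g.
#   python audience_hash.py --type EMAIL --infile emails.txt --outfile hashed.txt
# (the file names here are placeholders).
import hashlib
import re

email = " [email protected] "
normalised = "".join(email.split()).lower()
if re.match(r'^[a-z0-9][a-z0-9_\-\.\+]+\@[a-z0-9][a-z0-9\.]+[a-z]$', normalised):
    print(hashlib.sha256(normalised.encode("utf-8")).hexdigest())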
the-stack_106_17028
import numpy as np import pandas as pd import xarray as xr import Grid import pf_dynamic_sph import os import sys from timeit import default_timer as timer from copy import copy if __name__ == "__main__": start = timer() # ---- INITIALIZE GRIDS ---- higherCutoff = False; cutoffRat = 1.5 betterResolution = False; resRat = 0.5 (Lx, Ly, Lz) = (60, 60, 60) (dx, dy, dz) = (0.25, 0.25, 0.25) # (Lx, Ly, Lz) = (40, 40, 40) # (dx, dy, dz) = (0.25, 0.25, 0.25) # (Lx, Ly, Lz) = (21, 21, 21) # (dx, dy, dz) = (0.375, 0.375, 0.375) xgrid = Grid.Grid('CARTESIAN_3D') xgrid.initArray('x', -Lx, Lx, dx); xgrid.initArray('y', -Ly, Ly, dy); xgrid.initArray('z', -Lz, Lz, dz) NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz) NGridPoints_desired = (1 + 2 * Lx / dx) * (1 + 2 * Lz / dz) Ntheta = 50 Nk = np.ceil(NGridPoints_desired / Ntheta) theta_max = np.pi thetaArray, dtheta = np.linspace(0, theta_max, Ntheta, retstep=True) # k_max = np.sqrt((np.pi / dx)**2 + (np.pi / dy)**2 + (np.pi / dz)**2) k_max = ((2 * np.pi / dx)**3 / (4 * np.pi / 3))**(1 / 3) k_min = 1e-5 kArray, dk = np.linspace(k_min, k_max, Nk, retstep=True) if dk < k_min: print('k ARRAY GENERATION ERROR') kgrid = Grid.Grid("SPHERICAL_2D") if higherCutoff is True and betterResolution is False: kgrid.initArray('k', k_min, cutoffRat * k_max, dk) k_max = kgrid.getArray('k')[-1] elif higherCutoff is False and betterResolution is True: kgrid.initArray('k', k_min, k_max, resRat * dk) dk = kgrid.getArray('k')[1] - kgrid.getArray('k')[0] else: kgrid.initArray_premade('k', kArray) kgrid.initArray_premade('th', thetaArray) # for realdyn evolution tMax = 100 dt = 0.2 CoarseGrainRate = 50 tgrid = np.arange(0, tMax + dt, dt) gParams = [xgrid, kgrid, tgrid] NGridPoints = kgrid.size() print('Total time steps: {0}'.format(tgrid.size)) print('UV cutoff: {0}'.format(k_max)) print('dk: {0}'.format(dk)) print('NGridPoints: {0}'.format(NGridPoints)) print(NGridPoints_cart, NGridPoints) # Toggle parameters toggleDict = {'Location': 'cluster', 'Dynamics': 'real', 'Coupling': 'twophonon', 'Grid': 'spherical', 'Longtime': 'false', 'CoarseGrainRate': CoarseGrainRate} # ---- SET PARAMS ---- mB = 1 n0 = 1 gBB = (4 * np.pi / mB) * 0.05 # Dresher uses aBB ~ 0.2 instead of 0.5 here # gBB = (4 * np.pi / mB) * 0.02 # Dresher uses aBB ~ 0.2 instead of 0.5 here nu = np.sqrt(n0 * gBB / mB) aBB = (mB / (4 * np.pi)) * gBB xi = (8 * np.pi * n0 * aBB)**(-1 / 2) print(k_max * xi) print(5 * mB * xi**2) print(-3.0 / xi) print((n0 * aBB * 3)**(-1 / 2) * mB * xi**2) Params_List = [] mI_Vals = np.array([0.5, 1.0, 2, 5.0]) aIBi_Vals = np.array([-10.0, -5.0, -2.0]) if higherCutoff is True or betterResolution is True: mI_Vals = np.array([1.0, 5.0]) aIBi_Vals = np.array([-2.0, -1.0]) P_Vals_norm = np.concatenate((np.linspace(0.1, 0.8, 5, endpoint=False), np.linspace(0.8, 1.2, 10, endpoint=False), np.linspace(1.2, 3.0, 12, endpoint=False), np.linspace(3.0, 5.0, 3))) for mI in mI_Vals: P_Vals = mI * nu * P_Vals_norm for aIBi in aIBi_Vals: for P in P_Vals: sParams = [mI, mB, n0, gBB] cParams = [P, aIBi] if toggleDict['Location'] == 'home': datapath = '/home/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}'.format(NGridPoints_cart) elif toggleDict['Location'] == 'work': datapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}'.format(NGridPoints_cart) elif toggleDict['Location'] == 'cluster': datapath = '/n/scratchlfs/demler_lab/kis/genPol_data/NGridPoints_{:.2E}'.format(NGridPoints_cart) if higherCutoff 
is True: datapath = datapath + '_cutoffRat_{:.2f}'.format(cutoffRat) if betterResolution is True: datapath = datapath + '_resRat_{:.2f}'.format(resRat) gridpath = copy(datapath) datapath = datapath + '/massRatio={:.1f}'.format(mI / mB) if toggleDict['Dynamics'] == 'real': innerdatapath = datapath + '/redyn' elif toggleDict['Dynamics'] == 'imaginary': innerdatapath = datapath + '/imdyn' if toggleDict['Grid'] == 'cartesian': innerdatapath = innerdatapath + '_cart' elif toggleDict['Grid'] == 'spherical': innerdatapath = innerdatapath + '_spherical' if toggleDict['Coupling'] == 'frohlich': innerdatapath = innerdatapath + '_froh' elif toggleDict['Coupling'] == 'twophonon': innerdatapath = innerdatapath Params_List.append([sParams, cParams, innerdatapath]) # if os.path.isdir(gridpath) is False: # os.mkdir(gridpath) # if os.path.isdir(datapath) is False: # os.mkdir(datapath) # if os.path.isdir(innerdatapath) is False: # os.mkdir(innerdatapath) print(len(Params_List)) # # ---- COMPUTE DATA ON COMPUTER ---- # runstart = timer() # for ind, Params in enumerate(Params_List): # loopstart = timer() # [sParams, cParams, innerdatapath] = Params_List[ind] # [mI, mB, n0, gBB] = sParams # [P, aIBi] = cParams # dyncart_ds = pf_dynamic_cart.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict) # dyncart_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi)) # loopend = timer() # print('Index: {:d}, P: {:.2f}, aIBi: {:.2f} Time: {:.2f}'.format(ind, P, aIBi, loopend - loopstart)) # end = timer() # print('Total Time: {:.2f}'.format(end - runstart)) # ---- COMPUTE DATA ON CLUSTER ---- runstart = timer() taskCount = int(os.getenv('SLURM_ARRAY_TASK_COUNT')) taskID = int(os.getenv('SLURM_ARRAY_TASK_ID')) # taskCount = len(Params_List) # taskID = 72 if(taskCount > len(Params_List)): print('ERROR: TASK COUNT MISMATCH') P = float('nan') aIBi = float('nan') sys.exit() else: [sParams, cParams, innerdatapath] = Params_List[taskID] [mI, mB, n0, gBB] = sParams [P, aIBi] = cParams dynsph_ds = pf_dynamic_sph.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict) dynsph_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi)) end = timer() print('Task ID: {:d}, P: {:.2f}, aIBi: {:.2f} Time: {:.2f}'.format(taskID, P, aIBi, end - runstart))
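# Standalone re-computation of the grid bookkeeping printed above (same formulas as in
# the script), handy as a quick sanity check without building the Grid objects.
import numpy as np

Lx = Ly = Lz = 60
dx = dy = dz = 0.25
NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
NGridPoints_desired = (1 + 2 * Lx / dx) * (1 + 2 * Lz / dz)
Ntheta = 50
Nk = np.ceil(NGridPoints_desired / Ntheta)
k_max = ((2 * np.pi / dx) ** 3 / (4 * np.pi / 3)) ** (1 / 3)
print(NGridPoints_cart, Nk, k_max)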
the-stack_106_17029
from __future__ import print_function # This file is part of Androguard. # # Copyright (C) 2013, Anthony Desnos <desnos at t0t0.fr> # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from builtins import range from builtins import object from subprocess import Popen, PIPE, STDOUT import tempfile import os from androguard.core.androconf import rrmdir from androguard.decompiler.dad import decompile from androguard.util import read from pygments.filter import Filter from pygments import highlight from pygments.lexers import get_lexer_by_name from pygments.formatters import TerminalFormatter from pygments.token import Token import logging log = logging.getLogger("androguard.decompiler") class JADXDecompilerError(Exception): """ Exception for JADX related problems """ pass class MethodFilter(Filter): def __init__(self, **options): """ Filter Method Code from a whole class :param options: """ Filter.__init__(self, **options) self.method_name = options["method_name"] # self.descriptor = options["descriptor"] self.present = False self.get_desc = True # False def filter(self, lexer, stream): a = [] l = [] rep = [] for ttype, value in stream: if self.method_name == value and (ttype is Token.Name.Function or ttype is Token.Name): # print ttype, value item_decl = -1 for i in range(len(a) - 1, 0, -1): if a[i][0] is Token.Keyword.Declaration: if a[i][1] != "class": item_decl = i break if item_decl != -1: self.present = True l.extend(a[item_decl:]) if self.present and ttype is Token.Keyword.Declaration: item_end = -1 for i in range(len(l) - 1, 0, -1): if l[i][0] is Token.Operator and l[i][1] == "}": item_end = i break if item_end != -1: rep.extend(l[:item_end + 1]) l = [] self.present = False if self.present: l.append((ttype, value)) a.append((ttype, value)) if self.present: nb = 0 item_end = -1 for i in range(len(l) - 1, 0, -1): if l[i][0] is Token.Operator and l[i][1] == "}": nb += 1 if nb == 2: item_end = i break rep.extend(l[:item_end + 1]) return rep # TODO move it somewhere else class Dex2Jar(object): def __init__(self, vm, bin_dex2jar="dex2jar.sh", tmp_dir="/tmp/"): """ DEX2JAR is a tool to convert a Dalvik file into Java Classes :param vm: :param bin_dex2jar: :param tmp_dir: """ pathtmp = tmp_dir if not os.path.exists(pathtmp): os.makedirs(pathtmp) fd, fdname = tempfile.mkstemp(dir=pathtmp) with os.fdopen(fd, "w+b") as fd: fd.write(vm.get_buff()) fd.flush() cmd = Popen([bin_dex2jar, fdname], stdout=PIPE, stderr=STDOUT) stdout, stderr = cmd.communicate() os.unlink(fdname) self.jarfile = fdname + "_dex2jar.jar" def get_jar(self): return self.jarfile class DecompilerDex2Jad(object): def __init__(self, vm, bin_dex2jar="dex2jar.sh", bin_jad="jad", tmp_dir="/tmp/"): """ Decompiler interface for JAD JAD is not a native Dalvik decompiler, therefore dex2jar is required. .. deprecated:: 3.3.5 JAD is not supported anymore in androguard! 
:param vm: :param bin_dex2jar: :param bin_jad: :param tmp_dir: """ warnings.warn("JAD is deprecated since 3.3.5.", DeprecationWarning) self.classes = {} self.classes_failed = [] pathtmp = tmp_dir if not os.path.exists(pathtmp): os.makedirs(pathtmp) fd, fdname = tempfile.mkstemp(dir=pathtmp) with os.fdopen(fd, "w+b") as fd: fd.write(vm.get_buff()) fd.flush() cmd = Popen([bin_dex2jar, fdname], stdout=PIPE, stderr=STDOUT) stdout, stderr = cmd.communicate() os.unlink(fdname) pathclasses = fdname + "dex2jar/" cmd = Popen(["unzip", fdname + "_dex2jar.jar", "-d", pathclasses], stdout=PIPE, stderr=STDOUT) stdout, stderr = cmd.communicate() os.unlink(fdname + "_dex2jar.jar") for root, dirs, files in os.walk(pathclasses, followlinks=True): if files: for f in files: real_filename = root if real_filename[-1] != "/": real_filename += "/" real_filename += f cmd = Popen([bin_jad, "-o", "-d", root, real_filename], stdout=PIPE, stderr=STDOUT) stdout, stderr = cmd.communicate() for i in vm.get_classes(): fname = pathclasses + "/" + i.get_name()[1:-1] + ".jad" if os.path.isfile(fname): self.classes[i.get_name()] = read(fname, binary=False) else: self.classes_failed.append(i.get_name()) rrmdir(pathclasses) def get_source_method(self, method): class_name = method.get_class_name() method_name = method.get_name() if class_name not in self.classes: return "" lexer = get_lexer_by_name("java", stripall=True) lexer.add_filter(MethodFilter(method_name=method_name)) formatter = TerminalFormatter() result = highlight(self.classes[class_name], lexer, formatter) return result def display_source(self, method): print(self.get_source_method(method)) def get_source_class(self, _class): return self.classes[_class.get_name()] def get_all(self, class_name): if class_name not in self.classes: return "" lexer = get_lexer_by_name("java", stripall=True) formatter = TerminalFormatter() result = highlight(self.classes[class_name], lexer, formatter) return result def display_all(self, _class): print(self.get_all(_class.get_name())) class DecompilerDex2WineJad(object): def __init__(self, vm, bin_dex2jar="dex2jar.sh", bin_jad="jad", tmp_dir="/tmp/"): """ Use JAD on wine .. deprecated:: 3.3.5 JAD is not supported anymore by androguard! 
:param vm: :param bin_dex2jar: :param bin_jad: :param tmp_dir: """ warnings.warn("JAD is deprecated since 3.3.5.", DeprecationWarning) self.classes = {} self.classes_failed = [] pathtmp = tmp_dir if not os.path.exists(pathtmp): os.makedirs(pathtmp) fd, fdname = tempfile.mkstemp(dir=pathtmp) with os.fdopen(fd, "w+b") as fd: fd.write(vm.get_buff()) fd.flush() cmd = Popen([bin_dex2jar, fdname], stdout=PIPE, stderr=STDOUT) stdout, stderr = cmd.communicate() os.unlink(fdname) pathclasses = fdname + "dex2jar/" cmd = Popen(["unzip", fdname + "_dex2jar.jar", "-d", pathclasses], stdout=PIPE, stderr=STDOUT) stdout, stderr = cmd.communicate() os.unlink(fdname + "_dex2jar.jar") for root, dirs, files in os.walk(pathclasses, followlinks=True): if files: for f in files: real_filename = root if real_filename[-1] != "/": real_filename += "/" real_filename += f cmd = Popen(["wine", bin_jad, "-o", "-d", root, real_filename], stdout=PIPE, stderr=STDOUT) stdout, stderr = cmd.communicate() for i in vm.get_classes(): fname = pathclasses + "/" + i.get_name()[1:-1] + ".jad" if os.path.isfile(fname): self.classes[i.get_name()] = read(fname, binary=False) else: self.classes_failed.append(i.get_name()) rrmdir(pathclasses) def get_source_method(self, method): class_name = method.get_class_name() method_name = method.get_name() if class_name not in self.classes: return "" lexer = get_lexer_by_name("java", stripall=True) lexer.add_filter(MethodFilter(method_name=method_name)) formatter = TerminalFormatter() result = highlight(self.classes[class_name], lexer, formatter) return result def display_source(self, method): print(self.get_source_method(method)) def get_source_class(self, _class): return self.classes[_class.get_name()] def get_all(self, class_name): if class_name not in self.classes: return "" lexer = get_lexer_by_name("java", stripall=True) formatter = TerminalFormatter() result = highlight(self.classes[class_name], lexer, formatter) return result def display_all(self, _class): print(self.get_all(_class.get_name())) class DecompilerDed(object): def __init__(self, vm, bin_ded="ded.sh", tmp_dir="/tmp/"): """ DED is an old, probably deprecated, decompiler http://siis.cse.psu.edu/ded/ .. deprecated:: 3.3.5 DED is not supported by androguard anymore! It is now replaced by DARE. 
:param vm: `DalvikVMFormat` object :param bin_ded: :param tmp_dir: """ warnings.warn("DED is deprecated since 3.3.5.", DeprecationWarning) self.classes = {} self.classes_failed = [] pathtmp = tmp_dir if not os.path.exists(pathtmp): os.makedirs(pathtmp) fd, fdname = tempfile.mkstemp(dir=pathtmp) with os.fdopen(fd, "w+b") as fd: fd.write(vm.get_buff()) fd.flush() dirname = tempfile.mkdtemp(prefix=fdname + "-src") cmd = Popen([bin_ded, "-c", "-o", "-d", dirname, fdname], stdout=PIPE, stderr=STDOUT) stdout, stderr = cmd.communicate() os.unlink(fdname) findsrc = None for root, dirs, files in os.walk(dirname + "/optimized-decompiled/"): if dirs: for f in dirs: if f == "src": findsrc = root if findsrc[-1] != "/": findsrc += "/" findsrc += f break if findsrc is not None: break for i in vm.get_classes(): fname = findsrc + "/" + i.get_name()[1:-1] + ".java" # print fname if os.path.isfile(fname): self.classes[i.get_name()] = read(fname, binary=False) else: self.classes_failed.append(i.get_name()) rrmdir(dirname) def get_source_method(self, method): class_name = method.get_class_name() method_name = method.get_name() if class_name not in self.classes: return "" lexer = get_lexer_by_name("java", stripall=True) lexer.add_filter(MethodFilter(method_name=method_name)) formatter = TerminalFormatter() result = highlight(self.classes[class_name], lexer, formatter) return result def display_source(self, method): print(self.get_source_method(method)) def get_all(self, class_name): if class_name not in self.classes: return "" lexer = get_lexer_by_name("java", stripall=True) formatter = TerminalFormatter() result = highlight(self.classes[class_name], lexer, formatter) return result def get_source_class(self, _class): return self.classes[_class.get_name()] def display_all(self, _class): print(self.get_all(_class.get_name())) class DecompilerDex2Fernflower(object): def __init__(self, vm, bin_dex2jar="dex2jar.sh", bin_fernflower="fernflower.jar", options_fernflower={"dgs": '1', "asc": '1'}, tmp_dir="/tmp/"): """ Decompiler interface for Fernflower Fernflower is a java decompiler by IntelliJ: https://github.com/JetBrains/intellij-community/tree/master/plugins/java-decompiler/engine As it can not decompile Dalvik code directly, the DEX is first decompiled as a JAR file. .. deprecated:: 3.3.5 Fernflower is not supported anymore by androguard. 
:param vm: `DalvikVMFormtat` object :param bin_dex2jar: :param bin_fernflower: :param options_fernflower: :param tmp_dir: """ warnings.warn("Fernflower is deprecated since 3.3.5.", DeprecationWarning) self.classes = {} self.classes_failed = [] pathtmp = tmp_dir if not os.path.exists(pathtmp): os.makedirs(pathtmp) fd, fdname = tempfile.mkstemp(dir=pathtmp) with os.fdopen(fd, "w+b") as fd: fd.write(vm.get_buff()) fd.flush() cmd = Popen([bin_dex2jar, fdname], stdout=PIPE, stderr=STDOUT) stdout, stderr = cmd.communicate() os.unlink(fdname) pathclasses = fdname + "dex2jar/" cmd = Popen(["unzip", fdname + "_dex2jar.jar", "-d", pathclasses], stdout=PIPE, stderr=STDOUT) stdout, stderr = cmd.communicate() os.unlink(fdname + "_dex2jar.jar") for root, dirs, files in os.walk(pathclasses, followlinks=True): if files: for f in files: real_filename = root if real_filename[-1] != "/": real_filename += "/" real_filename += f l = ["java", "-jar", bin_fernflower] for option in options_fernflower: l.append("-%s:%s" % (option, options_fernflower[option])) l.append(real_filename) l.append(root) cmd = Popen(l, stdout=PIPE, stderr=STDOUT) stdout, stderr = cmd.communicate() for i in vm.get_classes(): fname = pathclasses + "/" + i.get_name()[1:-1] + ".java" if os.path.isfile(fname): self.classes[i.get_name()] = read(fname, binary=False) else: self.classes_failed.append(i.get_name()) rrmdir(pathclasses) def get_source_method(self, method): class_name = method.get_class_name() method_name = method.get_name() if class_name not in self.classes: return "" lexer = get_lexer_by_name("java", stripall=True) lexer.add_filter(MethodFilter(method_name=method_name)) formatter = TerminalFormatter() result = highlight(self.classes[class_name], lexer, formatter) return result def display_source(self, method): print(self.get_source_method(method)) def get_source_class(self, _class): return self.classes[_class.get_name()] def get_all(self, class_name): if class_name not in self.classes: return "" lexer = get_lexer_by_name("java", stripall=True) formatter = TerminalFormatter() result = highlight(self.classes[class_name], lexer, formatter) return result def display_all(self, _class): print(self.get_all(_class.get_name())) class DecompilerDAD: def __init__(self, vm, vmx): """ Decompiler wrapper for DAD: **D**AD is **A** **D**ecompiler DAD is the androguard internal decompiler. This Method does not use the :class:`~androguard.decompiler.dad.decompile.DvMachine` but creates :class:`~androguard.decompiler.dad.decompile.DvClass` and :class:`~androguard.decompiler.dad.decompile.DvMethod` on demand. 
:param androguard.core.bytecodes.dvm.DalvikVMFormat vm: `DalvikVMFormat` object :param androguard.core.analysis.analysis.Analysis vmx: `Analysis` object """ self.vm = vm self.vmx = vmx def get_source_method(self, m): mx = self.vmx.get_method(m) z = decompile.DvMethod(mx) z.process() return z.get_source() def get_ast_method(self, m): mx = self.vmx.get_method(m) z = decompile.DvMethod(mx) z.process(doAST=True) return z.get_ast() def display_source(self, m): result = self.get_source_method(m) lexer = get_lexer_by_name("java", stripall=True) formatter = TerminalFormatter() result = highlight(result, lexer, formatter) print(result) def get_source_class(self, _class): c = decompile.DvClass(_class, self.vmx) c.process() return c.get_source() def get_ast_class(self, _class): c = decompile.DvClass(_class, self.vmx) c.process(doAST=True) return c.get_ast() def get_source_class_ext(self, _class): c = decompile.DvClass(_class, self.vmx) c.process() result = c.get_source_ext() return result def display_all(self, _class): result = self.get_source_class(_class) lexer = get_lexer_by_name("java", stripall=True) formatter = TerminalFormatter() result = highlight(result, lexer, formatter) print(result) def get_all(self, class_name): pass class DecompilerJADX: def __init__(self, vm, vmx, jadx="jadx", keepfiles=False): """ DecompilerJADX is a wrapper for the jadx decompiler: https://github.com/skylot/jadx Note, that jadx need to write files to your local disk. :param vm: `DalvikVMFormat` object :param vmx: `Analysis` object :param jadx: path to the jadx executable :param keepfiles: set to True, if you like to keep temporary files """ self.vm = vm self.vmx = vmx # Dictionary to store classnames: sourcecode self.classes = {} # Result directory: # TODO need to remove the folder correctly! tmpfolder = tempfile.mkdtemp() # We need to decompile the whole dex file, as we do not have an API... # dump the dex file into a temp file # THIS WILL NOT WORK ON WINDOWS!!! # See https://stackoverflow.com/q/15169101/446140 # Files can not be read, only if they specify temp file. But jadx does not do that... # # We need to trick jadx by setting the suffix, otherwise the file will not be loaded with tempfile.NamedTemporaryFile(suffix=".dex") as tf: tf.write(vm.get_buff()) cmd = [jadx, "-d", tmpfolder, "--escape-unicode", "--no-res", tf.name] log.debug("Call JADX with the following cmdline: {}".format(" ".join(cmd))) x = Popen(cmd, stdout=PIPE, stderr=PIPE) stdout, _ = x.communicate() # Looks like jadx does not use stderr log.info("Output of JADX during decompilation") for line in stdout.decode("UTF-8").splitlines(): log.info(line) if x.returncode != 0: rrmdir(tmpfolder) raise JADXDecompilerError("Could not decompile file. Args: {}".format(" ".join(cmd))) # Next we parse the folder structure for later lookup # We read the content of each file here, so we can later delete the folder # We check here two ways, first we iterate all files and see if the class exists # in androguard # then, we iterate all classes in androguard and check if the file exists. 
andr_class_names = {x.get_name()[1:-1]: x for x in vm.get_classes()} # First, try to find classes for the files we have for root, dirs, files in os.walk(tmpfolder): for f in files: if not f.endswith(".java"): log.warning("found a file in jadx folder which is not a java file: {}".format(f)) continue # as the path begins always with `self.res` (hopefully), we remove that length # also, all files should end with .java path = os.path.join(root, f)[len(tmpfolder) + 1:-5] path = path.replace(os.sep, "/") # Special care for files without package # All files that have no package set, will get the # package `defpackage` automatically if path.startswith("defpackage"): path = path[len("defpackage/"):] if path in andr_class_names: with open(os.path.join(root, f), "rb") as fp: # Need to convert back to the "full" classname self.classes["L{};".format(path)] = fp.read().decode("UTF-8") else: log.warning("Found a class called {}, which is not found by androguard!".format(path)) # Next, try to find files for the classes we have for cl in andr_class_names: fname = self._find_class(cl, tmpfolder) if fname: if "L{};".format(cl) not in self.classes: with open(fname, "rb") as fp: # TODO need to snip inner class from file self.classes["L{};".format(cl)] = fp.read().decode("UTF-8") else: # Class was already found... pass else: log.warning("Found a class called {} which is not decompiled by jadx".format(cl)) # check if we have good matching if len(self.classes) == len(andr_class_names): log.debug("JADX looks good, we have the same number of classes: {}".format(len(self.classes))) else: log.info("Looks like JADX is missing some classes or " "we decompiled too much: decompiled: {} vs androguard: {}".format(len(self.classes), len(andr_class_names))) if not keepfiles: rrmdir(tmpfolder) def _find_class(self, clname, basefolder): # check if defpackage if "/" not in clname: # this is a defpackage class probably... # Search first for defpackage, then search for requested string res = self._find_class("defpackage/{}".format(clname), basefolder) if res: return res # We try to map inner classes here if "$" in clname: # sometimes the inner class get's an extra file, sometimes not... # So we try all possibilities for x in range(clname.count("$")): tokens = clname.split("$", x + 1) base = "$".join(tokens[:-1]) res = self._find_class(base, basefolder) if res: return res # Check the whole supplied name fname = os.path.join(basefolder, clname.replace("/", os.sep) + ".java") if not os.path.isfile(fname): return None return fname def get_source_method(self, m): """ Return the Java source of a single method :param m: `EncodedMethod` Object :return: """ class_name = m.get_class_name() method_name = m.get_name() if class_name not in self.classes: return "" lexer = get_lexer_by_name("java", stripall=True) lexer.add_filter(MethodFilter(method_name=method_name)) formatter = TerminalFormatter() result = highlight(self.classes[class_name], lexer, formatter) return result def get_source_class(self, _class): """ Return the Java source code of a whole class :param _class: `ClassDefItem` object, to get the source from :return: """ if not _class.get_name() in self.classes: return "" return self.classes[_class.get_name()] def display_source(self, m): """ This method does the same as `get_source_method` but prints the result directly to stdout :param m: `EncodedMethod` to print :return: """ print(self.get_source_method(m)) def display_all(self, _class): """ ??? :param _class: :return: """ pass def get_all(self, class_name): """ ??? 
:param class_name: :return: """ pass
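# Hedged usage sketch for the pure-Python DAD decompiler defined above. AnalyzeAPK is
# androguard's high-level loader; depending on the androguard version it returns either
# a single DalvikVMFormat or a list of them, hence the isinstance check. The APK path
# is a placeholder.
from androguard.misc import AnalyzeAPK

a, d, dx = AnalyzeAPK("app.apk")       # a: APK object, d: DalvikVMFormat(s), dx: Analysis
vm = d[0] if isinstance(d, list) else d
decompiler = DecompilerDAD(vm, dx)
for cls in vm.get_classes():
    java_source = decompiler.get_source_class(cls)
    print(cls.get_name(), len(java_source))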
the-stack_106_17030
# # Copyright 2019 BrainPad Inc. All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # import os import re import subprocess from abc import abstractmethod from cliboa.conf import env from cliboa.core.file_parser import YamlScenarioParser from cliboa.core.listener import StepStatusListener from cliboa.core.scenario_queue import ScenarioQueue from cliboa.core.step_queue import StepQueue from cliboa.core.validator import DIScenarioFormat, ProjectDirectoryExistence, ScenarioFileExistence from cliboa.scenario import * # noqa from cliboa.util.cache import StepArgument from cliboa.util.exception import ScenarioFileInvalid from cliboa.util.helper import Helper from cliboa.util.http import BasicAuth, FormAuth # noqa from cliboa.util.lisboa_log import LisboaLog __all__ = ["YamlScenarioManager", "JsonScenarioManager"] class ScenarioManager(object): """ Management of scenario defined in files """ # dependency injection keys in scenario.yml DI_KEYS = ["auth", "base_auth"] def __init__(self, cmd_args): self._logger = LisboaLog.get_logger(__name__) self._cmd_args = cmd_args self._pj_dir = os.path.join(env.PROJECT_DIR, cmd_args.project_name) self._pj_scenario_dir = os.path.join( env.PROJECT_DIR, cmd_args.project_name, env.SCENARIO_DIR_NAME ) if cmd_args.format == "yaml": self._pj_scenario_file = ( os.path.join( env.PROJECT_DIR, cmd_args.project_name, env.SCENARIO_FILE_NAME ) + ".yml" ) self._cmn_scenario_file = ( os.path.join(env.COMMON_DIR, env.SCENARIO_FILE_NAME) + ".yml" ) else: self._pj_scenario_file = ( os.path.join( env.PROJECT_DIR, cmd_args.project_name, env.SCENARIO_FILE_NAME ) + "." + cmd_args.format ) self._cmn_scenario_file = ( os.path.join(env.COMMON_DIR, env.SCENARIO_FILE_NAME) + "." 
+ cmd_args.format ) # key and var of dinamic variables self._dynamic_key_and_val = {} @abstractmethod def create_scenario_queue(self): """ Create Scenario Queue """ class YamlScenarioManager(ScenarioManager): """ Manage scenario with yml format """ def create_scenario_queue(self): self._logger.info("Start to create scenario queue") # validation self.__valid_essential_dir() self.__valid_essential_files() # parse scenario.yml parser = YamlScenarioParser(self._pj_scenario_file, self._cmn_scenario_file) yaml_scenario_list = parser.parse() if yaml_scenario_list and isinstance(yaml_scenario_list, list): self.__invoke_steps(yaml_scenario_list) else: raise ScenarioFileInvalid("scenario.yml is invalid.") self._logger.info("Finish to create scenario queue") def __valid_essential_dir(self): """ Project directory validation """ valid_instance = ProjectDirectoryExistence() valid_instance(self._pj_dir) def __valid_essential_files(self): """ Scenairo file validation """ valid_instance = ScenarioFileExistence() valid_instance(self._pj_scenario_file) def __invoke_steps(self, yaml_scenario_list): """ Create executable instance and push them to queue Args: yaml_scenario_list: parsed yaml list """ self._logger.info("Start to invoke scenario") # Create queue to save step instances q = StepQueue() for s_dict in yaml_scenario_list: if "multi_process_count" in s_dict.keys(): q.multi_proc_cnt = s_dict.get("multi_process_count") continue instances = [] if "parallel" in s_dict.keys(): for row in s_dict.get("parallel"): instance = self.__create_instance(row, yaml_scenario_list) Helper.set_property( instance, "logger", LisboaLog.get_logger(instance.__class__.__name__), ) instances.append(instance) StepArgument._put(row["step"], instance) else: instance = self.__create_instance(s_dict, yaml_scenario_list) Helper.set_property( instance, "logger", LisboaLog.get_logger(instance.__class__.__name__), ) instances.append(instance) StepArgument._put(s_dict["step"], instance) # Put instance to queue q.push(instances) # save queue to static area setattr(ScenarioQueue, "step_queue", q) self._logger.info("Finish to invoke scenario") def __create_instance(self, s_dict, yaml_scenario_list): cls_name = s_dict["class"] self._logger.debug("Create %s instance" % cls_name) if self.__is_custom_cls(cls_name) is True: from cliboa.core.factory import CustomInstanceFactory instance = CustomInstanceFactory.create(cls_name) else: cls = globals()[cls_name] instance = cls() base_args = ["step", "symbol", "parallel", "io", "listeners"] for arg in base_args: if arg == "listeners": self._append_listeners(instance, s_dict.get(arg)) else: Helper.set_property(instance, arg, s_dict.get(arg)) cls_attrs_dict = {} if isinstance(yaml_scenario_list, list) and "arguments" in s_dict.keys(): cls_attrs_dict = s_dict["arguments"] # loop and set class attribute di_key = None di_instance = None if cls_attrs_dict: cls_attrs_dict = self.__extract_with_vars(cls_attrs_dict) # check if the keys of dependency injection di_keys, di_instances = self.__create_di_instance(cls_attrs_dict) if di_keys and di_instances: di_keys_and_instances = zip(di_keys, di_instances) for di_key, di_instance in di_keys_and_instances: self._logger.debug( "Inject %s to %s object." 
% (di_instance, instance) ) # Injection setattr(instance, di_key, di_instance) del cls_attrs_dict[di_key] pattern = re.compile(r"{{(.*?)}}") for yaml_k, yaml_v in cls_attrs_dict.items(): # if value includes {{ var }}, replace value specified by with_vars if isinstance(yaml_v, str): matches = pattern.findall(yaml_v) for match in matches: var_name = match.strip() yaml_v = self.__replace_vars(yaml_v, var_name) Helper.set_property(instance, yaml_k, yaml_v) return instance def __is_custom_cls(self, cls_name): """ The specified class in scenario.yml is a custom step class or not Args: cls_name: the specified step class in scenario.yml Return: True: custom step class False: default step class """ custom_classes = env.COMMON_CUSTOM_CLASSES + env.PROJECT_CUSTOM_CLASSES for cc in custom_classes: split_cc = cc.split(".") custom_cls_name = split_cc[1] if cls_name == custom_cls_name: return True return False def __extract_with_vars(self, cls_attrs_dict): """ If 'with_vars' exist in scenario.yml, extract and save to dictionary. After that, remove from list """ # Extract with_vars if exists exists_with_vars = "with_vars" in cls_attrs_dict.keys() if exists_with_vars: variables = cls_attrs_dict["with_vars"] for yaml_k, yaml_v in variables.items(): self._dynamic_key_and_val[yaml_k] = yaml_v del cls_attrs_dict["with_vars"] return cls_attrs_dict def __replace_vars(self, yaml_v, var_name): """ Replace {{ var }} in string Args: yaml_v: replace target value var_name: means {{ var }} itself """ cmd = self._dynamic_key_and_val[var_name] if not cmd: raise ScenarioFileInvalid( "scenario.yml is invalid. 'with_vars' definition against %s does not exist." # noqa % var_name ) shell_output = subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True ).communicate()[0] shell_output = shell_output.strip() # remove head byte string shell_output = re.sub("^b", "", str(shell_output)) # remove ' shell_output = re.sub("'", "", str(shell_output)) return re.sub(r"{{(.*?)%s(.*?)}}" % var_name, shell_output, yaml_v) def __create_di_instance(self, cls_attrs): """ Create an instance to be injected Args: step class attributes Retrurn: DI attribute names, DI instances """ di_keys = [] di_instance = None di_instances = [] di_params = None for k in cls_attrs.keys(): if k in self.DI_KEYS: di_keys.append(k) di_params = cls_attrs.get(k) valid = DIScenarioFormat(k, di_params) valid() di_cls = di_params["class"] di_cls = globals()[di_cls] di_instance = di_cls() if di_instance: self._logger.debug( "An instance %s to be injected exists." % di_instance ) del di_params["class"] # set attributes to instance if di_params: for k, v in di_params.items(): Helper.set_property(di_instance, k, v) di_instances.append(di_instance) return di_keys, di_instances def _append_listeners(self, instance, args): listeners = [StepStatusListener()] if args is not None: from cliboa.core.factory import CustomInstanceFactory if type(args) is str: listeners.append(CustomInstanceFactory.create(args)) elif type(args) is list: for arg in args: listeners.append(CustomInstanceFactory.create(arg)) Helper.set_property(instance, "listeners", listeners) class JsonScenarioManager(ScenarioManager): """ Manage scenario with json format """ def create_scenario_queue(self): """ TODO: implement in the future """
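# Standalone illustration of the "{{ var }}" substitution performed by __replace_vars
# above: the placeholder name is looked up in with_vars, the associated shell command
# is executed, and its stdout replaces the placeholder. The command and the path value
# below are examples, not taken from a real scenario.yml.
import re
import subprocess

with_vars = {"today": "date '+%Y%m%d'"}
value = "in/{{ today }}/data.csv"

for match in re.compile(r"{{(.*?)}}").findall(value):
    var_name = match.strip()
    out = subprocess.run(with_vars[var_name], shell=True,
                         capture_output=True, text=True).stdout.strip()
    value = re.sub(r"{{(.*?)%s(.*?)}}" % var_name, out, value)
print(value)   # e.g. in/20240101/data.csv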
the-stack_106_17033
""" @author: David Lei @since: 21/08/2016 @modified: """ def joinWords(list_of_strings): sentence = "" for string in list_of_strings: sentence = sentence + string # string concatenation # new copy of sentance # as strings are immutable return sentence l = ["Hi ", "there ", "Bob!"] print(joinWords(joinWords(l))) """ In this approach strings are copied over and over character by character First iteration copies x characters second iteration 2x third iteration 3x total time is O(x+2x+3x+.+nx) so complexity = O(xn^2) note: 1 + 2 + 3 + .. + n = """
the-stack_106_17035
#Uses python3 import sys from collections import deque def distance(adj, s, t): dist = {v: float('Inf') for v in range(len(adj))} dist[s] = 0 q = deque() q.append(s) while q: u = q.popleft() for nbr in adj[u]: if dist[nbr] == float('Inf'): q.append(nbr) dist[nbr] = dist[u] + 1 return -1 if dist[t] == float('Inf') else dist[t] if __name__ == '__main__': input = sys.stdin.read() data = list(map(int, input.split())) n, m = data[0:2] data = data[2:] edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2])) adj = [[] for _ in range(n)] for (a, b) in edges: adj[a - 1].append(b - 1) adj[b - 1].append(a - 1) s, t = data[2 * m] - 1, data[2 * m + 1] - 1 print(distance(adj, s, t))
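# Quick self-contained check of distance() above, bypassing the stdin parsing: in the
# path graph 0-1-2-3 (0-indexed adjacency lists) the distance from 0 to 3 is 3, and
# disconnected vertices give -1.
def _example_check():
    assert distance([[1], [0, 2], [1, 3], [2]], 0, 3) == 3
    assert distance([[], []], 0, 1) == -1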
the-stack_106_17037
""" Local storage Store data under .metaflow/ in the cwd """ import os import json import gzip from tempfile import NamedTemporaryFile from metaflow.metaflow_config import DATASTORE_LOCAL_DIR, DATASTORE_SYSROOT_LOCAL from .datastore import MetaflowDataStore, DataException, only_if_not_done from ..metadata import MetaDatum class LocalDataStore(MetaflowDataStore): TYPE = 'local' METADATA_DIR = '_meta' def _makedirs(self, path): try: os.makedirs(path) except OSError as x: if x.errno == 17: return else: raise def object_path(self, sha): root = os.path.join(self.data_root, sha[:2]) return os.path.join(root, sha) @classmethod def get_datastore_root_from_config(cls, echo, create_on_absent=True): # Compute path for DATASTORE_SYSROOT_LOCAL result = DATASTORE_SYSROOT_LOCAL if result is None: try: # Python2 current_path = os.getcwdu() except: # noqa E722 current_path = os.getcwd() check_dir = os.path.join(current_path, DATASTORE_LOCAL_DIR) check_dir = os.path.realpath(check_dir) orig_path = check_dir top_level_reached = False while not os.path.isdir(check_dir): new_path = os.path.dirname(current_path) if new_path == current_path: top_level_reached = True break # We are no longer making upward progress current_path = new_path check_dir = os.path.join(current_path, DATASTORE_LOCAL_DIR) if top_level_reached: if create_on_absent: # Could not find any directory to use so create a new one echo('Creating local datastore in current directory (%s)' % orig_path, fg='magenta', bold=True) os.mkdir(orig_path) result = orig_path else: return None else: result = check_dir else: result = os.path.join(result, DATASTORE_LOCAL_DIR) return result @classmethod def get_latest_tasks(cls, flow_name, run_id=None, steps=None, pathspecs=None): run_prefix = cls.make_path(flow_name, run_id) data_blobs = [] if os.path.exists(run_prefix): if steps is None: steps = [s for s in os.listdir(run_prefix) if s != cls.METADATA_DIR] if pathspecs is None: task_prefixes = [] for step in steps: step_prefix = cls.make_path(flow_name, run_id, step) for task in os.listdir(step_prefix): if task == cls.METADATA_DIR: continue task_prefixes.append( cls.make_path(flow_name, run_id, step, task)) else: task_prefixes = [cls.make_path(flow_name, pathspec) for pathspec in pathspecs] for task_prefix in task_prefixes: step, task = task_prefix.split('/')[-2:] # Sort the file listing to iterate in increasing order of # attempts. latest_data_path = None latest_attempt = None latest_done_attempt = None for fname in sorted(os.listdir(task_prefix)): if cls.is_done_filename(fname): _, attempt = cls.parse_filename(fname) latest_done_attempt = attempt # Read the corresponding metadata file. meta_fname = \ cls.get_metadata_filename_for_attempt(attempt) latest_data_path = os.path.join(task_prefix, meta_fname) elif cls.is_attempt_filename(fname): _, attempt = cls.parse_filename(fname) latest_attempt = attempt # Only read the metadata if the latest attempt is also done. if latest_done_attempt is not None and\ latest_done_attempt == latest_attempt: with open(latest_data_path) as f: data_blobs.append((step, task, attempt, f.read())) return data_blobs else: raise DataException("Couldn't find data at %s" % run_prefix) @classmethod def get_artifacts(cls, artifacts_to_prefetch): artifact_list = [] for path in artifacts_to_prefetch: sha = path.split('/')[-1] artifact_list.append((sha, cls.decode_gzip_data(path))) return artifact_list @only_if_not_done def save_log(self, logtype, bytebuffer): """ Save a task-specific log file represented as a bytes object. 
""" path = self.get_log_location(logtype) with open(path + '.tmp', 'wb') as f: f.write(bytebuffer) os.rename(path + '.tmp', path) return path def load_log(self, logtype, attempt_override=None): """ Load a task-specific log file represented as a bytes object. """ path = self.get_log_location(logtype, attempt_override) with open(path, 'rb') as f: return f.read() @only_if_not_done def save_metadata(self, name, metadata): """ Save a task-specific metadata dictionary as JSON. """ self._makedirs(self.root) filename = self.filename_with_attempt_prefix('%s.json' % name, self.attempt) path = os.path.join(self.root, filename) with open(path + '.tmp', 'w') as f: json.dump(metadata, f) os.rename(path + '.tmp', path) def load_metadata(self, name): """ Load a task-specific metadata dictionary as JSON. """ filename = self.filename_with_attempt_prefix('%s.json' % name, self.attempt) path = os.path.join(self.root, filename) with open(path) as f: return json.load(f) def has_metadata(self, name, with_attempt=True): attempt = self.attempt if with_attempt else None filename = self.filename_with_attempt_prefix('%s.json' % name, attempt) path = os.path.join(self.root, filename) return os.path.exists(path) @only_if_not_done def save_data(self, sha, transformable_object): """ Save a content-addressed data blob if it doesn't exist already. """ path = self.object_path(sha) if not os.path.exists(path): self._makedirs(os.path.dirname(path)) # NOTE multiple tasks may try to save an object with the # same sha concurrently, hence we need to use a proper tmp # file with NamedTemporaryFile(dir=os.path.dirname(path), prefix='blobtmp.', delete=False) as tmp: # NOTE compresslevel makes a huge difference. The default # level of 9 can be impossibly slow. with gzip.GzipFile(fileobj=tmp, mode='wb', compresslevel=3) as f: f.write(transformable_object.current()) os.rename(tmp.name, path) return path def load_data(self, sha): """ Load a content-addressed data blob. """ with gzip.open(self.object_path(sha), 'rb') as f: return f.read() @only_if_not_done def done(self): """ Write a marker indicating that datastore has finished writing to this path. """ filename = self.get_done_filename_for_attempt(self.attempt) path = os.path.join(self.root, filename) self._makedirs(self.root) try: # this is for python2 compatibility. # Python3 has open(mode='x'). fd = os.fdopen(os.open(path, os.O_EXCL | os.O_WRONLY | os.O_CREAT), 'wb') fd.close() except OSError as x: if x.errno == 17: raise DataException('Path %s already exists. Try with a ' 'different --run-id.' % path) else: raise self.metadata.register_metadata( self.run_id, self.step_name, self.task_id, [MetaDatum(field='attempt-done', value=str(self.attempt), type='attempt-done')]) self._is_done_set = True def is_done(self): """ A flag indicating whether this datastore directory was closed succesfully with done(). """ filename = self.get_done_filename_for_attempt(self.attempt) path = os.path.join(self.root, filename) return os.path.exists(path) def package_download_commands(environment,code_package): return []
the-stack_106_17039
#!/usr/bin/env python
from __future__ import print_function
from subprocess import PIPE, Popen
import os, sys, re


def joinhere(*args):
    return os.path.realpath(os.path.join(os.path.dirname(__file__), *args))


class ushuffle(object):
    def __init__(self, s, k=2, seed=12345, cap=True):
        self._ushuf = joinhere("ushuffle")
        if cap:
            self._s = s.upper()
        else:
            self._s = s
        self._k = k
        self._seed = seed

    def shuffle(self):
        x = Popen([self._ushuf, "-s", self._s, "-n", "1",
                   "-k", "%d" % self._k, "-seed", "%d" % self._seed],
                  stdout=PIPE).communicate()[0].decode("utf-8").rstrip()
        return x


if __name__ == "__main__":
    pass
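# Hedged usage sketch for the wrapper above: it shells out to a "ushuffle" binary that
# must sit in the same directory as this file; the output is a single k-let preserving
# shuffle of the input sequence. The sequence below is a made-up example.
seq = "ACGTACGTACGT"
shuffled = ushuffle(seq, k=2, seed=42).shuffle()
print(shuffled)   # same dinucleotide composition as seq, different order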
the-stack_106_17040
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 9 10:34:48 2017

@author: Aujasvi
"""

from numpy import *
import matplotlib.pyplot as plt

#Load the data set
data = loadtxt('nonlinear_classification.data')
X = data[:, :2]
T = data[:, 2]
N, d = X.shape

#Parameters
eta = .3 #Learning rate
K = 15 #Number of hidden neurons

#Splitting the data into training and testing sets (70% / 30%)
X_train_data = X[:int(len(X) * .70)]
X_test_data = X[int(len(X) * .70):]
T_train_data = T[:int(len(T) * .70)]
T_test_data = T[int(len(T) * .70):]

#Weights and biases, initialised to small random values
max_val = .1
W_hid = random.uniform(-max_val, max_val, (d, K))
b_hid = random.uniform(-max_val, max_val, K)
W_out = random.uniform(-max_val, max_val, K)
b_out = random.uniform(-max_val, max_val, 1)

#Logistic transfer function for the hidden neurons
def logistic(a):
    return 1.0 / (1.0 + exp(-a))

#Threshold transfer function for the output neuron
def threshold(a):
    P = a.copy()
    P[P > 0.] = 1.
    P[P <= 0.] = -1.
    return P

def feedforward(x, W_hid, b_hid, W_out, b_out):
    #Hidden layer
    Y = logistic(dot(x, W_hid) + b_hid)
    #Output layer
    O = threshold(dot(Y, W_out) + b_out)
    return Y, O

#Backpropagation algorithm (online updates on the training set)
errors = []
for epoch in range(100):
    nb_errors = 0
    for i in range(len(X_train_data)):
        x = X_train_data[i, :]
        t = T_train_data[i]
        Y, O = feedforward(x, W_hid, b_hid, W_out, b_out)
        if t != O:
            nb_errors += 1
            delta_out = (t - O)
            delta_hidden = Y * (1 - Y) * delta_out * W_out
            W_out += eta * Y * delta_out
            b_out += eta * delta_out
            for k in range(K):
                W_hid[:, k] += eta * x * delta_hidden[k]
            b_hid += eta * delta_hidden
    errors.append(nb_errors / float(len(X_train_data)))

plt.plot(errors)
plt.show()

M = mean(epoch)
print(M)
V = var(epoch)
print(V)

#Question 2: Convergence speed as the number of hidden neurons increases
#Question 3:
#Question 4: When the weights are initialised with max_val = 0, the error increases.
#Question 5: Mean of the number of epochs needed is 9 and variance is 0.
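# The script above only tracks the training error; this is a hedged sketch of scoring
# the held-out split with the same per-sample feedforward pass (added here for
# illustration, not part of the original assignment code).
def test_error_rate(X_test, T_test):
    mistakes = 0
    for i in range(len(X_test)):
        _, o = feedforward(X_test[i, :], W_hid, b_hid, W_out, b_out)
        if o != T_test[i]:
            mistakes += 1
    return mistakes / float(len(X_test))

print("test error rate:", test_error_rate(X_test_data, T_test_data))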
the-stack_106_17042
"""Utility functions for tests.""" from __future__ import annotations import asyncio from typing import Any, Dict from httpx import AsyncClient __all__ = ["wait_for_business"] async def wait_for_business( client: AsyncClient, username: str ) -> Dict[str, Any]: """Wait for one loop of business to complete and return its data.""" for _ in range(1, 10): await asyncio.sleep(0.5) r = await client.get(f"/mobu/flocks/test/monkeys/{username}") assert r.status_code == 200 data = r.json() if data["business"]["success_count"] > 0: break if data["business"]["failure_count"] > 0: break return data
the-stack_106_17043
# -*- coding: utf-8 -*- """Cisco DNA Center Add Port assignment for access point data model. Copyright (c) 2019-2021 Cisco Systems. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import ( absolute_import, division, print_function, unicode_literals, ) import fastjsonschema import json from dnacentersdk.exceptions import MalformedRequest from builtins import * class JSONSchemaValidatorC2A43Ad24098Baa7(object): """Add Port assignment for access point request schema definition.""" def __init__(self): super(JSONSchemaValidatorC2A43Ad24098Baa7, self).__init__() self._validator = fastjsonschema.compile(json.loads( '''{ "items": { "properties": { "authenticateTemplateName": { "type": [ "string", "null" ] }, "dataIpAddressPoolName": { "type": [ "string", "null" ] }, "deviceManagementIpAddress": { "type": [ "string", "null" ] }, "interfaceName": { "type": [ "string", "null" ] }, "siteNameHierarchy": { "type": [ "string", "null" ] }, "voiceIpAddressPoolName": { "type": [ "string", "null" ] } }, "type": [ "object", "null" ] }, "type": "array" }'''.replace("\n" + ' ' * 16, '') )) def validate(self, request): try: self._validator(request) except fastjsonschema.exceptions.JsonSchemaException as e: raise MalformedRequest( '{} is invalid. Reason: {}'.format(request, e.message) )
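# Hedged usage sketch: validating a request body against the schema class defined
# above. The payload values are placeholders; validate() simply returns when the body
# conforms and raises MalformedRequest otherwise.
validator = JSONSchemaValidatorC2A43Ad24098Baa7()
request = [{
    "deviceManagementIpAddress": "10.0.0.1",
    "interfaceName": "GigabitEthernet1/0/1",
    "siteNameHierarchy": "Global/HQ",
    "dataIpAddressPoolName": "AP_POOL",
}]
validator.validate(request)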
the-stack_106_17046
# coding=utf8 # Copyright 2018 the pycolab Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The pycolab game engine. Refer to the docstring for `Engine` for details. This module also includes the `Palette` helper class. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np from pycolab import plot from pycolab import rendering from pycolab import things import six class Engine(object): """The pycolab game engine. Every pycolab game is an instance of `Engine`. These games all have certain things in common: * They're grid-based! ▦ * Games proceed in discrete steps: the player calls the `play` method with their chosen action, and the `Engine` updates the board in response, which then becomes the observation returned to the player. * By default, observations are a single 2-D numpy array "board" with dtype `uint8`, or, alternatively, a collection of binary masks (see `Observation` in `rendering.py`). * Values are painted onto the board by instances of `Backdrop`, `Sprite`, and `Drape`, which are described in detail in `things.py`. (Now would be a fine time to go read more about them. Go ahead--it'll be fun!) * Additionally, it is expected that your game logic will be arranged within these objects somehow. * `Backdrop`, `Sprite`, and `Drape` instances can communicate with each other and affect global game state (reward, termination, etc.) through the game's `Plot` object. ("Plot" as in "thickens", not as in "Cartesian".) * (Now is NOT the best time to read more about the `Plot`; for the time being, just remember that it's a global blackboard. :-) At each game iteration, the `Engine` instance consults the `Backdrop` and each `Sprite` and `Drape` to determine how to update the board. These consultations, which happen in a specified, fixed order, are also when the game logic within those objects chooses how to react to the things they see on the board, the actions from the player(s), and the information stored in the game's `Plot`. Once all the updates have been applied, the new board is shown to the user, and the `Engine` awaits the next action. In the simplest arrangement, the `Engine` collects updates from the `Backdrop` and from all `Sprite`s and `Drape`s, then repaints the board all at once. This means that all of these objects will base their decision on the state of the board as it was when the user chose an action. More complicated arrangements are possible. By placing `Sprite`s and `Drape`s in separate "update groups", you can force the `Engine` to repaint the board after only some of the updates have been collected. For example, if update group 0 contains [sprite_A, drape_B, sprite_C] and update group 1 contains [drape_D, sprite_E, sprite_F] then the `Backdrop`, `sprite_A`, `drape_B`, and `sprite_C` will see the board as it was last seen by the user, while `drape_D`, `sprite_E`, and `sprite_F` will see the board after the updates from the first four are applied. This may simplify your game logic. 
No matter how things are arranged into update groups, the user will only see the board after the updates from *all* `Sprite`s, `Drape`s, and the `Backdrop` have been applied. From here, it's probably best to read the documentation for `Plot` (it's okay now!) and then the docstring for the `Engine` constructor. """ def __init__(self, rows, cols, nb_action, occlusion_in_layers=True): """Construct a new pycolab game engine. Builds a new pycolab game engine, ready to be populated with a `Backdrop`, `Sprite`s, and `Drape`s (see `things.py`). Once set up, an `Engine` will manage the rendering and logic of a game for one episode. (For new episodes, make a new `Engine`). A newly-constructed `Engine` makes for a really boring game: there is nothing to draw on the game board and no game logic. In fact, the `Engine` will refuse to work at all without a `Backdrop`. Here's what you need to do: after construction, supply the engine with a `Backdrop` object to paint the background of the game board (things like walls and such), and then `Sprite` and `Drape` objects to move around on top of it (see `things.py` for details). These objects can view the game board and communicate with each other (usually via a `Plot` object), and the game logic is implemented in their interactions. Here is an example of a simple game setting up a new `Engine`: engine = pycolab.Engine(rows=24, cols=80) engine.set_backdrop('#+|-* ', my_game.Mansion, time='1 AM', moon='full') engine.add_sprite('C', (22, 77), my_game.Ghost, name='Claudius') engine.add_sprite('E', (19, 61), my_game.Ghost, name='Ebenezer') engine.add_sprite('I', (11, 48), my_game.Ghost, name='Ichabod') engine.add_sprite('!', (23, 18), my_game.Player, hit_points=99) engine.add_drape('~', my_game.MistsAndVapours, breeze=1) first_obs, first_reward, first_discount = engine.its_showtime() The order in which `Sprite` and `Drape` objects are added to the `Engine` determines (partially; read on) the order in which they will be consulted for game board updates: in this case, after the `Backdrop`, which is always consulted first, it's Claudius, Ebenezer, Ichabod, the player, and then a `Drape` that paints spooky mists. This ordering cannot change once it is set. The order of addition is also the initial back-to-front "z-order": the order in which the updates are painted onto the board. Although `Backdrop` updates are always painted first, the rest of the layers can change their z-order at any point in the game (by registering a request with the `Plot` object). This may be useful if you ever want Ichabod to float in front of the spooky mists. Z-order can also be changed at game set-up time via the `set_z_order` method. Once the `Backdrop` and all of the `Sprite`s and `Drape`s are ready, call `its_showtime()` to start the game. This method "locks" the engine (i.e. no new `Sprite`s or `Drape`s can be added) and starts the episode, returning the first observation. Here is a more elaborate game setting up its `Engine`: engine = pycolab.Engine(rows=7, cols=7) engine.set_backdrop(sokoban.Warehouse.CHARACTERS, sokoban.Warehouse) engine.update_group('2. Player') engine.add_sprite('P', (5, 3), sokoban.Player) engine.update_group('1. Boxes') engine.add_sprite('1', (3, 2), sokoban.Box) engine.add_sprite('2', (5, 4), sokoban.Box) engine.add_sprite('3', (2, 5), sokoban.Box) engine.update_group('3. 
Judge') engine.add_drape('J', sokoban.Judge) first_obs, first_reward, first_discount = engine.its_showtime() The `Engine`'s order for consulting `Sprite`s and `Drape`s for updates is determined first by the sort order of the update group name, then by order of addition. Thus, in this Sokoban implementation, the `Engine` will first consult box sprites 1, 2, and 3 for board updates, then the player sprite, and finally the "Judge". (The Judge in this game happens to be an invisible `Drape` whose `update` method contains the logic that determines whether the player has won the game.) Nevertheless, the consultation order is different from the initial z-order, which starts at the backdrop and proceeds directly in the order in which the `Sprite`s and `Drape`s were `add_*`ed. (This structure could allow a player to crawl under a box in this Sokoban---or perhaps a box to crush a player!) This game has given a name to all of its update groups, which is a good idea whenever you have more than one. The default update group is named `''` (the empty string). And, for one last hyper-technical detail: the `Backdrop` can be thought of as belonging to the very first update group, and will always be the first `Engine` entity to be consulted for an update in that group. If it is desired that all `Sprite`s and `Drape`s be in a separate update group from the backdrop, the best way to accomplish this is probably to establish an update group that precedes all of your game's real `Sprite`s and `Drape`s, and to populate it with an invisible sprite that never does anything. Args: rows: Height of the game board. cols: Width of the game board. occlusion_in_layers: If `True` (the default), game entities or `Backdrop` characters that occupy the same position on the game board will be rendered into the `layers` member of `rendering.Observation`s with "occlusion": only the entity that appears latest in the game's Z-order will have its `layers` entry at that position set to `True`. If `False`, all entities and `Backdrop` characters at that position will have `True` in their `layers` entries there. This flag does not change the rendering of the "flat" `board` member of `Observation`, which always paints game entities on top of each other as dictated by the Z-order. **NOTE: This flag also determines the occlusion behavior in `layers` arguments to all game entities' `update` methods; see docstrings in [things.py] for details.** """ self._rows = rows self._cols = cols self._occlusion_in_layers = occlusion_in_layers self._nb_action = nb_action # This game's Plot object self._the_plot = plot.Plot() # True iff its_showtime() has been called and the game is underway. self._showtime = False # True iff the game has terminated. (It's still "showtime", though.) self._game_over = False # This game's Backdrop object. self._backdrop = None # This game's collection of Sprites and Drapes. The ordering of this dict # is the game's z-order, from back to front. self._sprites_and_drapes = collections.OrderedDict() # The collection of update groups. Before the its_showtime call, this is a # dict keyed by update group name, whose values are lists of Sprites and # Drapes in the update group. After the call, this becomes a dict-like list # of tuples that freezes the ordering implied by the update-group keys. self._update_groups = collections.defaultdict(list) # The current update group---used by add(). Will be set to None once the # game is underway. 
self._current_update_group = '' # This slot will hold the observation renderer once the game is underway. self._renderer = None # And this slot will hold the last observation rendered by the renderer. # It is not intended that this member be available to the user directly. # Code should not keep local references to this object or its members. self._board = None def set_backdrop(self, characters, backdrop_class, *args, **kwargs): """Add a `Backdrop` to this `Engine`. A `Backdrop` supplies the background scenery to be painted onto the game board using the characters specified in `characters`. It is always first (rearmost) in the z-order and first consulted by the `Engine` for board changes. Args: characters: A collection of ASCII characters that the `Backdrop` is allowed to use. (A string will work as an argument here.) backdrop_class: A subclass of `Backdrop` (including `Backdrop` itself) that will be constructed by this method. *args: Additional positional arguments for the `backdrop_class` constructor. **kwargs: Additional keyword arguments for the `backdrop_class` constructor. Returns: the newly-created `Backdrop`. Raises: RuntimeError: if gameplay has already begun, if `set_backdrop` has already been called for this engine, or if any characters in `characters` has already been claimed by a preceding call to the `add` method. TypeError: if `backdrop_class` is not a `Backdrop` subclass. ValueError: if `characters` are not ASCII characters. """ self._runtime_error_if_called_during_showtime('set_backdrop') return self.set_prefilled_backdrop( characters, np.zeros((self._rows, self._cols), dtype=np.uint8), backdrop_class, *args, **kwargs) def set_prefilled_backdrop( self, characters, prefill, backdrop_class, *args, **kwargs): """Add a `Backdrop` to this `Engine`, with a custom initial pattern. Much the same as `set_backdrop`, this method also allows callers to "prefill" the background with an arbitrary pattern. This method is mainly intended for use by the `ascii_art` tools; most `Backdrop` subclasses should fill their `curtain` on their own in the constructor (or in `update()`). This method does NOT check to make certain that `prefill` contains only ASCII values corresponding to characters in `characters`; your `Backdrop` class should ensure that only valid characters are present in the curtain after the first call to its `update` method returns. Args: characters: A collection of ASCII characters that the `Backdrop` is allowed to use. (A string will work as an argument here.) prefill: 2-D `uint8` numpy array of the same dimensions as this `Engine`. The `Backdrop`'s curtain will be initialised with this pattern. backdrop_class: A subclass of `Backdrop` (including `Backdrop` itself) that will be constructed by this method. *args: Additional positional arguments for the `backdrop_class` constructor. **kwargs: Additional keyword arguments for the `backdrop_class` constructor. Returns: the newly-created `Backdrop`. Raises: RuntimeError: if gameplay has already begun, if `set_backdrop` has already been called for this engine, or if any characters in `characters` has already been claimed by a preceding call to the `add` method. TypeError: if `backdrop_class` is not a `Backdrop` subclass. ValueError: if `characters` are not ASCII characters. 
""" self._runtime_error_if_called_during_showtime('set_prefilled_backdrop') self._value_error_if_characters_are_bad(characters) self._runtime_error_if_characters_claimed_already(characters) if self._backdrop: raise RuntimeError('A backdrop of type {} has already been supplied to ' 'this Engine.'.format(type(self._backdrop))) if not issubclass(backdrop_class, things.Backdrop): raise TypeError('backdrop_class arguments to Engine.set_backdrop must ' 'either be a Backdrop class or one of its subclasses.') # Construct a new curtain and palette for the Backdrop. curtain = np.zeros((self._rows, self._cols), dtype=np.uint8) palette = Palette(characters) # Fill the curtain with the prefill data. np.copyto(dst=curtain, src=prefill, casting='equiv') # Build and set the Backdrop. self._backdrop = backdrop_class(curtain, palette, *args, **kwargs) return self._backdrop def add_drape(self, character, drape_class, *args, **kwargs): """Add a `Drape` to this `Engine`. A `Drape` supplies masks that the Engine uses to paint the same character to multiple different places on the board. The positions of a particular `Drape` in the painting order (z-order) and the `Engine`'s board change consultation order are determined by order of its addition to the `Engine` and various other factors; see the `Engine` constructor docstring for details. Args: character: The ASCII character that this `Drape` directs the `Engine` to paint on the game board. drape_class: A subclass of `Drape` to be constructed by this method. *args: Additional positional arguments for the `drape_class` constructor. **kwargs: Additional keyword arguments for the `drape_class` constructor. Returns: the newly-created `Drape`. Raises: RuntimeError: if gameplay has already begun, or if any characters in `characters` has already been claimed by a preceding call to the `set_backdrop` or `add` methods. TypeError: if `drape_class` is not a `Drape` subclass. ValueError: if `character` is not a single ASCII character. """ self._runtime_error_if_called_during_showtime('add_drape') return self.add_prefilled_drape( character, np.zeros((self._rows, self._cols), dtype=np.bool_), drape_class, *args, **kwargs) def add_prefilled_drape( self, character, prefill, drape_class, *args, **kwargs): """Add a `Drape` to this `Engine`, with a custom initial mask. Much the same as `add_drape`, this method also allows callers to "prefill" the drape's `curtain` with an arbitrary mask. This method is mainly intended for use by the `ascii_art` tools; most `Drape` subclasses should fill their `curtain` on their own in the constructor (or in `update()`). Args: character: The ASCII character that this `Drape` directs the `Engine` to paint on the game board. prefill: 2-D `bool_` numpy array of the same dimensions as this `Engine`. The `Drape`'s curtain will be initialised with this pattern. drape_class: A subclass of `Drape` to be constructed by this method. *args: Additional positional arguments for the `drape_class` constructor. **kwargs: Additional keyword arguments for the `drape_class` constructor. Returns: the newly-created `Drape`. Raises: RuntimeError: if gameplay has already begun, or if any characters in `characters` has already been claimed by a preceding call to the `set_backdrop` or `add` methods. TypeError: if `drape_class` is not a `Drape` subclass. ValueError: if `character` is not a single ASCII character. 
""" self._runtime_error_if_called_during_showtime('add_prefilled_drape') self._value_error_if_characters_are_bad(character, mandatory_len=1) self._runtime_error_if_characters_claimed_already(character) if not issubclass(drape_class, things.Drape): raise TypeError('drape_class arguments to Engine.add_drape must be a ' 'subclass of Drape') # Construct a new curtain for the drape. curtain = np.zeros((self._rows, self._cols), dtype=np.bool_) # Fill the curtain with the prefill data. np.copyto(dst=curtain, src=prefill, casting='equiv') # Build and save the drape. drape = drape_class(curtain, character, *args, **kwargs) self._sprites_and_drapes[character] = drape self._update_groups[self._current_update_group].append(drape) return drape def add_sprite(self, character, position, sprite_class, *args, **kwargs): """Add a `Sprite` to this `Engine`. A `Sprite` supplies coordinates that the Engine uses to paint a character to one place on the board. The positions of a particular `Sprite` in the painting order (z-order) and the `Engine`'s board change consultation order are determined by order of its addition to the `Engine` and various other factors; see the `Engine` constructor docstring for details. Args: character: The ASCII character that this `Sprite` directs the `Engine` to paint on the game board. position: A 2-tuple or similar indexable containing the `Sprite`'s initial position on the game board. sprite_class: A subclass of `Sprite` to be constructed by this method. *args: Additional positional arguments for the `sprite_class` constructor. **kwargs: Additional keyword arguments for the `sprite_class` constructor. Returns: the newly-created `Sprite`. Raises: RuntimeError: if gameplay has already begun, or if any characters in `characters` has already been claimed by a preceding call to the `set_backdrop` or `add` methods. TypeError: if `sprite_class` is not a `Sprite` subclass. ValueError: if `character` is not a single ASCII character, or if `position` is not a valid game board coordinate. """ self._runtime_error_if_called_during_showtime('add_sprite') self._value_error_if_characters_are_bad(character, mandatory_len=1) self._runtime_error_if_characters_claimed_already(character) if not issubclass(sprite_class, things.Sprite): raise TypeError('sprite_class arguments to Engine.add_sprite must be a ' 'subclass of Sprite') if (not 0 <= position[0] < self._rows or not 0 <= position[1] < self._cols): raise ValueError('Position {} does not fall inside a {}x{} game board.' ''.format(position, self._rows, self._cols)) # Construct the game board dimensions for the benefit of this sprite. corner = things.Sprite.Position(self._rows, self._cols) # Construct a new position for the sprite. position = things.Sprite.Position(*position) # Build and save the drape. sprite = sprite_class(corner, position, character, *args, **kwargs) self._sprites_and_drapes[character] = sprite self._update_groups[self._current_update_group].append(sprite) return sprite def update_group(self, group_name): """Change the update group for subsequent `add_sprite`/`add_drape` calls. The `Engine` consults `Sprite`s and `Drape`s for board updates in an order determined first by the update group name, then by the order in which the `Sprite` or `Drape` was added to the `Engine`. See the `Engine` constructor docstring for more details. It's fine to return to an update group after leaving it. Args: group_name: name of the new current update group. Raises: RuntimeError: if gameplay has already begun. 
""" self._runtime_error_if_called_during_showtime('update_group') self._current_update_group = group_name def set_z_order(self, z_order): """Set the z-ordering of all `Sprite`s and `Drape`s in this engine. Specify the complete order in which all `Sprite`s and `Drape`s should have their characters painted onto the game board. This method is available during game set-up only. Args: z_order: an ordered collection of all of the characters corresponding to all `Sprite`s and `Drape`s registered with this `Engine`. Raises: RuntimeError: if gameplay has already begun. ValueError: if the set of characters in `z_order` does not match the set of characters corresponding to all `Sprite`s and `Drape`s registered with this `Engine`. """ self._runtime_error_if_called_during_showtime('set_z_order') if (set(z_order) != set(self._sprites_and_drapes.keys()) or len(z_order) != len(self._sprites_and_drapes)): raise ValueError('The z_order argument {} to Engine.set_z_order is not a ' 'proper permutation of the characters corresponding to ' 'Sprites and Drapes in this game, which are {}.'.format( repr(z_order), self._sprites_and_drapes.keys())) new_sprites_and_drapes = collections.OrderedDict() for character in z_order: new_sprites_and_drapes[character] = self._sprites_and_drapes[character] self._sprites_and_drapes = new_sprites_and_drapes def its_showtime(self): """Finalise `Engine` set-up and compute the first observation of the game. Switches the `Engine` from set-up mode, where more `Sprite`s and `Drape`s can be added, to "play" mode, where gameplay iterates via the `play` method. After this permanent modal switch, no further calls to `add_drape` or `add_sprite` can be made. Once in "play" mode, consults the `Backdrop` and all `Sprite`s and `Drape`s for updates, and uses these to compute the episode's first observation. Returns: A three-tuple with the following members: * A `rendering.Observation` object containing single-array and multi-array feature-map representations of the game board. * An initial reward given to the player (or players) (before it/they even gets/get a chance to play!). This reward can be any type---it all depends on what the `Backdrop`, `Sprite`s, and `Drape`s have communicated to the `Plot`. If none have communicated anything at all, this will be None. * A reinforcement learning discount factor value. By default, it will be 1.0 if the game is still ongoing; if the game has just terminated (before the player got a chance to do anything!), `discount` will be 0.0 unless the game has chosen to supply a non-standard value to the `Plot`'s `terminate_episode` method. Raises: RuntimeError: if this method is called more than once, or if no `Backdrop` class has ever been provided to the Engine. """ self._runtime_error_if_called_during_showtime('its_showtime') # It's showtime! self._showtime = True # Now that all the Sprites and Drapes are known, convert the update groups # to a more efficient structure. self._update_groups = [(key, self._update_groups[key]) for key in sorted(self._update_groups.keys())] # And, I guess we promised to do this: self._current_update_group = None # Construct the game's observation renderer. 
chars = set(self._sprites_and_drapes.keys()).union(self._backdrop.palette) if self._occlusion_in_layers: self._renderer = rendering.BaseObservationRenderer( self._rows, self._cols, chars) else: self._renderer = rendering.BaseUnoccludedObservationRenderer( self._rows, self._cols, chars) # Render a "pre-initial" board rendering from all of the data in the # Engine's Backdrop, Sprites, and Drapes. This rendering is only used as # input to these entities to collect their updates for the very first frame; # as it accesses data members inside the entities directly, it doesn't # actually run any of their code (unless implementers ignore notes that say # "Final. Do not override."). self._render() # The behaviour of this method is now identical to play() with None actions. return self.play(None) def play(self, actions): """Perform another game iteration, applying player actions. Receives an action (or actions) from the player (or players). Consults the `Backdrop` and all `Sprite`s and `Drape`s for updates in response to those actions, and derives a new observation from them to show the user. Also collects reward(s) for the last action and determines whether the episode has terminated. Args: actions: Actions supplied by the external agent(s) in response to the last board. Could be a scalar, could be an arbitrarily nested structure of... stuff, it's entirely up to the game you're making. When the game begins, however, it is guaranteed to be None. Used for the `update()` method of the `Backdrop` and all `Sprite`s and `Layer`s. Returns: A three-tuple with the following members: * A `rendering.Observation` object containing single-array and multi-array feature-map representations of the game board. * An reward given to the player (or players) for having performed `actions` in response to the last observation. This reward can be any type---it all depends on what the `Backdrop`, `Sprite`s, and `Drape`s have communicated to the `Plot`. If none have communicated anything at all, this will be None. * A reinforcement learning discount factor value. By default, it will be 1.0 if the game is still ongoing; if the game has just terminated (before the player got a chance to do anything!), `discount` will be 0.0 unless the game has chosen to supply a non-standard value to the `Plot`'s `terminate_episode` method. Raises: RuntimeError: if this method has been called before the `Engine` has been finalised via `its_showtime()`, or if this method has been called after the episode has terminated. """ if not self._showtime: raise RuntimeError('play() cannot be called until the Engine is placed ' 'in "play mode" via the its_showtime() method.') if self._game_over: raise RuntimeError('play() was called after the episode handled by this ' 'Engine has terminated.') # Update Backdrop and all Sprites and Drapes. self._update_and_render(actions) # Apply all plot directives that the Backdrop, Sprites, and Drapes have # submitted to the Plot during the update. reward, discount, should_rerender = self._apply_and_clear_plot() # If directives in the Plot changed our state in any way that would change # the appearance of the observation (e.g. changing the z-order), we'll have # to re-render it before we return it. if should_rerender: self._render() # Return first-frame rendering to the user. 
return self._board, reward, discount @property def the_plot(self): return self._the_plot @property def nb_action(self): return self._nb_action @property def rows(self): return self._rows @property def cols(self): return self._cols @property def game_over(self): return self._game_over @property def z_order(self): """Obtain a copy of the game's current z-order.""" return list(self._sprites_and_drapes.keys()) ### Abstraction breakers ### @property def backdrop(self): """Obtain the `Engine`'s `Backdrop`. Most pycolab applications don't need to access individual game entities, so using this accessor may signal that your design challenges some abstraction conventions. The canonical way to communicate with entities, for example, is through messages in the Plot. Still, the final choice is yours. We recommend you limit yourself to read-only interactions with the returned `Backdrop`. Returns: The `Engine`'s `Backdrop` object. """ return self._backdrop @property def things(self): """Obtain the `Engine`'s `Sprite`s and `Drape`s. Most pycolab applications don't need to access individual game entities, so using this accessor may signal that your design challenges some abstraction conventions. The canonical way to communicate with entities, for example, is through messages in the Plot. Still, the final choice is yours. We recommend you limit yourself to read-only interactions with the returned `Sprite`s and `Drape`s. Returns: A dict mapping ASCII characters to the `Sprite` and `Drape` entities that paint those characters onto the game board. """ return {k: t for k, t in six.iteritems(self._sprites_and_drapes)} ### Private helpers ### def _update_and_render(self, actions): """Perform all game entity updates and render the next observation. This private method is the heart of the `Engine`: as dictated by the update order, it consults the `Backdrop` and all `Sprite`s and `Layer`s for updates, then renders the game board (`self._board`) based on those updates. Args: actions: Actions supplied by the external agent(s) in response to the last board. Could be a scalar, could be an arbitrarily nested structure of... stuff, it's entirely up to the game you're making. When the game begins, however, it is guaranteed to be None. Used for the `update()` method of the `Backdrop` and all `Sprite`s and `Layer`s. """ assert self._board, ( '_update_and_render() called without a prior rendering of the board') # A new frame begins! self._the_plot.frame += 1 # We start with the backdrop; it doesn't really belong to an update group, # or it belongs to the first update group, depending on how you look at it. self._the_plot.update_group = None self._backdrop.update(actions, self._board.board, self._board.layers, self._sprites_and_drapes, self._the_plot) # Now we proceed through each of the update groups in the prescribed order. for update_group, entities in self._update_groups: # First, consult each item in this update group for updates. self._the_plot.update_group = update_group for entity in entities: entity.update(actions, self._board.board, self._board.layers, self._backdrop, self._sprites_and_drapes, self._the_plot) # Next, repaint the board to reflect the updates from this update group. self._render() def _render(self): """Render a new game board. Computes a new rendering of the game board, and assigns it to `self._board`, based on the current contents of the `Backdrop` and all `Sprite`s and `Drape`s. Uses properties of those objects to obtain those contents; no computation should be done on their part. 
Each object is "painted" on the board in a prescribed order: the `Backdrop` first, then the `Sprite`s and `Drape`s according to the z-order (the order in which they appear in `self._sprites_and_drapes` """ self._renderer.clear() self._renderer.paint_all_of(self._backdrop.curtain) for character, entity in six.iteritems(self._sprites_and_drapes): # By now we should have checked fairly carefully that all entities in # _sprites_and_drapes are Sprites or Drapes. if isinstance(entity, things.Sprite) and entity.visible: self._renderer.paint_sprite(character, entity.position) elif isinstance(entity, things.Drape): self._renderer.paint_drape(character, entity.curtain) # Done with all the layers; render the board! self._board = self._renderer.render() def _apply_and_clear_plot(self): """Apply directives to this `Engine` found in its `Plot` object. These directives are requests from the `Backdrop` and all `Drape`s and `Sprite`s for the engine to alter its global state or its interaction with the player (or players). They include requests to alter the z-order, terminate the game, or report some kind of reward. For more information on these directives, refer to `Plot` object documentation. After collecting and applying these directives to the `Engine`s state, all are cleared in preparation for the next game iteration. Returns: A 2-tuple with the following elements: * A reward value summed over all of the rewards that the `Backdrop` and all `Drape`s and `Sprite`s requested be reported to the player (or players), or None if nobody specified a reward. Otherwise, this reward can be any type; it all depends on what the `Backdrop`, `Drape`s, and `Sprite`s have provided. * A boolean value indicating whether the `Engine` should re-render the observation before supplying it to the user. This is necessary if any of the Plot directives change the `Engine`'s state in ways that would change the appearance of the observation, like changing the z-order. Raises: RuntimeError: a z-order change directive in the Plot refers to a `Sprite` or `Drape` that does not exist. """ directives = self._the_plot._get_engine_directives() # pylint: disable=protected-access # So far, there's no reason to re-render the observation. should_rerender = False # We don't expect to have too many z-order changes, so this slow, simple # algorithm will probably do the trick. for move_this, in_front_of_that in directives.z_updates: # We have a z-order change, so re-rendering is necessary. should_rerender = True # Make sure that the characters in the z-order change directive correspond # to actual `Sprite`s and `Drape`s. if move_this not in self._sprites_and_drapes: raise RuntimeError( 'A z-order change directive said to move a Sprite or Drape ' 'corresponding to character {}, but no such Sprite or Drape ' 'exists'.format(repr(move_this))) if in_front_of_that is not None: if in_front_of_that not in self._sprites_and_drapes: raise RuntimeError( 'A z-order change directive said to move a Sprite or Drape in ' 'front of a Sprite or Drape corresponding to character {}, but ' 'no such Sprite or Drape exists'.format(repr(in_front_of_that))) # Each directive means replacing the entire self._sprites_and_drapes dict. new_sprites_and_drapes = collections.OrderedDict() # Retrieve the Sprite or Drape that we are directed to move. moving_sprite_or_drape = self._sprites_and_drapes[move_this] # This special case handles circumstances where a Sprite or Drape is moved # all the way to the back of the z-order. 
      if in_front_of_that is None:
        new_sprites_and_drapes[move_this] = moving_sprite_or_drape

      # Copy all Sprites or Drapes into the new sprites_and_drapes OrderedDict,
      # inserting the moving entity in front of the one it's meant to occlude.
      for character, entity in six.iteritems(self._sprites_and_drapes):
        if character == move_this: continue
        new_sprites_and_drapes[character] = entity
        if character == in_front_of_that:
          new_sprites_and_drapes[move_this] = moving_sprite_or_drape

      # Install the OrderedDict just made as the new z-order and catalogue
      # of Sprites and Drapes.
      self._sprites_and_drapes = new_sprites_and_drapes

    # The Backdrop or one of the Sprites or Drapes may have directed the game
    # to end. Update our game-over flag.
    self._game_over = directives.game_over

    # Collect the sum of all rewards from this latest game iteration, in
    # preparation to return it to the player.
    reward = directives.summed_reward

    # Get the discount value from the latest game iteration.
    discount = directives.discount

    # Reset the Plot for the next game iteration, should there be one.
    self._the_plot._clear_engine_directives()  # pylint: disable=protected-access

    return reward, discount, should_rerender

  ### Helpers for error detection ###

  def _runtime_error_if_called_during_showtime(self, method_name):
    if self._showtime:
      raise RuntimeError('{} should not be called after its_showtime() '
                         'has been called'.format(method_name))

  def _runtime_error_if_characters_claimed_already(self, characters):
    for char in characters:
      if self._backdrop and char in self._backdrop.palette:
        raise RuntimeError('Character {} is already being used by '
                           'the backdrop'.format(repr(char)))
      if char in self._sprites_and_drapes:
        raise RuntimeError('Character {} is already being used by a sprite '
                           'or a drape'.format(repr(char)))

  def _value_error_if_characters_are_bad(self, characters, mandatory_len=None):
    if mandatory_len is not None and len(characters) != mandatory_len:
      raise ValueError(
          '{}, a string of length {}, was used where a string of length {} was '
          'required'.format(repr(characters), len(characters), mandatory_len))
    for char in characters:
      try:                 # This test won't catch all non-ASCII characters; if
        ord(char)          # someone uses a unicode string, it'll pass. But that's
      except TypeError:    # hard to do by accident.
        raise ValueError('Character {} is not an ASCII character'.format(char))


class Palette(object):
  """A helper class for turning human-readable characters into numerical values.

  Classes like `Backdrop` need to assign certain `uint8` values to cells in the
  game board. Since these values are typically printable ASCII characters, this
  assignment can be both cumbersome (e.g. `board[r][c] = ord('j')`) and error-
  prone (what if 'j' isn't a valid value for the Backdrop to use?).

  A `Palette` object (which you can give a very short name, like `p`) is
  programmed with all of the valid characters for your Backdrop. Those that are
  valid Python variable names become attributes of the object, whose access
  yields the corresponding ASCII ordinal value (e.g. `p.j == 106`). Characters
  that are not legal Python names, like `#`, can be converted through lookup
  notation (e.g. `p['#'] == 35`). However, any character that was NOT
  programmed into the `Palette` object yields an `AttributeError` or an
  `IndexError` respectively.

  Finally, this class also supports a wide range of aliases for characters that
  are not valid variable names. There is a decent chance that the name you give
  to a symbolic character is there; for example, `p.hash == p['#'] == 35`.
If it's not there, consider adding it... """ _ALIASES = dict( backtick='`', backquote='`', grave='`', tilde='~', zero='0', one='1', two='2', three='3', four='4', five='5', six='6', seven='7', eight='8', nine='9', bang='!', exclamation='!', exclamation_point='!', exclamation_pt='!', at='@', # regrettably, £ is not ASCII. hash='#', hashtag='#', octothorpe='#', number_sign='#', pigpen='#', pound='#', dollar='$', dollar_sign='$', buck='$', mammon='$', percent='%', percent_sign='%', food='%', carat='^', circumflex='^', trap='^', and_sign='&', ampersand='&', asterisk='*', star='*', splat='*', lbracket='(', left_bracket='(', lparen='(', left_paren='(', rbracket=')', right_bracket=')', rparen=')', right_paren=')', dash='-', hyphen='-', underscore='_', plus='+', add='+', equal='=', equals='=', lsquare='[', left_square_bracket='[', rsquare=']', right_square_bracket=']', lbrace='{', lcurly='{', left_brace='{', left_curly='{', left_curly_brace='{', rbrace='}', rcurly='}', right_brace='}', right_curly='}', right_curly_brace='}', pipe='|', bar='|', backslash='\\', back_slash='\\', reverse_solidus='\\', semicolon=';', colon=':', tick='\'', quote='\'', inverted_comma='\'', prime='\'', quotes='"', double_inverted_commas='"', quotation_mark='"', zed='z', comma=',', less_than='<', langle='<', left_angle='<', left_angle_bracket='<', period='.', full_stop='.', greater_than='>', rangle='>', right_angle='>', right_angle_bracket='>', question='?', question_mark='?', slash='/', solidus='/', ) def __init__(self, legal_characters): """Construct a new `Palette` object. Args: legal_characters: An iterable of characters that users of this `Palette` are allowed to use. (A string like "#.o " will work.) Raises: ValueError: a value inside `legal_characters` is not a single character. """ for char in legal_characters: if len(char) != 1: raise ValueError('Palette constructor requires legal characters to be ' 'actual single charaters. "{}" is not.'.format(char)) self._legal_characters = set(legal_characters) def __deepcopy__(self, memodict={}): return self def __getattr__(self, name): return self._actual_lookup(name, AttributeError) def __getitem__(self, key): return self._actual_lookup(key, IndexError) def __contains__(self, key): # It is intentional, but probably not so important (as long as there are no # single-character aliases) that we do not do an aliases lookup for key. return key in self._legal_characters def __iter__(self): return iter(self._legal_characters) def _actual_lookup(self, key, error): """Helper: perform character validation and conversion to numeric value.""" if key in self._ALIASES: key = self._ALIASES[key] if key in self._legal_characters: return ord(key) raise error( '{} is not a legal character in this Palette; legal characters ' 'are {}.'.format(key, list(self._legal_characters)))
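

# --- Illustrative usage sketch (editor's addition, not part of pycolab). ---
# A minimal episode loop for an already-configured Engine, assuming `engine`
# was built as in the Engine docstring examples above and `choose_action`
# maps observations to whatever action type the game expects.

def _example_episode_loop(engine, choose_action):
  """Run one episode of `engine`; return the sum of all numeric rewards."""
  total_reward = 0.0
  observation, reward, discount = engine.its_showtime()
  if reward is not None:
    total_reward += reward
  while not engine.game_over:
    action = choose_action(observation)
    observation, reward, discount = engine.play(action)
    if reward is not None:
      total_reward += reward
  return total_reward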
the-stack_106_17047
# Copyright (c) 2019 Nitin Agarwal ([email protected]) from __future__ import print_function import numpy as np import os import sys import scipy.sparse import torch import torch.utils.data as data sys.path.append('./utils') from pc_utils import * class getDataset(data.Dataset): def __init__(self, root, train=True, data_augment=True, small=False, category = ['abc_2.5k']): self.root = root self.train = train self.data_augment = data_augment self.small = small # test on a small dataset shape_paths = [] # path of all mesh files for shape_class in category: if self.train: if self.small: self.file = os.path.join(self.root, shape_class, 'train.txt') else: self.file = os.path.join(self.root, shape_class, 'train_full.txt') else: if self.small: self.file = os.path.join(self.root, shape_class, 'test.txt') else: self.file = os.path.join(self.root, shape_class, 'test_full.txt') with open(self.file) as f: for line in f: shape_paths.append(os.path.join(self.root, shape_class, line.strip())) self.datapath=[] if self.data_augment: """ data augment by scaling and rotation""" for line in shape_paths: mesh_path = line mesh={} mesh["rotate"] = False mesh["scale"] = True mesh["path"] = mesh_path self.datapath.append(mesh) mesh={} mesh["rotate"] = True mesh["scale"] = False mesh["path"] = mesh_path self.datapath.append(mesh) mesh={} mesh["rotate"] = True mesh["scale"] = True mesh["path"] = mesh_path self.datapath.append(mesh) for line in shape_paths: mesh = {} mesh_path = line mesh["rotate"]=False mesh["scale"] = False mesh["path"] = mesh_path self.datapath.append(mesh) def __getitem__(self, index): fn = self.datapath[index] if fn["path"].endswith('obj'): vertices, faces = load_obj_data(fn["path"]) else: vertices, faces = load_ply_data(fn["path"]) # vertices = uniform_sampling(vertices, faces, 2500) if fn["scale"]: vertices = scale_vertices(vertices) if fn["rotate"]: vertices = rotate_vertices(vertices) vertices = normalize_shape(vertices) Q = compute_Q_matrix(vertices, faces) adj = get_adjacency_matrix(vertices, faces, K_max=271) face_coords = get_face_coordinates(vertices, faces, K_max=271) normal = compute_vertex_normals(vertices, faces) # vertices = farthest_point_sample(vertices, 2500) vertices = self.convert_to_tensor(vertices) Q = self.convert_to_tensor(Q) Q = Q.view(vertices.size()[0], -1) adj = self.convert_to_tensor(adj) normal = self.convert_to_tensor(normal) face_coords = self.convert_to_tensor(face_coords) return vertices, Q, adj, normal, face_coords def convert_to_tensor(self, x): x = torch.from_numpy(x.astype(np.float32)) return x def __len__(self): return len(self.datapath) if __name__ == "__main__": path = '../data' obj = getDataset(root = path, train=False, data_augment=False, small=False, category=['abc_2.5k']) testdataloader = torch.utils.data.DataLoader(obj, batch_size = 1, shuffle=False, num_workers=4) print(len(obj)) for i, data in enumerate(testdataloader, 0): v, q, adj, normal, f = data print(v.size(), q.size(), adj.size(), normal.size(), f.size())
the-stack_106_17049
# Copyright 2020. ThingsBoard # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from thingsboard_gateway.connectors.converter import log from thingsboard_gateway.connectors.odbc.odbc_converter import OdbcConverter class OdbcUplinkConverter(OdbcConverter): def convert(self, config, data): if isinstance(config, str) and config == "*": return data converted_data = {} for config_item in config: try: if isinstance(config_item, str): converted_data[config_item] = data[config_item] elif isinstance(config_item, dict): if "nameExpression" in config_item: name = eval(config_item["nameExpression"], globals(), data) else: name = config_item["name"] if "column" in config_item: converted_data[name] = data[config_item["column"]] elif "value" in config_item: converted_data[name] = eval(config_item["value"], globals(), data) else: log.error("Failed to convert SQL data to TB format: no column/value configuration item") else: log.error("Failed to convert SQL data to TB format: unexpected configuration type '%s'", type(config_item)) except Exception as e: log.error("Failed to convert SQL data to TB format: %s", str(e)) return converted_data
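

# Illustrative sketch (editor's addition): a made-up configuration and SQL row
# showing what convert() produces. The column names and the value expression
# below are assumptions chosen for demonstration only.
if __name__ == '__main__':
    converter = OdbcUplinkConverter()
    row = {"device_name": "sensor-1", "temp_c": 21.5}
    config = [
        "device_name",                                             # copied as-is
        {"name": "temperatureF", "value": "temp_c * 9 / 5 + 32"},  # evaluated
    ]
    print(converter.convert(config, row))
    # Expected output: {'device_name': 'sensor-1', 'temperatureF': 70.7}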
the-stack_106_17054
""" Model Implementation """ import tensorflow as tf from tensorflow.keras import layers from tensorflow import keras class Attention(keras.Model): def __init__(self, input_dim, var_scope, reuse=tf.AUTO_REUSE): super(Attention, self).__init__() self.input_dim = input_dim with tf.variable_scope(var_scope, reuse=reuse): self.attention_w = layers.Dense(self.input_dim, name='W') self.attention_u = layers.Dense(self.input_dim, name='U') self.attention_v = layers.Dense(1, name='V') def call(self, input_x, prev_state_tuple): """ Compute the attention weight for input series hidden_state, cell_state (batch_size, hidden_dim) input_x (batch_size, num_series, input_dim), input_dim = num_steps for input attention """ prev_hidden_state, prev_cell_state = prev_state_tuple # (batch_size, 1, hidden_dim * 2) concat_state = tf.expand_dims(tf.concat([prev_hidden_state, prev_cell_state], axis=-1), axis=1) # (batch_size, num_series, input_dim) score_ = self.attention_w(concat_state) + self.attention_u(input_x) # (batch_size, num_series, 1) # Equation (8) score = self.attention_v(tf.nn.tanh(score_)) # (batch_size, num_series) # Equation (9) weight = tf.squeeze(tf.nn.softmax(score, axis=1), axis=-1) return weight class LSTMCell(keras.Model): def __init__(self, hidden_dim): super(LSTMCell, self).__init__() self.hidden_dim = hidden_dim self.layer_fc = layers.Dense(self.hidden_dim) def call(self, input_x, prev_state_tuple): """ Return next step's hidden state and cell state """ hidden_state, cell_state = prev_state_tuple # (batch_size, hidden_dim + input_dim) concat_input = tf.concat([hidden_state, input_x], axis=-1) # (batch_size * 4, hidden_dim + input_dim) concat_input_tiled = tf.tile(concat_input, [4, 1]) # Equation (3) - (6) without activation forget_, input_, output_, cell_bar = tf.split(self.layer_fc(concat_input_tiled), axis=0, num_or_size_splits=4) # (batch_size, hidden_dim) # Equation (6) cell_state = tf.nn.sigmoid(forget_) * cell_state + \ tf.nn.sigmoid(input_) * tf.nn.tanh(cell_bar) # Equation (7) hidden_state = tf.nn.sigmoid(output_) * tf.nn.tanh(cell_state) return (hidden_state, cell_state) class Encoder(keras.Model): def __init__(self, encoder_dim, num_steps): super(Encoder, self).__init__() self.encoder_dim = encoder_dim self.attention_layer = Attention(num_steps, var_scope='input_attention') self.lstm_cell = LSTMCell(encoder_dim) def call(self, inputs): """ inputs: (batch_size, num_steps, num_series) """ def one_step(prev_state_tuple, current_input): """ Move along the time axis by one step """ # (batch_size, num_series, num_steps) inputs_scan = tf.transpose(inputs, perm=[0, 2, 1]) # (batch_size, num_series) weight = self.attention_layer(inputs_scan, prev_state_tuple) weighted_current_input = weight * current_input return self.lstm_cell(weighted_current_input, prev_state_tuple) # Get the batch size from inputs self.batch_size = tf.shape(inputs)[0] self.num_steps = inputs.get_shape().as_list()[1] self.init_hidden_state = tf.random_normal([self.batch_size, self.encoder_dim]) self.init_cell_state = tf.random_normal([self.batch_size, self.encoder_dim]) # (num_steps, batch_size, num_series) inputs_ = tf.transpose(inputs, perm=[1, 0, 2]) # use scan to run over all time steps state_tuple = tf.scan(one_step, elems=inputs_, initializer=(self.init_hidden_state, self.init_cell_state)) # (batch_size, num_steps, encoder_dim) all_hidden_state = tf.transpose(state_tuple[0], perm=[1, 0, 2]) return all_hidden_state class Decoder(keras.Model): def __init__(self, decoder_dim, num_steps): super(Decoder, 
self).__init__() self.decoder_dim = decoder_dim self.attention_layer = Attention(num_steps, var_scope='temporal_attention') self.lstm_cell = LSTMCell(decoder_dim) self.layer_fc_context = layers.Dense(1) self.layer_prediction_fc_1 = layers.Dense(decoder_dim) self.layer_prediction_fc_2 = layers.Dense(1) def call(self, encoder_states, labels): """ encoder_states: (batch_size, num_steps, encoder_dim) labels: (batch_size, num_steps) """ def one_step(accumulator, current_label): """ Move along the time axis by one step """ prev_state_tuple, context = accumulator # (batch_size, num_steps) # Equation (12) (13) weight = self.attention_layer(encoder_states, prev_state_tuple) # Equation (14) # (batch_size, encoder_dim) context = tf.reduce_sum(tf.expand_dims(weight, axis=-1) * encoder_states, axis=1) # Equation (15) # (batch_size, 1) y_tilde = self.layer_fc_context(tf.concat([current_label, context], axis=-1)) # Equation (16) return self.lstm_cell(y_tilde, prev_state_tuple), context # Get the batch size from inputs self.batch_size = tf.shape(encoder_states)[0] self.num_steps = encoder_states.get_shape().as_list()[1] self.encoder_dim = encoder_states.get_shape().as_list()[-1] init_hidden_state = tf.random_normal([self.batch_size, self.decoder_dim]) init_cell_state = tf.random_normal([self.batch_size, self.decoder_dim]) init_context = tf.random_normal([self.batch_size, self.encoder_dim]) # (num_steps, batch_size, num_series) inputs_ = tf.transpose(encoder_states, perm=[1, 0, 2]) # use scan to run over all time steps state_tuple, all_context = tf.scan(one_step, elems=inputs_, initializer=((init_hidden_state, init_cell_state), init_context)) # (batch_size, num_steps, decoder_dim) all_hidden_state = tf.transpose(state_tuple[0], perm=[1, 0, 2]) # (batch_size, num_steps, encoder_dim) all_context = tf.transpose(all_context, perm=[1, 0, 2]) last_hidden_state = all_hidden_state[:, -1, :] last_context = all_context[:, -1, :] # (batch_size, 1) # Equation (22) pred_ = self.layer_prediction_fc_1(tf.concat([last_hidden_state, last_context], axis=-1)) pred = self.layer_prediction_fc_2(pred_) return pred
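

# Illustrative sketch (editor's addition): wiring the encoder and decoder on
# random tensors. The batch size, step count, and series count below are
# assumptions for demonstration; the real pipeline supplies the driving
# series `inputs` and the target history `labels` (e.g. from a data loader).
if __name__ == '__main__':
    batch_size, num_steps, num_series = 16, 10, 5
    inputs = tf.random_normal([batch_size, num_steps, num_series])
    labels = tf.random_normal([batch_size, num_steps, 1])
    encoder = Encoder(encoder_dim=64, num_steps=num_steps)
    decoder = Decoder(decoder_dim=64, num_steps=num_steps)
    encoder_states = encoder(inputs)              # (batch_size, num_steps, 64)
    prediction = decoder(encoder_states, labels)  # (batch_size, 1)
    print(prediction.shape)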
the-stack_106_17055
import logging def linear_system(a, b, c, d, e, f): """ Solves linear system given by coeffs: { a x + b y = c { d x + e y = f Returns (x, y) if there is a single solution, otherwise None. """ if a == 0: if b == 0: return None y, x = linear_system(b,a,c, e,d,f) return x, y a = float(a) if abs(e - d * b / a) < 1e-6: return None y = (f - d * c / a) / (e - d * b / a) x = c / a - b * y / a return x, y class SimplexLinearProgram(object): """ Solves max linear programming problem. """ def __init__(self, fitness, constraints): """ Fitness is a list of coeffs of fitness functions: Ax + By + Cz + ... -> (A, B, C, ...) Number of fitness coeffs should be equal to the number of variables. Constraints is the list of coeffs in constraining inequalities: Px + Qy + Rz + ... <= S -> (P, Q, R, ..., S) Number of coeffs should equal to the number of variables (use 0 for missing ones) + limiting value. """ self.fitness = fitness self.constraints = constraints @staticmethod def basis_line(index, size): return [0] * index + [1] + [0] * (size - 1 - index) def solve(self): """ Calculates and returns tuple of the max plan. """ logging.debug("fitness", self.fitness) logging.debug("constraints", self.constraints) value_count = len(self.fitness) basis_size = len(self.constraints) table = [line[:-1] + self.basis_line(index, basis_size) for index, line in enumerate(self.constraints)] logging.debug("table", table) plan = [line[-1] for line in self.constraints] logging.debug("plan", plan) index = [-x for x in self.fitness] + [0] * basis_size logging.debug("index", index) safe = 1000 mapping = list(range(value_count, value_count + basis_size)) while safe and any(x < 0 for x in index): safe -= 1 logging.debug("------ iteration -------") logging.debug("mapping", mapping) minimal_index = index.index(min(index)) logging.debug("leader column", minimal_index) leaders = [(b/x if x else -1) for b,x in zip(plan, [line[minimal_index] for line in table])] logging.debug("leaders", leaders) leader_line = leaders.index(min(l for l in leaders if l > -1)) logging.debug("leader line", leader_line) solver_element = table[leader_line][minimal_index] logging.debug("solver element", solver_element) new_plan = [x - (plan[leader_line] * line[minimal_index]) / solver_element for x, line in zip(plan, table)] new_plan[leader_line] = plan[leader_line] / solver_element logging.debug("new plan", new_plan) mapping[leader_line] = minimal_index logging.debug("new mapping", mapping) new_index = [x - (index[minimal_index] * t) / solver_element for x, t in zip(index, table[leader_line])] logging.debug("new index", new_index) new_table = [ [ x - (table[row_index][minimal_index] * table[leader_line][col_index]) / solver_element for col_index, x in enumerate(line) ] for row_index, line in enumerate(table) ] new_table[leader_line] = [x/solver_element for x in table[leader_line]] logging.debug("new table", new_table) plan = new_plan index = new_index table = new_table optimal = dict((i, value) for i, value in zip(mapping, plan) if i < value_count) logging.debug("optimal", optimal) values = [(optimal[pos] if pos in optimal else 0) for pos in range(value_count)] logging.debug("values", values) return values
the-stack_106_17057
from __future__ import annotations import toolcli import tooltime import toolstr from ctc.protocols.fei_utils import fei_psms from ctc import rpc from ctc import spec def get_command_spec() -> toolcli.CommandSpec: return { 'f': async_psms_command, 'help': 'display recent FEI redemptions', 'args': [ { 'name': '--time', 'help': 'span of time to display', 'default': '24h', }, { 'name': '--token', 'help': 'filter by redemptions of a particular token', }, { 'name': '--block', 'help': 'block', 'default': 'latest', }, { 'name': '--limit', 'help': 'limit number of mints/redeems shown', 'type': int, 'default': 15, }, ], } async def async_psms_command( time: tooltime.Timelength, token: str, block: spec.BlockNumberReference, limit: int, ) -> None: import asyncio mints_task = asyncio.create_task( fei_psms.async_get_fei_psm_mints(start_block=14700000) ) redeems_task = asyncio.create_task( fei_psms.async_get_fei_psm_redemptions(start_block=14700000) ) await async_print_psm_state(block=block) # print recent redemptions print() print() mints = await mints_task fei_psms.print_fei_psm_mints(mints, limit=limit) print() print() redemptions = await redeems_task fei_psms.print_fei_psm_redemptions(redemptions, limit=limit) async def async_print_psm_state(block: spec.BlockNumberReference) -> None: import asyncio psms = fei_psms.get_psms() mint_paused_coroutines = [ rpc.async_eth_call( to_address=psm_address, function_name='paused', block_number=block, ) for psm_name, psm_address in psms.items() ] redeem_paused_coroutines = [ rpc.async_eth_call( to_address=psm_address, function_name='redeemPaused', block_number=block, ) for psm_name, psm_address in psms.items() ] mint_paused_tasks = [ asyncio.create_task(mint_coroutine) for mint_coroutine in mint_paused_coroutines ] redeem_paused_tasks = [ asyncio.create_task(redeem_coroutine) for redeem_coroutine in redeem_paused_coroutines ] mint_paused = await asyncio.gather(*mint_paused_tasks) redeem_paused = await asyncio.gather(*redeem_paused_tasks) # print PSM state toolstr.print_text_box('FEI PSMs') print('(block = ' + str(block) + ')') print() labels = [ 'PSM', 'minting', 'redeeming', 'address', ] rows = [] for p, (psm_name, psm_address) in enumerate(psms.items()): row = [ ' '.join(psm_name.split(' ')[:-1]), str(not bool(mint_paused[p])), str(not bool(redeem_paused[p])), psm_address, ] rows.append(row) toolstr.print_table(rows=rows, labels=labels)
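

# Illustrative sketch (editor's addition): invoking the command coroutine
# directly, outside the toolcli wrapper. This assumes ctc is configured with
# a working Ethereum RPC provider; the argument values mirror the CLI
# defaults declared in get_command_spec() above.
if __name__ == '__main__':
    import asyncio

    asyncio.run(
        async_psms_command(time='24h', token=None, block='latest', limit=15)
    )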
the-stack_106_17058
from __future__ import division import numpy as np import matplotlib.pyplot as plt from math import cos,sin import pinocchio from pinocchio.rpy import rpyToMatrix from mpl_toolkits.mplot3d import Axes3D data = np.transpose(np.loadtxt("data/nmpc_traj_offline.csv")) # # time_interp = np.linspace(0,100, len(traj[0])) # # plt.plot(time_interp, traj[0]) # # plt.plot(time_interp, traj[3]) # # plt.plot(time_com, com[0]) # # plt.plot(time_com, com[1]) # # plt.show() # # plt.plot(time_interp, traj[14]) # # plt.plot(time_foot, footL[2]) # # plt.show() com_file = open("data/com.dat", "w") am_file = open("data/am.dat", "w") phase_file = open("data/phases.dat", "w") footR_file = open("data/rightFoot.dat", "w") footL_file = open("data/leftFoot.dat", "w") for i in range (len(data[0])): com_file.write(str(data[0][i]) + " " + str(data[3][i]) + " 0.892675352 " +\ str(data[1][i]) + " " + str(data[4][i])+ " 0.0 " +\ str(data[2][i]) + " " + str(data[5][i])+ " 0.0\n") am_file.write(str(data[6][i]) + " " + str(data[7][i])+ " " + str(data[8][i]) + "\n") rotR = rpyToMatrix(0,0,data[15][i]) footR_file.write(str(data[12][i]) + " " + str(data[13][i])+ " " +\ str(data[14][i]) + " " + str(rotR[0][0]) + " " +\ str(rotR[0][1]) + " " + str(rotR[0][2]) + " " + str(rotR[1][0]) +\ " " + str(rotR[1][1]) + " " + str(rotR[1][2])+ " " +\ str(rotR[2][0]) + " " + str(rotR[2][1])+ " " + str(rotR[2][2])+"\n") rotL = rpyToMatrix(0,0,data[19][i]) footL_file.write(str(data[16][i]) + " " + str(data[17][i])+ " " +\ str(data[18][i]) + " " + str(rotL[0][0]) + " " +\ str(rotL[0][1]) + " " + str(rotL[0][2]) + " " + str(rotL[1][0]) +\ " " + str(rotL[1][1]) + " " + str(rotL[1][2])+ " " +\ str(rotL[2][0]) + " " + str(rotL[2][1])+ " " + str(rotL[2][2])+"\n") if data[14][i] == 0.105 and data[18][i] == 0.105: # DS Phase # print(0) phase_file.write("0\n") elif data[14][i] != 0.105: # SS Phase : Left = Support foot # print(1) phase_file.write("1\n") else : # SS Phase : Right = Support foot # print(-1) phase_file.write("-1\n") com_file.close() am_file.close() phase_file.close() footR_file.close() footL_file.close() com = np.transpose(np.loadtxt("data/com.dat")) footR = np.transpose(np.loadtxt("data/rightFoot.dat")) footL = np.transpose(np.loadtxt("data/leftFoot.dat")) phase = np.transpose(np.loadtxt("data/phases.dat")) am = np.transpose(np.loadtxt("data/am.dat")) plt.plot(com[0],com[1],color = 'blue',label='CoM') plt.plot(footL[0],footL[1],color = 'green',label='Left Foot') plt.plot(footR[0],footR[1],color = 'red',label='Right Foot') legend = plt.legend() plt.show() fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.plot3D(com[0],com[1],com[2],color = 'blue',label='CoM') ax.plot3D(footL[0],footL[1],footL[2],color = 'green',label='Left Foot') ax.plot3D(footR[0],footR[1],footR[2],color = 'red',label='Right Foot') legend = plt.legend() plt.show() # time_com = np.linspace(0,100,len(com[0])) # time_foot = np.linspace(0,100,len(footR[0])) # plt.plot(time_foot,footR[2]) # plt.plot(time_foot,footL[2]) # plt.plot(time_foot,phase) # plt.show() # plt.plot(time_foot,footL[0]) # plt.plot(time_foot,footR[0]) # plt.plot(time_foot,footL[1]) # plt.plot(time_foot,footR[1]) # plt.show() # plt.plot(time_foot,np.arccos(footR[3])) # plt.plot(time_foot,np.arccos(footL[3])) # plt.show() # time_reduced = np.arange(0,len(footR[0]),100) # arrow_len = 0.1 # fig = plt.figure() # ax = fig.add_subplot(111, projection='3d') # ax.plot3D(footL[0],footL[1],footL[2],color = 'green',label='Left Foot') # ax.plot3D(footR[0],footR[1],footR[2],color = 'red',label='Right Foot') 
# legend = plt.legend() # ax.quiver(np.array(footR[0])[time_reduced],np.array(footR[1])[time_reduced],\ # np.array(footR[2])[time_reduced],np.array(footR[3])[time_reduced],\ # np.array(footR[6])[time_reduced],0, length=arrow_len, lw = 2) # ax.quiver(np.array(footL[0])[time_reduced],np.array(footL[1])[time_reduced],\ # np.array(footL[2])[time_reduced],np.array(footL[3])[time_reduced],\ # np.array(footL[6])[time_reduced],0, length=arrow_len, lw = 2) # ax.quiver(np.array(com[0])[time_reduced],np.array(com[1])[time_reduced],\ # np.array(com[2])[time_reduced],np.array(np.cos(am[0]))[time_reduced],\ # np.array(np.sin(am[0]))[time_reduced],0, length=arrow_len, lw = 2) # plt.show()
the-stack_106_17060
import base64 import json from typing import Callable, Dict, Mapping import uuid from cryptography.fernet import Fernet from starlette.datastructures import MutableHeaders, Secret from starlette.requests import HTTPConnection from starlette.types import ASGIApp, Message, Receive, Scope, Send class SessionMiddleware: """Based on Starlette SessionMiddleware. https://github.com/encode/starlette/blob/0.13.2/starlette/middleware/sessions.py Updated to store session id in cookie, and keep session data elsewhere. Usage: app.add_middleware(SessionMiddleware, **params) Parameters ---------- app: the ASGI application delete_session_callback(session_id): callback to delete stored session data. get_session_callback(session_id): callback to get stored session data. save_session_callback(session_id): callback to update stored session data. encryption: encrypt session data before storage if provided session_cookie: name of session cookie path: path for session cookie max_age: how long session cookies last same_site: cookie same site policy https_only: whether to require https for cookies """ def __init__( self, app: ASGIApp, delete_session_callback: Callable[[str], None], get_session_callback: Callable[[str], str], save_session_callback: Callable[[str, str], None], encryption: Fernet = None, session_cookie: str = "session", path: str = "/", max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds same_site: str = "lax", https_only: bool = False, ) -> None: self.app = app self.encryption = encryption self.delete_session_callback = delete_session_callback self.get_session_callback = get_session_callback self.save_session_callback = save_session_callback self.session_cookie = session_cookie self.path = path self.max_age = max_age self.security_flags = "httponly; samesite=" + same_site if https_only: # Secure flag can be used with HTTPS only self.security_flags += "; secure" async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] not in ("http", "websocket"): # pragma: no cover await self.app(scope, receive, send) return connection = HTTPConnection(scope) initial_session_was_empty = True session_id = None if self.session_cookie in connection.cookies: session_id = connection.cookies[self.session_cookie] try: scope["session"] = await self.get_session(session_id) initial_session_was_empty = False except Exception: scope["session"] = {} else: scope["session"] = {} async def send_wrapper(message: Message) -> None: nonlocal session_id if message["type"] == "http.response.start": if scope["session"]: session_id = session_id or uuid.uuid4().hex # Persist session await self.save_session(session_id, scope["session"]) self.set_cookie(message=message, value=session_id) elif not initial_session_was_empty: # Clear session await self.delete_session(session_id) self.set_cookie(message=message, value="null", max_age=-1) await send(message) await self.app(scope, receive, send_wrapper) async def delete_session(self, session_id: str): await self.delete_session_callback(session_id) async def get_session(self, session_id: str) -> Dict: data = await self.get_session_callback(session_id) if self.encryption: data = self.encryption.decrypt(data.encode("utf8")) return json.loads(data) async def save_session(self, session_id: str, data: Mapping): data = json.dumps(data) if self.encryption: data = self.encryption.encrypt(data.encode("utf8")).decode("utf8") await self.save_session_callback(session_id, data) def set_cookie( self, message: Message, value: str, max_age: int = None, ): headers = 
MutableHeaders(scope=message) headers.append("Cache-Control", "no-cache") headers.append( "Set-Cookie", f"{self.session_cookie}={value};" f" path={self.path};" f" Max-Age={max_age or self.max_age};" f" {self.security_flags}", )
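# --- Usage sketch -------------------------------------------------------------
# Illustrative wiring of SessionMiddleware with an in-memory session store and
# Fernet encryption.  The dictionary-backed callbacks below stand in for a real
# backend (Redis, a database, ...) and none of their names come from the module
# above; treat this as a minimal sketch rather than a reference setup.
if __name__ == "__main__":
    from starlette.applications import Starlette

    _store: Dict[str, str] = {}

    async def _get_session(session_id: str) -> str:
        return _store[session_id]

    async def _save_session(session_id: str, data: str) -> None:
        _store[session_id] = data

    async def _delete_session(session_id: str) -> None:
        _store.pop(session_id, None)

    demo_app = Starlette()
    demo_app.add_middleware(
        SessionMiddleware,
        delete_session_callback=_delete_session,
        get_session_callback=_get_session,
        save_session_callback=_save_session,
        encryption=Fernet(Fernet.generate_key()),
        https_only=False,
    )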
the-stack_106_17061
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Functions to perform input/output operations.""" import sys import os import glob import copy import re from collections.abc import Iterable import importlib import warnings import pickle import os.path import numpy as np try: import netCDF4 as nc HEN_FILE_EXTENSION = ".nc" HAS_NETCDF = True except ImportError: msg = "Warning! NetCDF is not available. Using pickle format." warnings.warn(msg) HEN_FILE_EXTENSION = ".p" HAS_NETCDF = False pass from astropy.modeling.core import Model from astropy import log from astropy.logger import AstropyUserWarning from astropy.io import fits from stingray.utils import assign_value_if_none from stingray.events import EventList from stingray.lightcurve import Lightcurve from stingray.powerspectrum import Powerspectrum, AveragedPowerspectrum from stingray.crossspectrum import Crossspectrum, AveragedCrossspectrum from stingray.pulse.modeling import SincSquareModel from stingray.pulse.search import search_best_peaks from .base import _order_list_of_arrays, _empty, is_string, force_iterable from .base import find_peaks_in_image try: _ = np.complex256 HAS_C256 = True except Exception: HAS_C256 = False cpl128 = np.dtype([(str("real"), np.double), (str("imag"), np.double)]) if HAS_C256: cpl256 = np.dtype( [(str("real"), np.longdouble), (str("imag"), np.longdouble)] ) class EFPeriodogram(object): def __init__( self, freq=None, stat=None, kind=None, nbin=None, N=None, oversample=None, M=None, pepoch=None, mjdref=None, peaks=None, peak_stat=None, best_fits=None, fdots=0, fddots=0, segment_size=1e32, filename="", parfile=None, emin=None, emax=None, ncounts=None, upperlim=None, ): self.freq = freq self.stat = stat self.kind = kind self.nbin = nbin self.oversample = oversample self.N = N self.peaks = peaks self.peak_stat = peak_stat self.best_fits = best_fits self.fdots = fdots self.fddots = fddots self.M = M self.segment_size = segment_size self.filename = filename self.parfile = parfile self.emin = emin self.emax = emax self.pepoch = pepoch self.mjdref = mjdref self.upperlim = upperlim self.ncounts = ncounts def find_peaks(self, conflevel=99.0): from .base import z2_n_detection_level, fold_detection_level ntrial = self.stat.size if hasattr(self, "oversample") and self.oversample is not None: ntrial /= self.oversample ntrial = int(ntrial) epsilon = 1 - conflevel / 100 if self.kind == "Z2n": threshold = z2_n_detection_level( epsilon=epsilon, n=self.N, ntrial=ntrial, n_summed_spectra=int(self.M), ) else: threshold = fold_detection_level( nbin=int(self.nbin), epsilon=epsilon, ntrial=ntrial ) if len(self.stat.shape) == 1: best_peaks, best_stat = search_best_peaks( self.freq, self.stat, threshold ) else: best_cands = find_peaks_in_image( self.stat, n=10, threshold_abs=threshold ) best_peaks = [] best_stat = [] for i, idx in enumerate(best_cands): f, fdot = self.freq[idx[0], idx[1]], self.fdots[idx[0], idx[1]] best_peaks.append([f, fdot]) best_stat.append(self.stat[idx[0], idx[1]]) best_peaks = np.asarray(best_peaks) best_stat = np.asarray(best_stat) if len(best_peaks) > 0: self.peaks = best_peaks self.peak_stat = best_stat return best_peaks, best_stat def get_energy_from_events(ev): if hasattr(ev, "energy") and ev.energy is not None: energy = ev.energy elabel = "Energy" elif hasattr(ev, "pi") and ev.pi is not None: energy = ev.pi elabel = "PI" ev.energy = energy else: energy = np.ones_like(ev.time) elabel = "" return elabel, energy def filter_energy(ev: EventList, emin: float, emax: float) -> (EventList, 
str): """Filter event list by energy (or PI) If an ``energy`` attribute is present, uses it. Otherwise, it switches automatically to ``pi`` Examples -------- >>> import doctest >>> from contextlib import redirect_stderr >>> import sys >>> time = np.arange(5) >>> energy = np.array([0, 0, 30, 4, 1]) >>> events = EventList(time=time, energy=energy) >>> ev_out, elabel = filter_energy(events, 3, None) >>> np.all(ev_out.time == [2, 3]) True >>> elabel == 'Energy' True >>> events = EventList(time=time, pi=energy) >>> with warnings.catch_warnings(record=True) as w: ... ev_out, elabel = filter_energy(events, None, 20) # doctest: +ELLIPSIS >>> "No energy information in event list" in str(w[-1].message) True >>> np.all(ev_out.time == [0, 1, 3, 4]) True >>> elabel == 'PI' True >>> events = EventList(time=time, pi=energy) >>> ev_out, elabel = filter_energy(events, None, None) # doctest: +ELLIPSIS >>> np.all(ev_out.time == time) True >>> elabel == 'PI' True >>> events = EventList(time=time) >>> with redirect_stderr(sys.stdout): ... ev_out, elabel = filter_energy(events, 3, None) # doctest: +ELLIPSIS ERROR:...No Energy or PI... >>> np.all(ev_out.time == time) True >>> elabel == '' True """ times = ev.time elabel, energy = get_energy_from_events(ev) # For some reason the doctest doesn't work if I don't do this instead # of using warnings.warn if elabel == "": log.error( "No Energy or PI information available. " "No energy filter applied to events" ) return ev, "" if emax is None and emin is None: return ev, elabel # For some reason the doctest doesn't work if I don't do this instead # of using warnings.warn if elabel.lower() == "pi" and (emax is not None or emin is not None): warnings.warn( f"No energy information in event list " f"while filtering between {emin} and {emax}. " f"Definition of events.energy is now based on PI." ) if emin is None: emin = np.min(energy) if emax is None: emax = np.max(energy) good = (energy >= emin) & (energy <= emax) ev.time = times[good] ev.energy = energy[good] return ev, elabel def _get_key(dict_like, key): """ Examples -------- >>> a = dict(b=1) >>> _get_key(a, 'b') 1 >>> _get_key(a, 'c') == "" True """ try: return dict_like[key] except KeyError: return "" def high_precision_keyword_read(hdr, keyword): """Read FITS header keywords, also if split in two. In the case where the keyword is split in two, like MJDREF = MJDREFI + MJDREFF in some missions, this function returns the summed value. Otherwise, the content of the single keyword Parameters ---------- hdr : dict_like The header structure, or a dictionary keyword : str The key to read in the header Returns ------- value : long double The value of the key, or None if keyword not present Examples -------- >>> hdr = dict(keywordS=1.25) >>> high_precision_keyword_read(hdr, 'keywordS') 1.25 >>> hdr = dict(keywordI=1, keywordF=0.25) >>> high_precision_keyword_read(hdr, 'keywordS') 1.25 >>> high_precision_keyword_read(hdr, 'bubabuab') is None True """ if keyword in hdr: return np.longdouble(hdr[keyword]) if len(keyword) == 8: keyword = keyword[:7] if keyword + "I" in hdr and keyword + "F" in hdr: value_i = np.longdouble(hdr[keyword + "I"]) value_f = np.longdouble(hdr[keyword + "F"]) return value_i + value_f else: return None def read_header_key(fits_file, key, hdu=1): """Read the header key ``key`` from HDU ``hdu`` of a fits file. 
Parameters ---------- fits_file: str key: str The keyword to be read Other Parameters ---------------- hdu : int """ from astropy.io import fits as pf hdulist = pf.open(fits_file) try: value = hdulist[hdu].header[key] except KeyError: # pragma: no cover value = "" hdulist.close() return value def ref_mjd(fits_file, hdu=1): """Read MJDREFF+ MJDREFI or, if failed, MJDREF, from the FITS header. Parameters ---------- fits_file : str Returns ------- mjdref : numpy.longdouble the reference MJD Other Parameters ---------------- hdu : int """ from astropy.io import fits as pf if isinstance(fits_file, Iterable) and not is_string(fits_file): fits_file = fits_file[0] log.info("opening %s", fits_file) with pf.open(fits_file) as hdul: return high_precision_keyword_read(hdul[hdu].header, "MJDREF") def get_file_extension(fname): """Get the file extension.""" return os.path.splitext(fname)[1] def get_file_format(fname): """Decide the file format of the file. Examples -------- >>> get_file_format('bu.p') 'pickle' >>> get_file_format('bu.nc') 'nc' >>> get_file_format('bu.evt') 'fits' >>> get_file_format('bu.txt') 'text' >>> get_file_format('bu.pdfghj') Traceback (most recent call last): ... RuntimeError: File format pdfghj not recognized """ ext = get_file_extension(fname) if ext == ".p": return "pickle" elif ext == ".nc": return "nc" elif ext in [".evt", ".fits"]: return "fits" elif ext in [".txt", ".qdp", ".csv"]: return "text" else: raise RuntimeError(f"File format {ext[1:]} " f"not recognized") # ---- Base function to save NetCDF4 files def save_as_netcdf(vars, varnames, formats, fname): """Save variables in a NetCDF4 file.""" rootgrp = nc.Dataset(fname, "w", format="NETCDF4") for iv, v in enumerate(vars): dims = {} dimname = varnames[iv] + "dim" dimspec = (varnames[iv] + "dim",) if formats[iv] == "c32": # Too complicated. Let's decrease precision warnings.warn("complex256 yet unsupported", AstropyUserWarning) formats[iv] = "c16" if formats[iv] == "c16": # unicode_literals breaks something, I need to specify str. 
if "cpl128" not in rootgrp.cmptypes.keys(): complex128_t = rootgrp.createCompoundType(cpl128, "cpl128") vcomp = np.empty(v.shape, dtype=cpl128) vcomp["real"] = v.real.astype(np.float64) vcomp["imag"] = v.imag.astype(np.float64) v = vcomp formats[iv] = complex128_t unsized = False try: len(v) except TypeError: unsized = True if isinstance(v, Iterable) and formats[iv] != str and not unsized: dim = len(v) dims[dimname] = dim if isinstance(v[0], Iterable): dim = len(v[0]) dims[dimname + "_2"] = dim dimspec = (dimname, dimname + "_2") else: dims[dimname] = 1 for dimname in dims.keys(): rootgrp.createDimension(dimname, dims[dimname]) vnc = rootgrp.createVariable(varnames[iv], formats[iv], dimspec) try: if formats[iv] == str: vnc[0] = v else: vnc[:] = v except Exception: log.error("Bad variable:", varnames[iv], formats[iv], dimspec, v) raise rootgrp.close() def read_from_netcdf(fname): """Read from a netCDF4 file.""" rootgrp = nc.Dataset(fname) out = {} for k in rootgrp.variables.keys(): dum = rootgrp.variables[k] values = dum.__array__() # Handle special case of complex if dum.dtype == cpl128: arr = np.empty(values.shape, dtype=np.complex128) arr.real = values[str("real")] arr.imag = values[str("imag")] values = arr # Handle special case of complex if HAS_C256 and dum.dtype == cpl256: arr = np.empty(values.shape, dtype=np.complex256) arr.real = values[str("real")] arr.imag = values[str("imag")] values = arr if dum.dtype == str or dum.size == 1: to_save = values[0] else: to_save = values if isinstance(to_save, (str, bytes)) and to_save.startswith("__bool_"): to_save = eval(to_save.replace("__bool__", "")) out[k] = to_save rootgrp.close() return out def _dum(x): return x # ----- Functions to handle file types def get_file_type(fname, raw_data=False): """Return the file type and its contents. Only works for hendrics-format pickle or netcdf files. """ contents = load_data(fname) ftype_raw = contents["__sr__class__type__"] if "Lightcurve" in ftype_raw: ftype = "lc" fun = load_lcurve elif "Color" in ftype_raw: ftype = "color" fun = load_lcurve elif "EventList" in ftype_raw: ftype = "events" fun = load_events elif "Crossspectrum" in ftype_raw: ftype = "cpds" fun = load_pds elif "Powerspectrum" in ftype_raw: ftype = "pds" fun = load_pds elif "gti" in ftype_raw: ftype = "gti" fun = _dum elif "EFPeriodogram" in ftype_raw: ftype = "folding" fun = load_folding else: raise ValueError("File format not understood") if not raw_data: contents = fun(fname) return ftype, contents # ----- functions to save and load EVENT data def save_events(eventlist, fname): """Save events in a file. 
Parameters ---------- eventlist: :class:`stingray.EventList` object Event list to be saved fname: str Name of output file """ out = dict( time=eventlist.time, gti=eventlist.gti, pi=eventlist.pi, mjdref=eventlist.mjdref, tstart=np.min(eventlist.gti), tstop=np.max(eventlist.gti), ) out["__sr__class__type__"] = str(type(eventlist)) if hasattr(eventlist, "instr") and eventlist.instr is not None: out["instr"] = eventlist.instr.lower() else: out["instr"] = "unknown" for attr in ["energy", "cal_pi", "detector_id"]: if hasattr(eventlist, attr) and getattr(eventlist, attr) is not None: out[attr] = getattr(eventlist, attr) if hasattr(eventlist, "header") and eventlist.header is not None: out["header"] = eventlist.header for attr in ["mission", "ephem", "timeref", "timesys"]: if hasattr(eventlist, attr) and getattr(eventlist, attr) is not None: out[attr] = getattr(eventlist, attr).lower() if get_file_format(fname) == "pickle": _save_data_pickle(out, fname) elif get_file_format(fname) == "nc": _save_data_nc(out, fname) def load_events(fname): """Load events from a file.""" if get_file_format(fname) == "pickle": out = _load_data_pickle(fname) elif get_file_format(fname) == "nc": out = _load_data_nc(fname) eventlist = EventList() eventlist.time = out["time"] eventlist.gti = out["gti"] for attr in ["pi", "cal_pi", "detector_id", "energy"]: if attr in out: setattr(eventlist, attr, force_iterable(out[attr])) if "mjdref" in list(out.keys()): eventlist.mjdref = out["mjdref"] if "instr" in list(out.keys()): eventlist.instr = out["instr"].lower() if "header" in list(out.keys()): eventlist.header = out["header"] if "mission" in list(out.keys()): eventlist.mission = out["mission"].lower() else: eventlist.mission = "" for attr in ["mission", "ephem", "timeref", "timesys"]: if attr in list(out.keys()): setattr(eventlist, attr, out[attr].lower()) return eventlist # ----- functions to save and load LCURVE data def save_lcurve(lcurve, fname, lctype="Lightcurve"): """Save Light curve to file Parameters ---------- lcurve: :class:`stingray.Lightcurve` object Event list to be saved fname: str Name of output file """ out = {} out["__sr__class__type__"] = str(lctype) out["counts"] = lcurve.counts out["counts_err"] = lcurve.counts_err out["time"] = lcurve.time out["dt"] = lcurve.dt out["gti"] = lcurve.gti out["err_dist"] = lcurve.err_dist out["mjdref"] = lcurve.mjdref out["tstart"] = lcurve.tstart out["tseg"] = lcurve.tseg if hasattr(lcurve, "header"): out["header"] = lcurve.header if hasattr(lcurve, "expo"): out["expo"] = lcurve.expo if hasattr(lcurve, "base"): out["base"] = lcurve.base if lctype == "Color": out["e_intervals"] = lcurve.e_intervals elif hasattr(lcurve, "e_interval") and lcurve.e_interval is not None: out["e_interval"] = lcurve.e_interval if hasattr(lcurve, "use_pi"): out["use_pi"] = lcurve.use_pi if hasattr(lcurve, "instr") and lcurve.instr is not None: out["instr"] = lcurve.instr.lower() else: out["instr"] = "unknown" if hasattr(lcurve, "mission") and lcurve.mission is not None: out["mission"] = lcurve.mission.lower() if get_file_format(fname) == "pickle": return _save_data_pickle(out, fname) elif get_file_format(fname) == "nc": return _save_data_nc(out, fname) def load_lcurve(fname): """Load light curve from a file.""" if get_file_format(fname) == "pickle": data = _load_data_pickle(fname) elif get_file_format(fname) == "nc": data = _load_data_nc(fname) lcurve = Lightcurve( data["time"], data["counts"], err=data["counts_err"], gti=data["gti"], err_dist=data["err_dist"], mjdref=data["mjdref"], 
dt=data["dt"], skip_checks=True, ) if hasattr(lcurve, "_apply_gtis"): # pragma: no cover # Compatibility with old versions of stingray lcurve.apply_gtis = lcurve._apply_gtis if "instr" in list(data.keys()) and data["instr"] is not None: lcurve.instr = data["instr"].lower() if "mission" in list(data.keys()) and data["mission"] is not None: lcurve.mission = data["mission"].lower() if "expo" in list(data.keys()): lcurve.expo = data["expo"] if "e_intervals" in list(data.keys()): lcurve.e_intervals = data["e_intervals"] if "e_interval" in list(data.keys()): lcurve.e_interval = data["e_interval"] if "use_pi" in list(data.keys()): lcurve.use_pi = data["use_pi"] if "header" in list(data.keys()): lcurve.header = data["header"] if "base" in list(data.keys()): lcurve.base = data["base"] return lcurve # ---- Functions to save epoch folding results def save_folding(efperiodogram, fname): """Save PDS in a file.""" outdata = copy.copy(efperiodogram.__dict__) outdata["__sr__class__type__"] = "EFPeriodogram" if "best_fits" in outdata and efperiodogram.best_fits is not None: model_files = [] for i, b in enumerate(efperiodogram.best_fits): mfile = fname.replace(HEN_FILE_EXTENSION, "__mod{}__.p".format(i)) save_model(b, mfile) model_files.append(mfile) outdata.pop("best_fits") if get_file_format(fname) == "pickle": return _save_data_pickle(outdata, fname) elif get_file_format(fname) == "nc": return _save_data_nc(outdata, fname) def load_folding(fname): """Load PDS from a file.""" if get_file_format(fname) == "pickle": data = _load_data_pickle(fname) elif get_file_format(fname) == "nc": data = _load_data_nc(fname) data.pop("__sr__class__type__") ef = EFPeriodogram() for key in data.keys(): setattr(ef, key, data[key]) modelfiles = glob.glob(fname.replace(HEN_FILE_EXTENSION, "__mod*__.p")) if len(modelfiles) >= 1: bmodels = [] for mfile in modelfiles: if os.path.exists(mfile): bmodels.append(load_model(mfile)[0]) ef.best_fits = bmodels if ef.peaks is not None and len(np.asarray(ef.peaks).shape) == 0: ef.peaks = [ef.peaks] return ef # ---- Functions to save PDSs def save_pds(cpds, fname, save_all=False): """Save PDS in a file.""" from .base import mkdir_p outdata = copy.copy(cpds.__dict__) outdata["__sr__class__type__"] = str(type(cpds)) if not hasattr(cpds, "instr"): outdata["instr"] = "unknown" for attr in ["show_progress", "amplitude"]: if hasattr(cpds, attr): outdata[attr] = getattr(cpds, attr) outdir = fname.replace(HEN_FILE_EXTENSION, "") if save_all: mkdir_p(outdir) for attr in ["lc1", "lc2", "pds1", "pds2"]: if save_all and hasattr(cpds, attr): value = getattr(cpds, attr) outf = f"__{attr}__" + HEN_FILE_EXTENSION if "lc" in attr and isinstance(value, Lightcurve): save_lcurve(value, os.path.join(outdir, outf)) elif "pds" in attr and isinstance(value, Crossspectrum): save_pds(value, os.path.join(outdir, outf), save_all=False) outdata.pop(attr, None) if "cs_all" in outdata: if save_all: for i, c in enumerate(cpds.cs_all): save_pds( c, os.path.join( outdir, "__cs__{}__".format(i) + HEN_FILE_EXTENSION ), ) outdata.pop("cs_all") if "best_fits" in outdata and cpds.best_fits is not None: model_files = [] for i, b in enumerate(cpds.best_fits): mfile = os.path.join( outdir, fname.replace(HEN_FILE_EXTENSION, "__mod{}__.p".format(i)), ) save_model(b, mfile) model_files.append(mfile) outdata.pop("best_fits") if get_file_format(fname) == "pickle": return _save_data_pickle(outdata, fname) elif get_file_format(fname) == "nc": return _save_data_nc(outdata, fname) def load_pds(fname, nosub=False): """Load PDS from a 
file.""" if get_file_format(fname) == "pickle": data = _load_data_pickle(fname) elif get_file_format(fname) == "nc": data = _load_data_nc(fname) type_string = data["__sr__class__type__"] if "AveragedPowerspectrum" in type_string: cpds = AveragedPowerspectrum() elif "Powerspectrum" in type_string: cpds = Powerspectrum() elif "AveragedCrossspectrum" in type_string: cpds = AveragedCrossspectrum() elif "Crossspectrum" in type_string: cpds = Crossspectrum() else: raise ValueError("Unrecognized data type in file") data.pop("__sr__class__type__") for key in data.keys(): setattr(cpds, key, data[key]) outdir = fname.replace(HEN_FILE_EXTENSION, "") modelfiles = glob.glob( os.path.join(outdir, fname.replace(HEN_FILE_EXTENSION, "__mod*__.p")) ) cpds.best_fits = None if len(modelfiles) >= 1: bmodels = [] for mfile in modelfiles: if os.path.exists(mfile): bmodels.append(load_model(mfile)[0]) cpds.best_fits = bmodels if nosub: return cpds lc1_name = os.path.join(outdir, "__lc1__" + HEN_FILE_EXTENSION) lc2_name = os.path.join(outdir, "__lc2__" + HEN_FILE_EXTENSION) pds1_name = os.path.join(outdir, "__pds1__" + HEN_FILE_EXTENSION) pds2_name = os.path.join(outdir, "__pds2__" + HEN_FILE_EXTENSION) cs_all_names = glob.glob( os.path.join(outdir, "__cs__[0-9]__" + HEN_FILE_EXTENSION) ) if os.path.exists(lc1_name): cpds.lc1 = load_lcurve(lc1_name) if os.path.exists(lc2_name): cpds.lc2 = load_lcurve(lc2_name) if os.path.exists(pds1_name): cpds.pds1 = load_pds(pds1_name) if os.path.exists(pds2_name): cpds.pds2 = load_pds(pds2_name) if len(cs_all_names) > 0: cs_all = [] for c in sorted(cs_all_names): cs_all.append(load_pds(c)) cpds.cs_all = cs_all return cpds # ---- GENERIC function to save stuff. def _load_data_pickle(fname, kind="data"): """Load generic data in pickle format.""" log.info("Loading %s and info from %s" % (kind, fname)) with open(fname, "rb") as fobj: result = pickle.load(fobj) return result def _save_data_pickle(struct, fname, kind="data"): """Save generic data in pickle format.""" log.info("Saving %s and info to %s" % (kind, fname)) with open(fname, "wb") as fobj: pickle.dump(struct, fobj) return def _load_data_nc(fname): """Load generic data in netcdf format.""" contents = read_from_netcdf(fname) keys = list(contents.keys()) keys_to_delete = [] for k in keys: if k in keys_to_delete: continue if str(contents[k]) == str("__hen__None__type__"): contents[k] = None if k[-2:] in ["_I", "_L", "_F", "_k"]: kcorr = k[:-2] integer_key = kcorr + "_I" float_key = kcorr + "_F" kind_key = kcorr + "_k" log10_key = kcorr + "_L" if not (integer_key in keys and float_key in keys): continue # Maintain compatibility with old-style files: if not (kind_key in keys and log10_key in keys): contents[kind_key] = "longdouble" contents[log10_key] = 0 keys_to_delete.extend([integer_key, float_key]) keys_to_delete.extend([kind_key, log10_key]) if contents[kind_key] == "longdouble": dtype = np.longdouble elif contents[kind_key] == "double": dtype = np.double else: raise ValueError( contents[kind_key] + ": unrecognized kind string" ) log10_part = contents[log10_key] if isinstance(contents[integer_key], Iterable): integer_part = np.array(contents[integer_key], dtype=dtype) float_part = np.array(contents[float_key], dtype=dtype) else: integer_part = dtype(contents[integer_key]) float_part = dtype(contents[float_key]) contents[kcorr] = (integer_part + float_part) * 10.0 ** log10_part for k in keys_to_delete: del contents[k] return contents def _split_high_precision_number(varname, var, probesize): var_log10 = 0 if probesize == 8: 
kind_str = "double" if probesize == 16: kind_str = "longdouble" if isinstance(var, Iterable): dum = np.min(np.abs(var)) if dum < 1 and dum > 0.0: var_log10 = np.floor(np.log10(dum)) var = np.asarray(var) / (10.0 ** var_log10) var_I = np.floor(var).astype(int) var_F = np.array(var - var_I, dtype=np.double) else: if np.abs(var) < 1 and np.abs(var) > 0.0: var_log10 = np.floor(np.log10(np.abs(var))) var = np.asarray(var) / 10.0 ** var_log10 var_I = int(np.floor(var)) var_F = np.double(var - var_I) return var_I, var_F, var_log10, kind_str def _save_data_nc(struct, fname, kind="data"): """Save generic data in netcdf format.""" log.info("Saving %s and info to %s" % (kind, fname)) varnames = [] values = [] formats = [] for k in struct.keys(): var = struct[k] if isinstance(var, bool): var = f"__bool__{var}" probe = var if isinstance(var, Iterable) and len(var) >= 1: probe = var[0] if is_string(var): probekind = str probesize = -1 elif var is None: probekind = None else: probekind = np.result_type(probe).kind probesize = np.result_type(probe).itemsize if probekind == "f" and probesize >= 8: # If a (long)double, split it in integer + floating part. # If the number is below zero, also use a logarithm of 10 before # that, so that we don't lose precision var_I, var_F, var_log10, kind_str = _split_high_precision_number( k, var, probesize ) values.extend([var_I, var_log10, var_F, kind_str]) formats.extend(["i8", "i8", "f8", str]) varnames.extend([k + "_I", k + "_L", k + "_F", k + "_k"]) elif probekind == str: values.append(var) formats.append(probekind) varnames.append(k) elif probekind is None: values.append("__hen__None__type__") formats.append(str) varnames.append(k) else: values.append(var) formats.append(probekind + "%d" % probesize) varnames.append(k) save_as_netcdf(values, varnames, formats, fname) def save_data(struct, fname, ftype="data"): """Save generic data in hendrics format.""" if get_file_format(fname) == "pickle": _save_data_pickle(struct, fname) elif get_file_format(fname) == "nc": _save_data_nc(struct, fname) def load_data(fname): """Load generic data in hendrics format.""" if get_file_format(fname) == "pickle": return _load_data_pickle(fname) elif get_file_format(fname) == "nc": return _load_data_nc(fname) else: raise TypeError( "The file type is not recognized. Did you convert the" " original files into HENDRICS format (e.g. with " "HENreadevents or HENlcurve)?" ) # QDP format is often used in FTOOLS def save_as_qdp(arrays, errors=None, filename="out.qdp", mode="w"): """Save arrays in a QDP file. Saves an array of variables, and possibly their errors, to a QDP file. Parameters ---------- arrays: [array1, array2] List of variables. All variables must be arrays and of the same length. errors: [array1, array2] List of errors. The order has to be the same of arrays; the value can be: - None if no error is assigned - an array of same length of variable for symmetric errors - an array of len-2 lists for non-symmetric errors (e.g. [[errm1, errp1], [errm2, errp2], [errm3, errp3], ...]) Other parameters ---------------- mode : str the file access mode, to be passed to the open() function. 
Can be 'w' or 'a' """ import numpy as np errors = assign_value_if_none(errors, [None for i in arrays]) data_to_write = [] list_of_errs = [] for ia, ar in enumerate(arrays): data_to_write.append(ar) if errors[ia] is None: continue shape = np.shape(errors[ia]) assert shape[0] == len(ar), "Errors and arrays must have same length" if len(shape) == 1: list_of_errs.append([ia, "S"]) data_to_write.append(errors[ia]) elif shape[1] == 2: list_of_errs.append([ia, "T"]) mine = [k[0] for k in errors[ia]] maxe = [k[1] for k in errors[ia]] data_to_write.append(mine) data_to_write.append(maxe) print_header = True if os.path.exists(filename) and mode == "a": print_header = False outfile = open(filename, mode) if print_header: for lerr in list_of_errs: i, kind = lerr print("READ %s" % kind + "ERR %d" % (i + 1), file=outfile) length = len(data_to_write[0]) for i in range(length): for idw, d in enumerate(data_to_write): print(d[i], file=outfile, end=" ") print("", file=outfile) outfile.close() def save_as_ascii(cols, filename="out.txt", colnames=None, append=False): """Save arrays as TXT file with respective errors.""" import numpy as np shape = np.shape(cols) ndim = len(shape) if ndim == 1: cols = [cols] elif ndim >= 3 or ndim == 0: log.error("Only one- or two-dim arrays accepted") return -1 lcol = len(cols[0]) log.debug("%s %s" % (repr(cols), repr(np.shape(cols)))) if append: txtfile = open(filename, "a") else: txtfile = open(filename, "w") if colnames is not None: print("#", file=txtfile, end=" ") for i_c, c in enumerate(cols): print(colnames[i_c], file=txtfile, end=" ") print("", file=txtfile) for i in range(lcol): for c in cols: print(c[i], file=txtfile, end=" ") print("", file=txtfile) txtfile.close() return 0 def print_fits_info(fits_file, hdu=1): """Print general info about an observation.""" from astropy.io import fits as pf from astropy.units import Unit from astropy.time import Time lchdulist = pf.open(fits_file) datahdu = lchdulist[hdu] header = datahdu.header info = {} info["N. events"] = _get_key(header, "NAXIS2") info["Telescope"] = _get_key(header, "TELESCOP") info["Instrument"] = _get_key(header, "INSTRUME") info["OBS_ID"] = _get_key(header, "OBS_ID") info["Target"] = _get_key(header, "OBJECT") info["Start"] = _get_key(header, "DATE-OBS") info["Stop"] = _get_key(header, "DATE-END") # Give time in MJD mjdref = high_precision_keyword_read(header, "MJDREF") tstart = high_precision_keyword_read(header, "TSTART") tstop = high_precision_keyword_read(header, "TSTOP") tunit = _get_key(header, "TIMEUNIT") start_mjd = Time(mjdref, format="mjd") + tstart * Unit(tunit) stop_mjd = Time(mjdref, format="mjd") + tstop * Unit(tunit) print("ObsID: {0}\n".format(info["OBS_ID"])) print("Date: {0} -- {1}\n".format(info["Start"], info["Stop"])) print("Date (MJD): {0} -- {1}\n".format(start_mjd, stop_mjd)) print( "Instrument: {0}/{1}\n".format( info["Telescope"], info["Instrument"] ) ) print("Target: {0}\n".format(info["Target"])) print("N. Events: {0}\n".format(info["N. 
events"])) lchdulist.close() return info def main(args=None): """Main function called by the `HENreadfile` command line script.""" from astropy.time import Time import astropy.units as u import argparse description = "Print the content of HENDRICS files" parser = argparse.ArgumentParser(description=description) parser.add_argument("files", help="List of files", nargs="+") parser.add_argument( "--print-header", help="Print the full FITS header if present in the " "meta data.", default=False, action="store_true", ) args = parser.parse_args(args) for fname in args.files: print() print("-" * len(fname)) print("{0}".format(fname)) print("-" * len(fname)) if fname.endswith(".fits") or fname.endswith(".evt"): print("This FITS file contains:", end="\n\n") print_fits_info(fname) print("-" * len(fname)) continue ftype, contents = get_file_type(fname, raw_data=True) print("This file contains:", end="\n\n") mjdref = 0.0 * u.d if "mjdref" in contents: mjdref = Time(contents["mjdref"], format="mjd") tstart = None tstop = None tseg = None for k in sorted(contents.keys()): if k == "header" and not args.print_header: continue if k == "tstart": timeval = contents[k] * u.s val = f"MET {contents[k]} s (MJD {mjdref + timeval.to(u.d)})" tstart = timeval elif k == "tstop": timeval = contents[k] * u.s val = f"MET {contents[k]} s (MJD {mjdref + timeval.to(u.d)})" tstop = timeval elif k == "tseg": val = f"{contents[k]} s" tseg = contents[k] * u.s else: val = contents[k] if isinstance(val, Iterable) and not is_string(val): length = len(val) if len(val) < 4: val = repr(list(val[:4])) else: val = repr(list(val[:4])).replace("]", "") + "...]" val = "{} (len {})".format(val, length) print((k + ":").ljust(15), val, end="\n\n") if tseg is None and (tstart is not None and tstop is not None): print(("length:").ljust(15), tstop - tstart, end="\n\n") print("-" * len(fname)) def sort_files(files): """Sort a list of HENDRICS files, looking at `Tstart` in each.""" allfiles = {} ftypes = [] for f in files: log.info("Loading file " + f) ftype, contents = get_file_type(f) instr = contents.instr ftypes.append(ftype) if instr not in list(allfiles.keys()): allfiles[instr] = [] # Add file name to the dictionary contents.__sort__filename__ = f allfiles[instr].append(contents) # Check if files are all of the same kind (lcs, PDSs, ...) ftypes = list(set(ftypes)) assert len(ftypes) == 1, "Files are not all of the same kind." instrs = list(allfiles.keys()) for instr in instrs: contents = list(allfiles[instr]) tstarts = [np.min(c.gti) for c in contents] fnames = [c.__sort__filename__ for c in contents] fnames = [x for (y, x) in sorted(zip(tstarts, fnames))] # Substitute dictionaries with the sorted list of files allfiles[instr] = fnames return allfiles def save_model(model, fname="model.p", constraints=None): """Save best-fit models to data. Parameters ---------- model : func or `astropy.modeling.core.Model` object The model to be saved fname : str, default 'models.p' The output file name Other parameters ---------------- constraints: dict Additional model constraints. Ignored for astropy models. 
""" modeldata = {"model": model, "constraints": None} if isinstance(model, (Model, SincSquareModel)): modeldata["kind"] = "Astropy" elif callable(model): nargs = model.__code__.co_argcount nkwargs = len(model.__defaults__) if not nargs - nkwargs == 1: raise TypeError( "Accepted callable models have only one " "non-keyword argument" ) modeldata["kind"] = "callable" modeldata["constraints"] = constraints else: raise TypeError( "The model has to be an Astropy model or a callable" " with only one non-keyword argument" ) with open(fname, "wb") as fobj: pickle.dump(modeldata, fobj) def load_model(modelstring): if not is_string(modelstring): raise TypeError("modelstring has to be an existing file name") if not os.path.exists(modelstring): raise FileNotFoundError("Model file not found") # modelstring is a pickle file if modelstring.endswith(".p"): log.debug("Loading model from pickle file") with open(modelstring, "rb") as fobj: modeldata = pickle.load(fobj) return modeldata["model"], modeldata["kind"], modeldata["constraints"] # modelstring is a python file elif modelstring.endswith(".py"): log.debug("Loading model from Python source") modulename = modelstring.replace(".py", "") sys.path.append(os.getcwd()) # If a module with the same name was already imported, unload it! # This is because the user might be using the same file name but # different models inside, just like we do in test_io.py if modulename in sys.modules: del sys.modules[modulename] # This invalidate_caches() is called to account for the case when # the model file does not exist the first time we call # importlib.import_module(). In this case, the second time we call it, # even if the file exists it will not exist for importlib. importlib.invalidate_caches() _model = importlib.import_module(modulename) model = _model.model constraints = None if hasattr(_model, "constraints"): constraints = _model.constraints else: raise TypeError("Unknown file type") if isinstance(model, Model): return model, "Astropy", constraints elif callable(model): nargs = model.__code__.co_argcount nkwargs = len(model.__defaults__) if not nargs - nkwargs == 1: raise TypeError( "Accepted callable models have only one " "non-keyword argument" ) return model, "callable", constraints def find_file_in_allowed_paths(fname, other_paths=None): """Check if file exists at its own relative/absolute path, or elsewhere. Parameters ---------- fname : str The name of the file, with or without a path. Other Parameters ---------------- other_paths : list of str list of other possible paths """ if fname is None: return False existance_condition = os.path.exists(fname) if existance_condition: return fname bname = os.path.basename(fname) if other_paths is not None: for p in other_paths: fullpath = os.path.join(p, bname) if os.path.exists(fullpath): log.info(f"Parfile found at different path: {fullpath}") return fullpath return False
the-stack_106_17063
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Uploads FHIR Bundle transactions to a FHIR endpoint. This module reads JSON files containing Bundle transactions generated by Synthea and uploads them to a FHIR server you specify. Example usage: python3 main.py \ https://healthcare.googleapis.com/v1beta1/projects/terrafhir/locations/us-central1/datasets/example-dataset/fhirStores/hundred/fhir \ --input_dir sample_data/fhir python3 main.py http://localhost:8099/openmrs/ws/fhir2/R4 \ --input_dir sample_data/fhir --convert_to_openmrs """ import argparse import itertools import json import logging import multiprocessing import pathlib from typing import Dict, Set import bundle import fhir_client import uploader parser = argparse.ArgumentParser( description='Upload FHIR Bundles.', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument( 'fhir_endpoint', help=( 'endpoint to upload to.\n\nFor GCP FHIR Store, the format is ' 'https://healthcare.googleapis.com/v1beta1/projects/PROJECT_ID/' 'locations/LOCATION/datasets/DATASET/fhirStores/FHIR_STORE/fhir' '\n\nFor a local OpenMRS endpoint, it is http://localhost:8099/openmrs/' 'ws/fhir2/R4')) parser.add_argument( '--input_dir', type=str, default='../output/fhir', help='directory where JSON files are stored') parser.add_argument( '--convert_to_openmrs', action='store_true', help=('specify if uploading to OpenMRS. Splits bundle to resources before ' 'uploading ')) def list_all_files(directory: str) -> Dict[str, Set[pathlib.PosixPath]]: """Lists JSON files under a directory. Args: directory: Directory containing JSON files Returns: Dictionary listing JSON files for 'hospitals', 'practitioners', and 'patient_history' """ p = pathlib.Path(directory) hospital_file = set(p.glob('hospitalInformation*.json')) practitioner_file = set(p.glob('practitionerInformation*.json')) patient_history_files = set( p.glob('*.json')) - practitioner_file - hospital_file file_type_dict = { 'hospital': hospital_file, 'practitioner': practitioner_file, 'patient_history': patient_history_files } logging.info('%s patients found in %s', len(patient_history_files), p.resolve()) return file_type_dict def convert_to_bundle(json_file: pathlib.PosixPath) -> bundle.Bundle: """Loops through list of JSON files, and loads content of each file to create Bundle object. Args: json_file: Set containing JSON files that need to be read Returns: List containing Bundle objects """ with open(json_file) as f: data = json.loads(f.read()) return bundle.Bundle(json_file, data) def upload_openmrs(sink: fhir_client.FhirClient, patient_bundle: bundle.Bundle): """Upload Patient history bundles to OpenMRS. For each bundle, we have to know the individual Patient, Encounters, and Observations resources before uploading as the OpenMRS FHIR Module does not yet support uploading Bundles. OpenMRS does not suppport uploading the hospital and practitioner files as well. 
Args: sink: OpenMRS FHIR endpoint to talk with patient_bundle: list of all the Bundles that need to be uploaded """ upload_handler = uploader.Uploader(sink) patient_bundle.extract_resources() upload_handler.upload_openmrs_bundle(patient_bundle) patient_bundle.save_mapping() def upload(sink: fhir_client.FhirClient, each_bundle: bundle.Bundle): """Upload bundles to a FHIR endpoint. Server must be able to handle Bundle transactions. Args: sink: FHIR endpoint to talk with each_bundle: Dictionary listing JSON files for 'hospitals', 'practitioners', and 'patient_history' """ upload_handler = uploader.Uploader(sink) upload_handler.upload_bundle(each_bundle) def create_sink(url: str) -> fhir_client.FhirClient: if url.startswith('https://healthcare.googleapis.com'): return fhir_client.GcpClient(url) else: return fhir_client.OpenMrsClient(url) if __name__ == '__main__': logging.basicConfig(level=logging.INFO) args = parser.parse_args() json_file_dict = list_all_files(args.input_dir) fhir_sink = create_sink(args.fhir_endpoint) if args.convert_to_openmrs: with multiprocessing.Pool() as pool: logging.info('Loading patient_history JSON files into memory') bundle_list = pool.map(convert_to_bundle, json_file_dict['patient_history']) pool.starmap(upload_openmrs, zip(itertools.repeat(fhir_sink), bundle_list)) else: # To post the files to GCP FHIR Store, they require a certain order because # Synthea only creates bundles of type transaction and POST. The order is: # hospital, practitioner, patient history for file_type in ['hospital', 'practitioner', 'patient_history']: with multiprocessing.Pool() as pool: logging.info('Loading %s JSON files into memory', file_type) bundle_list = pool.map(convert_to_bundle, json_file_dict[file_type]) pool.starmap(upload, zip(itertools.repeat(fhir_sink), bundle_list))
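# --- Self-check sketch ---------------------------------------------------------
# Illustrates how list_all_files() buckets Synthea output: hospital and
# practitioner bundles are matched by their file-name prefixes and every other
# JSON file is treated as patient history.  The temporary file names below are
# invented for the example and are not part of the uploader itself.
def _list_all_files_example():
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        for name in ('hospitalInformation123.json',
                     'practitionerInformation123.json',
                     'Jane_Doe_abc.json'):
            pathlib.Path(tmp_dir, name).write_text('{}')
        buckets = list_all_files(tmp_dir)
        assert len(buckets['hospital']) == 1
        assert len(buckets['practitioner']) == 1
        assert len(buckets['patient_history']) == 1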
the-stack_106_17064
import socket import math import time import threading from Helpers import PacketState, calc_checksum, lose_the_packet, make_ack_packet, print_progress_bar PACKET_SIZE = 200 HEADER_SIZE = 12 SERVER_PORT_NO = None PLP = None WINDOW_SIZE = None MAX_SEQ_NO = None main_lock = threading.Lock() threads = [] state = { 'base': 0, 'packets': [], 'acks_count': 0 } # States: # 0: not sent # 1: sent # 2: acked def start(filename): global SERVER_PORT_NO, WINDOW_SIZE, MAX_SEQ_NO, PLP file = open(filename, 'r') configs = file.read() configs = configs.split('\n') SERVER_PORT_NO = int(configs[0].split(':')[1].strip()) WINDOW_SIZE = int(configs[1].split(':')[1].strip()) MAX_SEQ_NO = WINDOW_SIZE PLP = float(configs[2].split(':')[1].strip()) main_socket = make_socket(SERVER_PORT_NO) start_listening(main_socket, PACKET_SIZE) def make_socket(port_no): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.bind(('', port_no)) return sock def start_listening(main_socket, datagram_size): file_data, address = main_socket.recvfrom(datagram_size) ack_pkt = make_ack_packet(0) main_socket.sendto(bytes(ack_pkt, 'UTF-8'), address) file = open('files/{}'.format(file_data.decode().split('&$')[1])) file_data = file.read() no_of_pkts = int(math.ceil(len(file_data) / (PACKET_SIZE - HEADER_SIZE))) seq_no = 0 step_size = PACKET_SIZE - HEADER_SIZE for i in range(no_of_pkts): if no_of_pkts - 1 == i: current_data = file_data[i*step_size:len(file_data)] is_final = 1 else: current_data = file_data[i*step_size:i*step_size+step_size] is_final = 0 pkt = PacketState(seq_no, 0, is_final, current_data) state['packets'].append(pkt) seq_no = (seq_no + 1) % MAX_SEQ_NO for i in range(WINDOW_SIZE): thread = threading.Thread(target=send_packet, args=(main_socket, state['packets'][i], i, address)) thread.start() threads.append(thread) print('Sent first window size') while True: rPkt, rAddress = main_socket.recvfrom(PACKET_SIZE) print('Received ack {}'.format(rPkt.decode())) thread = threading.Thread(target=handle_received_packet, args=(main_socket, rPkt, rAddress)) thread.start() threads.append(thread) check_if_thread_finished() def send_packet(sock, pkt, pkt_index, address): main_lock.acquire() if state['packets'][pkt_index].status != 2: sock.sendto(bytes(pkt.packet, 'UTF-8'), address) else: main_lock.release() return pkt.status = 1 main_lock.release() time.sleep(0.1) main_lock.acquire() if int(state['base']) == int(pkt_index) and state['packets'][pkt_index] != 2: # still didn't acknowledge, Resend for i in range(state['base'], state['base'] + WINDOW_SIZE): if not lose_the_packet(PLP): thread = threading.Thread(target=send_packet, args=(sock, state['packets'][i], i, address)) thread.start() threads.append(thread) main_lock.release() return def handle_received_packet(sock, packet, address): received_seq_no = packet.decode().split('&')[1] main_lock.acquire() if int(state['packets'][state['base']].seq_no) == int(received_seq_no): state['packets'][state['base']].status = 2 state['acks_count'] += 1 state['base'] += 1 print_progress_bar(state['acks_count'], len(state['packets'])) main_lock.release() main_lock.acquire() base = state['base'] last_index = base + WINDOW_SIZE - 1 if state['packets'][last_index].status == 0: main_lock.release() thread = threading.Thread(target=send_packet, args=(sock, state['packets'][last_index], last_index, address)) thread.start() threads.append(thread) else: main_lock.release() main_lock.acquire() if state['acks_count'] == len(state['packets']) - 1: print('File Successfully Sent.') main_lock.release() return def 
valid_ack(packet):
    # Accept an ack only if its leading checksum field matches the value
    # calc_checksum() computes for the decoded packet.
    return calc_checksum(packet.decode()) == packet.decode().split('&')[0]


def check_if_thread_finished():
    # Join and drop worker threads that have finished.  Iterate over a copy of
    # the list: removing items from `threads` while iterating over it directly
    # would skip elements.
    inactive = []
    for th in list(threads):
        if not th.is_alive():
            inactive.append(th)
            threads.remove(th)
    for th in inactive:
        th.join()
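# --- Minimal client sketch ------------------------------------------------------
# Rough illustration of the request this server expects: the first datagram
# carries the requested file name after the '&$' separator, and every data
# packet is answered with an ack built by make_ack_packet().  The request
# prefix, host and port below are assumptions made for the example, not values
# taken from the server code above.
def _demo_request(filename='test.txt', host='127.0.0.1', port=5050):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(2.0)
    # The server only looks at the part after '&$', so the leading field is
    # arbitrary here.
    sock.sendto('0&${}'.format(filename).encode(), (host, port))
    try:
        first_ack, _ = sock.recvfrom(PACKET_SIZE)
        print('handshake ack:', first_ack.decode())
    except socket.timeout:
        print('no answer from server')
    finally:
        sock.close()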
the-stack_106_17065
############################################################################### # # Tests for XlsxWriter. # # SPDX-License-Identifier: BSD-2-Clause # Copyright (c), 2013-2022, John McNamara, [email protected] # from ..excel_comparison_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename('header_image10.xlsx') self.ignore_elements = {'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup'], 'xl/worksheets/sheet2.xml': ['<pageMargins', '<pageSetup']} def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet1 = workbook.add_worksheet() worksheet2 = workbook.add_worksheet() worksheet1.set_header('&L&G', {'image_left': self.image_dir + 'red.jpg'}) worksheet2.write('A1', 'Foo') worksheet2.write_comment('B2', 'Some text') worksheet2.set_comments_author('John') workbook.close() self.assertExcelEqual()
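# --- Usage sketch (not part of the test case) ---------------------------------
# The test above drives worksheet.set_header() with '&L&G', i.e. an image (&G)
# placed in the left header section (&L) and supplied through the 'image_left'
# option.  The stand-alone sketch below shows the same call outside the test
# harness; 'demo.xlsx' and 'logo.jpg' are invented names and the image file is
# assumed to exist on disk.
def _header_image_demo():
    workbook = Workbook('demo.xlsx')
    worksheet = workbook.add_worksheet()
    worksheet.set_header('&L&G', {'image_left': 'logo.jpg'})
    workbook.close()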
the-stack_106_17066
from seldon_e2e_utils import ( wait_for_rollout, initial_rest_request, rest_request_ambassador, retry_run, create_random_data, wait_for_status, rest_request, ) from subprocess import run import time import logging class TestPrepack(object): # Test prepackaged server for sklearn def test_sklearn(self, namespace): spec = "../../servers/sklearnserver/samples/iris.yaml" retry_run(f"kubectl apply -f {spec} -n {namespace}") wait_for_status("sklearn", namespace) wait_for_rollout("sklearn", namespace) time.sleep(1) logging.warning("Initial request") r = initial_rest_request( "sklearn", namespace, data=[[0.1, 0.2, 0.3, 0.4]], dtype="ndarray" ) assert r.status_code == 200 r = rest_request_ambassador("sklearn", namespace, method="metadata") assert r.status_code == 200 res = r.json() logging.warning(res) assert res["name"] == "iris" assert res["versions"] == ["iris/v1"] logging.warning("Success for test_prepack_sklearn") run(f"kubectl delete -f {spec} -n {namespace}", shell=True) # Test prepackaged server for tfserving def test_tfserving(self, namespace): spec = "../../servers/tfserving/samples/mnist_rest.yaml" retry_run(f"kubectl apply -f {spec} -n {namespace}") wait_for_status("tfserving", namespace) wait_for_rollout("tfserving", namespace) time.sleep(1) logging.warning("Initial request") r = initial_rest_request( "tfserving", namespace, data=[create_random_data(784)[1].tolist()], dtype="ndarray", ) assert r.status_code == 200 logging.warning("Success for test_prepack_tfserving") run(f"kubectl delete -f {spec} -n {namespace}", shell=True) # Test prepackaged server for xgboost def test_xgboost(self, namespace): spec = "../../servers/xgboostserver/samples/iris.yaml" retry_run(f"kubectl apply -f {spec} -n {namespace}") wait_for_status("xgboost", namespace) wait_for_rollout("xgboost", namespace) time.sleep(1) logging.warning("Initial request") r = initial_rest_request( "xgboost", namespace, data=[[0.1, 0.2, 0.3, 0.4]], dtype="ndarray" ) assert r.status_code == 200 r = rest_request_ambassador("xgboost", namespace, method="metadata") assert r.status_code == 200 res = r.json() logging.warning(res) assert res["name"] == "xgboost-iris" assert res["versions"] == ["xgboost-iris/v1"] logging.warning("Success for test_prepack_xgboost") run(f"kubectl delete -f {spec} -n {namespace}", shell=True) # Test prepackaged server for MLflow def test_mlflow(self, namespace): spec = "../../servers/mlflowserver/samples/elasticnet_wine.yaml" retry_run(f"kubectl apply -f {spec} -n {namespace}") wait_for_status("mlflow", namespace) wait_for_rollout("mlflow", namespace) time.sleep(1) r = initial_rest_request( "mlflow", namespace, data=[[6.3, 0.3, 0.34, 1.6, 0.049, 14, 132, 0.994, 3.3, 0.49, 9.5]], dtype="ndarray", names=[ "fixed acidity", "volatile acidity", "citric acid", "residual sugar", "chlorides", "free sulfur dioxide", "total sulfur dioxide", "density", "pH", "sulphates", "alcohol", ], ) assert r.status_code == 200 r = rest_request_ambassador("mlflow", namespace, method="metadata") assert r.status_code == 200 res = r.json() logging.warning(res) assert res["name"] == "mlflow-wines" assert res["versions"] == ["mlflow-wines/v1"] run(f"kubectl delete -f {spec} -n {namespace}", shell=True) # Test prepackaged Text SKLearn Alibi Explainer def test_text_alibi_explainer(self, namespace): spec = "../resources/movies-text-explainer.yaml" retry_run(f"kubectl apply -f {spec} -n {namespace}") wait_for_status("movie", namespace) wait_for_rollout("movie", namespace, expected_deployments=2) time.sleep(5) logging.warning("Initial request") 
r = initial_rest_request(
            "movie", namespace, data=["This is test data"], dtype="ndarray"
        )
        assert r.status_code == 200

        e = rest_request(
            "movie",
            namespace,
            data=["This is test data"],
            dtype="ndarray",
            method="explain",
            predictor_name="movies-predictor",
        )
        assert e.status_code == 200
        logging.warning("Success for test_prepack_text_alibi_explainer")
        run(f"kubectl delete -f {spec} -n {namespace}", shell=True)
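# --- Template sketch for a new prepackaged-server test -------------------------
# Every test above follows the same apply -> wait -> request -> delete pattern.
# A new server test can reuse that skeleton as sketched below; the deployment
# name, manifest path and payload are placeholders, not resources that exist in
# this repository.
def _prepack_test_template(namespace, name="myserver",
                           spec="../resources/myserver.yaml"):
    retry_run(f"kubectl apply -f {spec} -n {namespace}")
    wait_for_status(name, namespace)
    wait_for_rollout(name, namespace)
    time.sleep(1)
    r = initial_rest_request(name, namespace, data=[[1.0, 2.0]], dtype="ndarray")
    assert r.status_code == 200
    run(f"kubectl delete -f {spec} -n {namespace}", shell=True)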
the-stack_106_17067
#!/usr/bin/env python3 # Copyright 2020 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Command-line tool to run jdeps and process its output into a JSON file.""" import argparse import functools import math import multiprocessing import pathlib import subprocess from typing import List, Tuple import class_dependency import package_dependency import serialization SRC_PATH = pathlib.Path(__file__).resolve().parents[3] # src/ JDEPS_PATH = SRC_PATH.joinpath('third_party/jdk/current/bin/jdeps') DEFAULT_ROOT_TARGET = 'chrome/android:monochrome_public_bundle' def class_is_interesting(name: str): """Checks if a jdeps class is a class we are actually interested in.""" if name.startswith('org.chromium.'): return True return False # pylint: disable=useless-object-inheritance class JavaClassJdepsParser(object): """A parser for jdeps class-level dependency output.""" def __init__(self): # pylint: disable=missing-function-docstring self._graph = class_dependency.JavaClassDependencyGraph() @property def graph(self): """The dependency graph of the jdeps output. Initialized as empty and updated using parse_raw_jdeps_output. """ return self._graph def parse_raw_jdeps_output(self, build_target: str, jdeps_output: str): """Parses the entirety of the jdeps output.""" for line in jdeps_output.split('\n'): self.parse_line(build_target, line) def parse_line(self, build_target: str, line: str): """Parses a line of jdeps output. The assumed format of the line starts with 'name_1 -> name_2'. """ parsed = line.split() if len(parsed) <= 3: return if parsed[2] == 'not' and parsed[3] == 'found': return if parsed[1] != '->': return dep_from = parsed[0] dep_to = parsed[2] if not class_is_interesting(dep_from): return if not class_is_interesting(dep_to): return key_from, nested_from = class_dependency.split_nested_class_from_key( dep_from) key_to, nested_to = class_dependency.split_nested_class_from_key( dep_to) from_node: class_dependency.JavaClass = self._graph.add_node_if_new( key_from) self._graph.add_node_if_new(key_to) if key_from != key_to: # Skip self-edges (class-nested dependency) self._graph.add_edge_if_new(key_from, key_to) if nested_from is not None: from_node.add_nested_class(nested_from) if nested_to is not None: from_node.add_nested_class(nested_to) from_node.add_build_target(build_target) def _run_jdeps(jdeps_path: str, filepath: pathlib.Path): """Runs jdeps on the given filepath and returns the output.""" print(f'Running jdeps and parsing output for {filepath}') jdeps_res = subprocess.run([jdeps_path, '-R', '-verbose:class', filepath], capture_output=True, text=True, check=True) return jdeps_res.stdout def _run_gn_desc_list_dependencies(build_output_dir: str, target: str, gn_path: str): """Runs gn desc to list all jars that a target depends on. This includes direct and indirect dependencies.""" gn_desc_res = subprocess.run( [gn_path, 'desc', '--all', build_output_dir, target, 'deps'], capture_output=True, text=True, check=True) return gn_desc_res.stdout JarTargetList = List[Tuple[str, pathlib.Path]] def list_original_targets_and_jars(gn_desc_output: str, build_output_dir: str) -> JarTargetList: """Parses gn desc output to list original java targets and output jar paths. 
Returns a list of tuples (build_target: str, jar_path: str), where: - build_target is the original java dependency target in the form "//path/to:target" - jar_path is the path to the built jar in the build_output_dir, including the path to the output dir """ jar_tuples: JarTargetList = [] for build_target_line in gn_desc_output.split('\n'): if not build_target_line.endswith('__compile_java'): continue build_target = build_target_line.strip() original_build_target = build_target.replace('__compile_java', '') jar_path = _get_jar_path_for_target(build_output_dir, build_target) jar_tuples.append((original_build_target, jar_path)) return jar_tuples def _get_jar_path_for_target(build_output_dir: str, build_target: str) -> str: """Calculates the output location of a jar for a java build target.""" target_path, target_name = build_target.split(':') assert target_path.startswith('//'), \ f'Build target should start with "//" but is: "{build_target}"' jar_dir = target_path[len('//'):] jar_name = target_name.replace('__compile_java', '.javac.jar') return pathlib.Path(build_output_dir) / 'obj' / jar_dir / jar_name def main(): """Runs jdeps on all JARs a build target depends on. Creates a JSON file from the jdeps output.""" arg_parser = argparse.ArgumentParser( description='Runs jdeps (dependency analysis tool) on all JARs a root ' 'build target depends on and writes the resulting dependency graph ' 'into a JSON file. The default root build target is ' 'chrome/android:monochrome_public_bundle.') required_arg_group = arg_parser.add_argument_group('required arguments') required_arg_group.add_argument('-C', '--build_output_dir', required=True, help='Build output directory.') required_arg_group.add_argument( '-o', '--output', required=True, help='Path to the file to write JSON output to. 
Will be created ' 'if it does not yet exist and overwrite existing ' 'content if it does.') arg_parser.add_argument('-t', '--target', default=DEFAULT_ROOT_TARGET, help='Root build target.') arg_parser.add_argument('-j', '--jdeps-path', default=JDEPS_PATH, help='Path to the jdeps executable.') arg_parser.add_argument('-g', '--gn-path', default='gn', help='Path to the gn executable.') arguments = arg_parser.parse_args() print('Getting list of dependency jars...') gn_desc_output = _run_gn_desc_list_dependencies(arguments.build_output_dir, arguments.target, arguments.gn_path) target_jars: JarTargetList = list_original_targets_and_jars( gn_desc_output, arguments.build_output_dir) print('Running jdeps...') # jdeps already has some parallelism jdeps_process_number = math.ceil(multiprocessing.cpu_count() / 2) with multiprocessing.Pool(jdeps_process_number) as pool: jar_paths = [target_jar for _, target_jar in target_jars] jdeps_outputs = pool.map( functools.partial(_run_jdeps, arguments.jdeps_path), jar_paths) print('Parsing jdeps output...') jdeps_parser = JavaClassJdepsParser() for raw_jdeps_output, (build_target, _) in zip(jdeps_outputs, target_jars): jdeps_parser.parse_raw_jdeps_output(build_target, raw_jdeps_output) class_graph = jdeps_parser.graph print(f'Parsed class-level dependency graph, ' f'got {class_graph.num_nodes} nodes ' f'and {class_graph.num_edges} edges.') package_graph = package_dependency.JavaPackageDependencyGraph(class_graph) print(f'Created package-level dependency graph, ' f'got {package_graph.num_nodes} nodes ' f'and {package_graph.num_edges} edges.') print(f'Dumping JSON representation to {arguments.output}.') serialization.dump_class_and_package_graphs_to_file( class_graph, package_graph, arguments.output) if __name__ == '__main__': main()
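# --- Hypothetical usage sketch (not part of the original tool) ---
# Illustrates the "name_1 -> name_2" line shape that
# JavaClassJdepsParser.parse_line() expects; the build target and class names
# below are invented for the example and never called at import time.
def _example_parse_line():
    parser = JavaClassJdepsParser()
    parser.parse_line('//chrome/browser:browser_java__compile_java',
                      'org.chromium.foo.A -> org.chromium.bar.B some.jar')
    graph = parser.graph
    return graph.num_nodes, graph.num_edges  # expected: (2, 1)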
the-stack_106_17068
import matplotlib as mpl # mpl.use('Agg') import matplotlib.pyplot as plt import numpy as np import rff import csv import rff def trials_agg(trials, folder, prefix, suffix): content = list() for idx in trials: sourcename = folder + prefix + str(idx) + suffix + '.csv' with open(sourcename,'r') as sourcefile: datareader = csv.reader(sourcefile) for row in datareader: content.append(row) targetname = folder + prefix + suffix + '.csv' with open(targetname,'w',newline='') as targetfile: datawriter = csv.writer(targetfile) datawriter.writerows(content) return 1 def read_trials(filename): rate_list = list() with open(filename,'r') as datafile: datareader = csv.reader(datafile) trials = 0 for row in datareader: trials = trials + 1 rate = list() for idx,value in enumerate(row): rate.append(float(value)) rate_list.append(rate) return rate_list, trials def rate_plot(samplesize,opt_filename,unif_filename,image_filename): opt_rate_list, opt_trials = read_trials(opt_filename) unif_rate_list, unif_trials = read_trials(unif_filename) opt_rate_list = np.array(opt_rate_list) unif_rate_list = np.array(unif_rate_list) opt_mean = np.sum(opt_rate_list,axis=0) / opt_trials unif_mean = np.sum(unif_rate_list,axis=0) / unif_trials opt_err_rate = -opt_mean + 0.9 unif_err_rate = -unif_mean + 0.9 opt_std = np.std(opt_rate_list,axis=0) unif_std = np.std(unif_rate_list,axis=0) fig = plt.figure() plt.errorbar(samplesize,unif_err_rate,unif_std,fmt='g:x',fillstyle='none',label='unif') plt.errorbar(samplesize,opt_err_rate,opt_std,fmt='b--s',fillstyle='none',label='opt') plt.yticks(np.arange(0,0.2,0.05)) plt.legend(loc=1) plt.xlabel('$\log(m)$') plt.ylabel('error rate') plt.savefig(image_filename) plt.close(fig) def main(): ### combine opt_best_score results from different trials #trials = range(1,11,1) #folder = 'result/' #prefix = 'opt_best_score trial ' #suffix = ' ' #trials_agg(trials,folder,prefix,suffix) ### combine unif_best_score results from different trials #trials = range(1,11,1) #folder = 'result/' #prefix = 'unif_best_score trial ' #suffix = ' ' #trials_agg(trials,folder,prefix,suffix) ### plot the learning rate #samplesize = np.arange(2,6,0.5) #opt_filename = 'result/opt_best_score trial .csv' #unif_filename = 'result/unif_best_score trial .csv' #image_filename = 'image/learningrate.eps' #rate_plot(samplesize,opt_filename,unif_filename,image_filename) ### plot sample points X = np.loadtxt('data/ideal_Xtest.txt') Y = np.loadtxt('data/ideal_Ytest.txt') ratio = 50 / len(X) rff.plot_circle(X,Y,ratio) if __name__ == '__main__': main()
the-stack_106_17069
import sys
import os
from PIL import Image

path = sys.argv[1]
directory = sys.argv[2]

if not os.path.exists(directory):
    os.makedirs(directory)

count = 0
for filename in os.listdir(path):
    count += 1
    clean_name = os.path.splitext(filename)[0]
    # os.path.join adds the separator itself, so it does not matter whether the
    # user ends the input path with a '/' or not.
    img = Image.open(os.path.join(path, filename))
    img.save(os.path.join(directory, '{}.png'.format(clean_name)), 'png')

print('all done!')
the-stack_106_17070
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Media', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=255)), ('url', models.URLField(max_length=500, null=True, blank=True)), ('media_type', models.CharField(max_length=5, choices=[('image', 'Image'), ('video', 'Video')])), ('content', models.FileField(null=True, upload_to='site/', blank=True)), ('content_type', models.CharField(max_length=255, null=True, blank=True)), ('width', models.IntegerField(default=0)), ('height', models.IntegerField(default=0)), ], options={ 'verbose_name': 'Media', 'verbose_name_plural': 'Medias', }, ), ]
the-stack_106_17073
from hikari.scripts.angular_explorer import angular_property_explorer_factory def r1_map(a, b, c, al, be, ga, space_group='P1', axis='', fix_scale=False, histogram=True, opening_angle=35, orientation=None, path='~/sortav.lst', output_quality=3, resolution=1.2, wavelength='MoKa'): """ Calculate and draw a r1 map for a given crystal in diamond anvil cell (DAC) with a given opening angle, as a function of crystal orientation. The script accepts unit cell & space group information, runs SHELXL, and reads value of R1 after single refinement. Results are logged into text files and drawn with gnuplot or matplotlib, depending on settings. For further detail concerning r1_map, its basis and uses, refer to :py:func:`hikari.scripts.potency_map`, as well as selected terminology described in `this paper <https://doi.org/10.1107/S2052252521009532>`_. """ kwargs = locals() ape = angular_property_explorer_factory.create(prop='r1') ape.set_up(**kwargs) ape.explore() ape.write_hist_file() ape.draw_matplotlib_map() ape.draw_gnuplot_map() if __name__ == '__main__': r1_map(5.64109, 5.64109, 5.64109, 90, 90, 90, space_group='Fm-3m', path='~/_/NaCl/NaCl.hkl', output_quality=5, wavelength='MoKa')
the-stack_106_17074
""" Module containing functions used to load and write data """ from typing import * import zarr import sys import time import numpy as np import pandas as pd from pathlib import Path from dask import dataframe as dd from dask.distributed import Client from pysmFISH.utils import convert_from_uint16_to_float64 from pysmFISH.logger_utils import selected_logger def create_empty_zarr_file(experiment_fpath:str,tag:str)-> str: """Function that create and empty zarr file Args: experiment_fpath (str): location of the folder to be processed tag (str): string to add to the file name Returns: str: path of the created file """ experiment_fpath = Path(experiment_fpath) experiment_name = experiment_fpath.stem zarr_fpath = experiment_fpath / (experiment_name + '_' + tag + '.zarr') store = zarr.DirectoryStore(zarr_fpath,'w') grp = zarr.group(store=store) return zarr_fpath def consolidate_zarr_metadata(parsed_raw_data_fpath: str): """Function to consolidate all the zarr metadata in one unique json file for eady indexing and searching Args: parsed_raw_data_fpath (str): path to the zarr file for which the metadata needs to be consolidated Returns: zarr groups instance with the consolidated metadata """ logger = selected_logger() try: store = zarr.DirectoryStore(parsed_raw_data_fpath) consolidated_grp = zarr.consolidate_metadata(store) except: logger.error(f'cannot consolidate metadata of the parsed zarr file') sys.exit(f'cannot consolidate metadata of the parsed zarr file') else: return consolidated_grp def open_consolidated_metadata(parsed_raw_data_fpath:str): """Load the consolidated json metadata file Args: parsed_raw_data_fpath (str): path to the zarr file with the consolidated metadata Returns: zarr groups instance with the consolidated metadata """ logger = selected_logger() try: store = zarr.DirectoryStore(parsed_raw_data_fpath) except: logger.error(f'the metadata are not consolidated') else: consolidated_grp = zarr.open_consolidated(store) return consolidated_grp def load_raw_images(zarr_grp_name:str,parsed_raw_data_fpath:str)->Tuple[np.ndarray,Dict]: """Function used to load a raw image and metadata from the parsed raw file and the attrs for the filtering Args: zarr_grp_name (str): name to the group to process. The group contain the raw images and the corresponding metadata. grp = experiment_name_channel_fov_X dataset = raw_data_fov_X parsed_raw_data_fpath (str): fpath to zarr store containing the parsed raw images Returns: Tuple[np.ndarray,Dict]: return the selected image and the corresponding metadata """ logger = selected_logger() st = zarr.DirectoryStore(parsed_raw_data_fpath) root = zarr.group(store=st,overwrite=False) metadata = root[zarr_grp_name].attrs img = root[zarr_grp_name][metadata['fov_name']][...] return img, metadata def load_general_zarr(fov_subdataset: pd.Series ,parsed_raw_data_fpath:str, tag:str)->Tuple[np.ndarray,Dict]: """Function used to load images stored in a zarr file (ex. 
preprocessed zarr) Args: fov_subdataset (pd.Series): Dataset metadata corresponding to as specific fov parsed_raw_data_fpath (str): path to the zarr file tag (str): string used to specify the zarr file Returns: Tuple[np.ndarray,Dict]: return the selected image and the corresponding metadata """ logger = selected_logger() st = zarr.DirectoryStore(parsed_raw_data_fpath) root = zarr.group(store=st,overwrite=False) fov_name = tag + '_fov_' + str(fov_subdataset.fov_num) grp_name = fov_subdataset.experiment_name +'_' + fov_subdataset.channel + '_round_' + str(fov_subdataset.round_num) + '_fov_' + str(fov_subdataset.fov_num) img = root[grp_name][fov_name][...] img = convert_from_uint16_to_float64(img) metadata = root[grp_name].attrs return img, metadata def simple_output_plotting(experiment_fpath: str, stitching_selected: str, selected_Hdistance: float, client, input_file_tag:str, file_tag: str): """Utility function used to create a pandas dataframe with a simplified version of the eel analysis output that can be used for quick visualization Args: experiment_fpath (str): Path to the experiment to process stitching_selected (str): Define with stitched data will be selected for creating the simplified dataframe selected_Hdistance (float): Used to select the dots with hamming distance below this value (if selected_Hdistance is 0 plot the perfect barcodes ) client (Client): Dask client taking care of the processing input_file_tag (str): File type to load for the plotting file_tag (str): tag to label the output file """ experiment_fpath = Path(experiment_fpath) counts_dd = dd.read_parquet(experiment_fpath / 'results' / ('*' + input_file_tag +'*.parquet'),engine='pyarrow') date_tag = time.strftime("%y%m%d_%H_%M_%S") r_tag = 'r_px_' + stitching_selected c_tag = 'c_px_' + stitching_selected if selected_Hdistance == 0: counts_dd_below = counts_dd.loc[counts_dd.hamming_distance == selected_Hdistance, :] else: counts_dd_below = counts_dd.loc[counts_dd.hamming_distance < selected_Hdistance, :] counts_df = counts_dd_below.loc[:,['fov_num',r_tag,c_tag, 'decoded_genes']].compute() counts_df=counts_df.dropna(subset=['decoded_genes']) fpath = experiment_fpath / 'results' / (date_tag + '_' + experiment_fpath.stem + '_data_summary_simple_plotting_'+file_tag+'.parquet') counts_df.to_parquet(fpath,index=False) def simple_output_plotting_serial(experiment_fpath: str, stitching_selected: str, client, input_file_tag:str, file_tag: str): """Utility function used to create a pandas dataframe with a simplified version of the eel analysis output that can be used for quick visualization Args: experiment_fpath (str): Path to the experiment to process stitching_selected (str): Define with stitched data will be selected for creating the simplified dataframe client (Client): Dask client taking care of the processing input_file_tag (str): File type to load for the plotting file_tag (str): tag to label the output file """ experiment_fpath = Path(experiment_fpath) counts_dd = dd.read_parquet(experiment_fpath / 'results' / ('*' + input_file_tag +'*.parquet'),engine='pyarrow') date_tag = time.strftime("%y%m%d_%H_%M_%S") r_tag = 'r_px_' + stitching_selected c_tag = 'c_px_' + stitching_selected counts_df = counts_dd.loc[:,['fov_num',r_tag,c_tag, 'target_name']].compute() counts_df=counts_df.dropna(subset=['target_name']) fpath = experiment_fpath / 'results' / (date_tag + '_' + experiment_fpath.stem + '_data_summary_simple_plotting_'+file_tag+'.parquet') counts_df.to_parquet(fpath,index=False)
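# Hypothetical sketch (not part of the pipeline) of the storage layout that
# load_raw_images expects: one group per (experiment, channel, round, fov)
# whose attrs hold the metadata, with the dataset name stored under the
# 'fov_name' key. All names and the tiny array below are invented.
def _example_load_raw_images(tmp_zarr_path: str):
    st = zarr.DirectoryStore(tmp_zarr_path)
    root = zarr.group(store=st, overwrite=True)
    grp = root.create_group('exp1_Cy5_fov_0')
    grp.attrs['fov_name'] = 'raw_data_fov_0'
    grp.create_dataset('raw_data_fov_0', data=np.zeros((4, 4), dtype='uint16'))
    img, metadata = load_raw_images('exp1_Cy5_fov_0', tmp_zarr_path)
    return img.shape, dict(metadata)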
the-stack_106_17075
import socketserver

class MyTCPHandler(socketserver.StreamRequestHandler):
    # Handler for incoming connections
    def handle(self):
        self.data = self.request.recv(1024)
        value = bytes.decode(self.data)
        print("CLIENT SENT: " + value)
        self.request.sendall(b"Hello, client")

if __name__ == "__main__":
    # Create the server
    server = socketserver.TCPServer(("127.0.0.1", 3000), MyTCPHandler)
    print("Starting TCP server...")
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print("Stopping server...")
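# A minimal client sketch (not part of the original script) showing how the
# server above is expected to be exercised; host and port simply mirror the
# values hard-coded in the __main__ block.
def example_client():
    import socket
    with socket.create_connection(("127.0.0.1", 3000)) as sock:
        sock.sendall(b"Hello, server")
        reply = sock.recv(1024)  # expected: b"Hello, client"
        print("SERVER SENT: " + reply.decode())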
the-stack_106_17076
num = 10
limit = 6
cnt = 0
playGame = False
guess = 0

while guess != num:
    print("You have " + str(limit - cnt) + " guesses left")
    guessFirst = input("Enter a Number : ")
    if guessFirst.isnumeric():
        guess = int(guessFirst)
        if guess == num:
            playGame = True
        if guess > num:
            print("Wrong, too high")
        elif guess < num:
            print("Wrong, too low")
        cnt += 1
        if (limit - cnt) == 0:
            break
    else:
        print("Please enter a whole number")

if playGame:
    print("You Got it!")
else:
    print("You ran out of guesses, it was: " + str(num))
the-stack_106_17077
# -*- coding: utf-8 -*-
'''
Crontab task starter
Loads tasks according to the auto-trigger (crontab) configs, then launches func_runner
'''

# Builtin Modules
import time
import traceback

# 3rd-party Modules
import arrow
from croniter import croniter
import six

# Project Modules
from worker import app
from worker.utils import toolkit, yaml_resources
from worker.tasks import gen_task_id, webhook

# Current Module
from worker.tasks import BaseTask
from worker.tasks.main.func_runner import func_runner

CONFIG = yaml_resources.get('CONFIG')

class CrontabStarterTask(BaseTask):
    # Crontab filter - backward-looking selection
    def crontab_config_filter(self, trigger_time, crontab_config):
        '''
        Filter function that decides whether a crontab config should fire at this trigger point.

        The algorithm is as follows:

        trigger_time - 1m          trigger_time
                  |                      |    actual Starter run time (1 second after trigger_time,
                  |                      |    which is when trigger_time is computed)
                  |                      | 1s |
                  |                      |    |
                  +-----------------------------+====+------------> Future
                  ^                      ^
                  |                      |
                  |                      start_time (crontab tasks that SHOULD run)
                  |
                  start_time (crontab tasks that should NOT run)

        That is: only configs whose latest start point is greater than or equal to the current trigger point need to run.
        '''
        crontab_expr = crontab_config['crontab']
        if not crontab_expr:
            return False

        if not croniter.is_valid(crontab_expr):
            return False

        now = arrow.get(trigger_time + 1).to('Asia/Shanghai').datetime
        crontab_iter = croniter(crontab_expr, now)
        start_time = int(crontab_iter.get_prev())
        return start_time >= trigger_time

    def prepare_contab_config(self, crontab_config):
        crontab_config['taskOrigin'] = 'crontab'
        crontab_config['execMode'] = 'crontab'

        func_call_kwargs_json = crontab_config.get('funcCallKwargsJSON')
        if func_call_kwargs_json:
            crontab_config['funcCallKwargs'] = toolkit.json_loads(func_call_kwargs_json)
        else:
            crontab_config['funcCallKwargs'] = {}

        func_extra_config_json = crontab_config.get('funcExtraConfigJSON')
        if func_extra_config_json:
            crontab_config['funcExtraConfig'] = toolkit.json_loads(func_extra_config_json)
        else:
            crontab_config['funcExtraConfig'] = {}

        if crontab_config.get('saveResult'):
            crontab_config['saveResult'] = True
        else:
            crontab_config['saveResult'] = False

        try:
            crontab_config['crontab'] = crontab_config['funcExtraConfig'].get('fixedCrontab') or crontab_config['crontab']
        except Exception as e:
            crontab_config['crontab'] = None

        try:
            crontab_config['taskInfoLimit'] = crontab_config['funcExtraConfig'].get('fixedTaskInfoLimit') or crontab_config['taskInfoLimit']
        except Exception as e:
            crontab_config['taskInfoLimit'] = None

        return crontab_config

    def get_integrated_func_crontab_configs(self):
        sql = '''
            SELECT
                 `func`.`id`
                ,`func`.`extraConfigJSON`
            FROM `biz_main_func` AS `func`
            WHERE
                    `func`.`integration` = 'autoRun'
                AND JSON_EXTRACT(`func`.`extraConfigJSON`, '$.integrationConfig.crontab') IS NOT NULL
            '''
        funcs = self.db.query(sql)

        crontab_configs = []
        for f in funcs:
            extra_config_json = f.get('extraConfigJSON')
            if extra_config_json:
                f['extraConfig'] = toolkit.json_loads(extra_config_json)
            else:
                f['extraConfig'] = {}

            crontab_expr = None
            try:
                crontab_expr = f['extraConfig']['integrationConfig']['crontab']
            except Exception as e:
                continue

            c = {
                'seq'            : 0,
                'id'             : CONFIG['_INTEGRATION_CRONTAB_CONFIG_ID'],
                'funcCallKwargs' : {},
                'crontab'        : crontab_expr,
                'saveResult'     : False,
                'funcId'         : f['id'],
                'funcExtraConfig': f['extraConfig'],
                'taskOrigin'     : 'integration',
                'taskInfoLimit'  : CONFIG['_TASK_INFO_DEFAULT_LIMIT_INTEGRATION'],
                'execMode'       : 'crontab',
            }
            crontab_configs.append(c)

        return crontab_configs

    def get_crontab_config(self, crontab_config_id):
        sql = '''
            SELECT
                 `cron`.`seq`
                ,`cron`.`id`
                ,`cron`.`funcCallKwargsJSON`
                ,`cron`.`crontab`
                ,`cron`.`saveResult`
                ,`cron`.`taskInfoLimit`
                ,`func`.`id`              AS `funcId`
                ,`func`.`extraConfigJSON` AS `funcExtraConfigJSON`
            FROM `biz_main_crontab_config` AS `cron`
            JOIN `biz_main_func` AS `func`
                ON `cron`.`funcId` = `func`.`id`
            WHERE
                `cron`.`id` = ?
            LIMIT 1
            '''
        sql_params = [crontab_config_id]
        db_res = self.db.query(sql, sql_params)
        if not db_res:
            return None

        crontab_config = self.prepare_contab_config(db_res[0])
        return crontab_config

    def fetch_crontab_configs(self, next_seq=None):
        if next_seq is None:
            next_seq = 0

        sql = '''
            SELECT
                 `cron`.`seq`
                ,`cron`.`id`
                ,`cron`.`funcCallKwargsJSON`
                ,`cron`.`crontab`
                ,`cron`.`saveResult`
                ,`cron`.`taskInfoLimit`
                ,`func`.`id`              AS `funcId`
                ,`func`.`extraConfigJSON` AS `funcExtraConfigJSON`
            FROM `biz_main_crontab_config` AS `cron`
            JOIN `biz_main_func` AS `func`
                ON `cron`.`funcId` = `func`.`id`
            WHERE
                    `cron`.`seq`        > ?
                AND `cron`.`isDisabled` = FALSE
                AND IFNULL(UNIX_TIMESTAMP(`cron`.`expireTime`) > UNIX_TIMESTAMP(), TRUE)
            LIMIT 100
            '''
        sql_params = [next_seq]
        crontab_configs = self.db.query(sql, sql_params)

        latest_seq = None
        if len(crontab_configs) > 0:
            latest_seq = crontab_configs[-1]['seq']

        # Prepare config details
        for c in crontab_configs:
            c = self.prepare_contab_config(c)

        return crontab_configs, latest_seq

    def _get_time_limit(self, crontab_config):
        soft_time_limit = CONFIG['_FUNC_TASK_DEFAULT_TIMEOUT']
        time_limit      = CONFIG['_FUNC_TASK_DEFAULT_TIMEOUT'] + CONFIG['_FUNC_TASK_EXTRA_TIMEOUT_TO_KILL']

        func_timeout = None
        try:
            func_timeout = crontab_config['funcExtraConfig']['timeout']
        except Exception as e:
            pass
        else:
            if isinstance(func_timeout, (six.integer_types, float)) and func_timeout > 0:
                soft_time_limit = func_timeout
                time_limit      = func_timeout + CONFIG['_FUNC_TASK_EXTRA_TIMEOUT_TO_KILL']

        return soft_time_limit, time_limit

    def _get_queue(self, crontab_config):
        specified_queue = None
        try:
            specified_queue = crontab_config['funcExtraConfig']['queue']
        except Exception as e:
            pass

        queue = None
        if specified_queue is None:
            queue = CONFIG['_FUNC_TASK_DEFAULT_CRONTAB_QUEUE']

        else:
            if isinstance(specified_queue, int) and 0 <= specified_queue < CONFIG['_WORKER_QUEUE_COUNT']:
                # Queue number specified directly
                queue = specified_queue

            else:
                # Queue alias specified
                try:
                    queue_number = int(CONFIG['WORKER_QUEUE_ALIAS_MAP'][specified_queue])
                except Exception as e:
                    # Bad config: the alias cannot be resolved to a queue number, or the
                    # number is out of range, so fall back to the default function queue.
                    # This guarantees that some worker is always responsible for executing
                    # the task (execution itself will report the error).
                    queue = CONFIG['_FUNC_TASK_DEFAULT_CRONTAB_QUEUE']
                else:
                    # Convert the queue alias to a queue number
                    queue = queue_number

        return str(queue)

    def send_task(self, crontab_config, current_time, trigger_time):
        # Determine the timeout
        soft_time_limit, time_limit = self._get_time_limit(crontab_config)

        # Determine the execution queue
        queue = self._get_queue(crontab_config)

        # Delayed execution support
        delayed_crontab = None
        try:
            delayed_crontab = crontab_config['funcExtraConfig'].get('delayedCrontab') or [0]
        except Exception as e:
            delayed_crontab = [0]

        for delay in delayed_crontab:
            # Acquire the lock
            lock_key = toolkit.get_cache_key('lock', 'CrontabConfig', tags=[
                'crontabConfigId', crontab_config['id'],
                'funcId', crontab_config['funcId'],
                'crontabDelay', delay])
            lock_value = toolkit.gen_uuid()
            if not self.cache_db.lock(lock_key, lock_value, time_limit):
                # Lock before triggering the task; skip this config if locking fails
                continue

            # Task ID
            task_id = gen_task_id()

            # Compute the task expiry time
            _shift_seconds = int(soft_time_limit * CONFIG['_FUNC_TASK_TIMEOUT_TO_EXPIRE_SCALE'] + delay)
            expires = arrow.get().shift(seconds=_shift_seconds).datetime

            # Enqueue the task
            task_headers = {
                'origin': '{}-{}'.format(crontab_config['id'], current_time) # Origin is marked as "<crontab config ID>-<timestamp>"
            }

            # Note:
            # The task's `origin` here is different from the crontab config's `origin`.
            # The task's `origin` denotes the task source (one of authLink, crontab, batch, integration);
            #   together with `originId` it identifies the business entity.
            # The crontab config's `origin` denotes the config source (one of API, UI, INTEGRATION).
            task_kwargs = {
                'funcId'        : crontab_config['funcId'],
                'funcCallKwargs': crontab_config['funcCallKwargs'],
                'origin'        : crontab_config['taskOrigin'],
                'originId'      : crontab_config['id'],
                'saveResult'    : crontab_config['saveResult'],
                'execMode'      : crontab_config['execMode'],
                'triggerTime'   : (trigger_time + delay),
                'triggerTimeMs' : (trigger_time + delay) * 1000,
                'crontab'       : crontab_config['crontab'],
                'crontabDelay'  : delay,
                'queue'         : queue,
                'taskInfoLimit' : crontab_config['taskInfoLimit'],
                'lockKey'       : lock_key,
                'lockValue'     : lock_value,
            }
            func_runner.apply_async(
                task_id=task_id,
                kwargs=task_kwargs,
                headers=task_headers,
                queue=toolkit.get_worker_queue(queue),
                soft_time_limit=soft_time_limit,
                time_limit=time_limit,
                expires=expires,
                countdown=delay or None)

@app.task(name='Main.CrontabManualStarter', bind=True, base=CrontabStarterTask)
def crontab_manual_starter(self, *args, **kwargs):
    # Function and parameters to execute
    crontab_config_id = kwargs.get('crontabConfigId')

    self.logger.info('Main.CrontabManualStarter Task launched: `{}`'.format(crontab_config_id))

    # Compute the current trigger point
    trigger_time = int(time.time())
    current_time = int(time.time())

    # Fetch the crontab config that needs to run
    crontab_config = self.get_crontab_config(crontab_config_id)
    self.send_task(crontab_config=crontab_config, current_time=current_time, trigger_time=trigger_time)

@app.task(name='Main.CrontabStarter', bind=True, base=CrontabStarterTask)
def crontab_starter(self, *args, **kwargs):
    # Note: wait 1 second to make sure this does not run exactly on the tick,
    # which would jump back to the previous trigger point
    time.sleep(1)

    # Compute the current trigger point
    now = arrow.get().to('Asia/Shanghai').datetime
    crontab_iter = croniter(CONFIG['_CRONTAB_STARTER'], now)
    trigger_time = int(crontab_iter.get_prev())
    next_trigger_time = int(crontab_iter.get_next())
    current_time = int(time.time())

    # Acquire the lock
    lock_age = int(next_trigger_time - current_time - 1)
    self.lock(max_age=lock_age)

    # Fetch the auto-run triggers coming from function integrations
    integrated_crontab_configs = self.get_integrated_func_crontab_configs()

    # Loop, fetching the crontab configs that need to run
    next_seq = 0
    while next_seq is not None:
        crontab_configs, next_seq = self.fetch_crontab_configs(next_seq)

        # On the first round of queries, add the auto-run functions from integrations
        if integrated_crontab_configs:
            crontab_configs = integrated_crontab_configs + crontab_configs
            integrated_crontab_configs = None

        # Dispatch tasks
        for c in crontab_configs:
            # Skip tasks whose trigger time has not been reached
            if not self.crontab_config_filter(trigger_time, c):
                continue

            self.send_task(crontab_config=c, current_time=current_time, trigger_time=trigger_time)
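# --- Hypothetical illustration (not part of the worker) ---
# Shows the rule implemented by CrontabStarterTask.crontab_config_filter():
# a config fires only when the most recent start point of its crontab
# expression is >= the current trigger point. The crontab expression below is
# invented for the example.
def _example_crontab_filter_check():
    trigger_time = int(time.time())                                   # trigger point computed by the starter
    now = arrow.get(trigger_time + 1).to('Asia/Shanghai').datetime    # the starter actually runs ~1s later
    start_time = int(croniter('*/5 * * * *', now).get_prev())
    return start_time >= trigger_time                                 # True -> the config should fire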
the-stack_106_17078
#! /usr/local/bin/python3 """ Run the antler parser against a scribe block, and record info about the block, as well as timing and output info for later analysis. """ import os import subprocess import sys import time from argparse import ArgumentParser from pathlib import Path from dgw_filter import dgw_filter DEBUG = os.getenv('DEBUG_RUN') parser = ArgumentParser('run grun') parser.add_argument('block_type') parser.add_argument('requirement_block') parser.add_argument('-t', '--timelimit', default=900) args = parser.parse_args() timelimit = int(args.timelimit) block_type = args.block_type.lower() test_dir = Path(f'./test_data.{block_type}') file = Path(test_dir, args.requirement_block) size = file.stat().st_size lines = file.read_text() lines = dgw_filter(lines) num_lines = lines.count('\n') lines = lines.encode('utf-8') # Need bytes-like object for input arg to subprocess.run() if DEBUG: print(f'{test_dir.name}/{file.name} has {size} bytes; {num_lines} lines. {timelimit=}') if os.getenv('HOSTTYPE') == 'aarch64': classpath = './classes:/opt/homebrew/Cellar/antlr/4.9.3/antlr-4.9.3-complete.jar' else: classpath = './classes:/usr/local/lib/antlr-4.9.3-complete.jar' try: run_args = ['java', '-cp', classpath, 'org.antlr.v4.gui.TestRig', 'ReqBlock', 'req_block'] t0 = time.time() completed = subprocess.run(run_args, timeout=timelimit, # stdin=file.open(), input=lines, capture_output=True) elapsed = time.time() - t0 output = len(completed.stdout) + len(completed.stderr) if output != 0: with open(f'test_results.{args.block_type}/{args.requirement_block}', 'w') as errorlog: print(f'{completed.stdout} {completed.stderr}', file=errorlog) except subprocess.TimeoutExpired: output = 'timeout' elapsed = timelimit + 1 print(f'{args.block_type},{args.requirement_block},{num_lines},{output},{elapsed:0.1f}', file=sys.stderr) print(f'{args.block_type},{args.requirement_block},{num_lines},{output},{elapsed:0.1f}')
the-stack_106_17079
import timeit def validate_input(n): if type(n) != int: raise TypeError("n must be a positive int") if n < 1: raise ValueError("n must be a positive int") def stress(prefixes=[], globals=None): print("Cache warmup with fib(128), fib(256), fib(512), fib(1000)") for prefix in prefixes: time = timeit.timeit( f"{prefix}_fibonacci(128);" f"{prefix}_fibonacci(256);" f"{prefix}_fibonacci(512);" f"{prefix}_fibonacci(1000)", globals=globals, number=1, ) print(f"{prefix}\t{time:.6f}") times = 1000 print(f"Cache stress with fib(1000) {times} times") for prefix in prefixes: time = timeit.timeit( f"{prefix}_fibonacci(1000)", globals=globals, number=times, ) print(f"{prefix}\t{time:.6f}")
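# Hypothetical usage sketch (not part of the module): stress() only assumes
# that a callable named "<prefix>_fibonacci" exists in the globals mapping it
# hands to timeit; the "iter" prefix and the implementation below are invented.
def _example_stress_run():
    def iter_fibonacci(n):
        validate_input(n)
        a, b = 0, 1
        for _ in range(n):
            a, b = b, a + b
        return a

    stress(prefixes=['iter'], globals={'iter_fibonacci': iter_fibonacci})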
the-stack_106_17080
import os import functools import time import glob import argparse import numpy as np import joblib import yaml from blocks.core import utils, labels, duplocorpus from blocks.estimation import imageprocessing, models, render, metrics def removeBackground(image, foreground_mask, replace_with=None): if replace_with is None: replace_with = np.zeros_like(image) new_image = image.copy().astype(float) new_image[~foreground_mask] = replace_with[~foreground_mask] return new_image def getUniqueTrialIds(dir_path): trial_ids = set( int(os.path.basename(fn).split('-')[1].split('_')[0]) for fn in glob.glob(os.path.join(dir_path, f"trial-*.pkl")) ) return sorted(tuple(trial_ids)) def main( out_dir=None, data_dir=None, preprocess_dir=None, detections_dir=None, data_scores_dir=None, keyframes_dir=None, num_seqs=None, only_task_ids=None, resume=None, num_folds=None, scores_run_name=None, keyframe_model_name=None, reselect_keyframes=None, subsample_period=None, window_size=None, corpus_name=None, debug=None, remove_skin=None, remove_background=None, default_annotator=None, cv_scheme=None, model_config=None, overwrite=None, legacy_mode=None): out_dir = os.path.expanduser(out_dir) data_dir = os.path.expanduser(data_dir) preprocess_dir = os.path.expanduser(preprocess_dir) detections_dir = os.path.expanduser(detections_dir) if data_scores_dir is not None: data_scores_dir = os.path.expanduser(data_scores_dir) if keyframes_dir is not None: keyframes_dir = os.path.expanduser(keyframes_dir) fig_dir = os.path.join(out_dir, 'figures') if not os.path.exists(fig_dir): os.makedirs(fig_dir) out_data_dir = os.path.join(out_dir, 'data') if not os.path.exists(out_data_dir): os.makedirs(out_data_dir) if overwrite is None: overwrite = debug if legacy_mode: model_config['decode_kwargs']['legacy_mode'] = legacy_mode def loadFromDataDir(var_name): return joblib.load(os.path.join(data_dir, f"{var_name}.pkl")) def loadFromPreprocessDir(var_name): return joblib.load(os.path.join(preprocess_dir, f"{var_name}.pkl")) def loadFromDetectionsDir(var_name): return joblib.load(os.path.join(detections_dir, f"{var_name}.pkl")) def loadFromKeyframesDir(var_name): return joblib.load(os.path.join(keyframes_dir, f"{var_name}.pkl")) def saveToWorkingDir(var, var_name): joblib.dump(var, os.path.join(out_data_dir, f"{var_name}.pkl")) trial_ids = getUniqueTrialIds(detections_dir) corpus = duplocorpus.DuploCorpus(corpus_name) if num_seqs is not None and num_seqs > 0: logger.info(f"Ignoring all but the first {num_seqs} videos") trial_ids = trial_ids[:num_seqs] logger.info(f"Loading data...") kept_trial_ids = [] rgb_keyframe_seqs = [] depth_keyframe_seqs = [] seg_keyframe_seqs = [] background_keyframe_seqs = [] assembly_keyframe_seqs = [] assembly_seqs = [] label_keyframe_seqs = [] foreground_mask_seqs = [] for seq_idx, trial_id in enumerate(trial_ids): try: trial_str = f"trial={trial_id}" rgb_frame_seq = loadFromDataDir(f'{trial_str}_rgb-frame-seq') depth_frame_seq = loadFromDataDir(f'{trial_str}_depth-frame-seq') rgb_timestamp_seq = loadFromDataDir(f'{trial_str}_rgb-frame-timestamp-seq') action_seq = loadFromDataDir(f'{trial_str}_action-seq') trial_str = f"trial-{trial_id}" foreground_mask_seq = loadFromPreprocessDir( f'{trial_str}_foreground-mask-seq_no-ref-model' ) background_plane_seq = loadFromPreprocessDir(f'{trial_str}_background-plane-seq') # FIXME: I need a better way of handling this. For child data it's # better to get segments from block detections, but for the easy # dataset it's better to use the original foreground segments. 
if legacy_mode: segment_seq = loadFromDetectionsDir(f'{trial_str}_block-segment-frame-seq') else: segment_seq = loadFromPreprocessDir(f'{trial_str}_segment-frame-seq') label_frame_seq = loadFromDetectionsDir(f'{trial_str}_class-label-frame-seq') assembly_seq = labels.parseLabelSeq(action_seq, timestamps=rgb_timestamp_seq) if len(assembly_seq) < 2: logger.info(f"Skipping trial {trial_id} --- error parsing state seq") continue assembly_seq[-1].end_idx = len(rgb_frame_seq) - 1 except FileNotFoundError as e: logger.warning(e) logger.info(f"Skipping trial {trial_id} --- no data in {scores_run_name}") continue task_id = None # corpus.getTaskIndex(trial_id) (FIXME: getTaskIndex is broken) assembly_seqs.append(assembly_seq) if keyframes_dir is not None: keyframe_idxs = loadFromKeyframesDir(f'{trial_str}_keyframe-idxs') assembly_seq = labels.resampleStateSeq(keyframe_idxs, assembly_seq) rgb_frame_seq = rgb_frame_seq[keyframe_idxs] depth_frame_seq = depth_frame_seq[keyframe_idxs] segment_seq = segment_seq[keyframe_idxs] background_plane_seq = tuple( background_plane_seq[i] for i in keyframe_idxs ) label_frame_seq = label_frame_seq[keyframe_idxs] foreground_mask_seq = foreground_mask_seq[keyframe_idxs] if not only_task_ids or task_id in only_task_ids: rgb_keyframe_seqs.append(rgb_frame_seq) depth_keyframe_seqs.append(depth_frame_seq) seg_keyframe_seqs.append(segment_seq) background_keyframe_seqs.append(background_plane_seq) assembly_keyframe_seqs.append(assembly_seq) label_keyframe_seqs.append(label_frame_seq) foreground_mask_seqs.append(foreground_mask_seq) kept_trial_ids.append(trial_id) trial_ids = kept_trial_ids # Split into train and test sets if cv_scheme == 'leave one out': num_seqs = len(trial_ids) cv_folds = [] for i in range(num_seqs): test_fold = (i,) train_fold = tuple(range(0, i)) + tuple(range(i + 1, num_seqs)) cv_folds.append((train_fold, test_fold)) elif cv_scheme == 'train on child': child_corpus = duplocorpus.DuploCorpus('child') child_trial_ids = utils.loadVariable('trial_ids', 'preprocess-all-data', 'child') train_assembly_seqs = tuple( labels.parseLabelSeq(child_corpus.readLabels(trial_id, 'Cathryn')[0]) for trial_id in child_trial_ids ) hmm = models.EmpiricalImageHmm(**model_config['init_kwargs']) logger.info(f" Training model on {len(train_assembly_seqs)} sequences...") hmm.fit(train_assembly_seqs, **model_config['fit_kwargs']) logger.info(f' Model trained on {hmm.num_states} unique assembly states') saveToWorkingDir(hmm, f'hmm-fold0') cv_folds = [(tuple(range(len(child_trial_ids))), tuple(range(len(trial_ids))))] num_cv_folds = len(cv_folds) saveToWorkingDir(cv_folds, f'cv-folds') total_correct = 0 total_items = 0 for fold_index, (train_idxs, test_idxs) in enumerate(cv_folds): if num_folds is not None and fold_index >= num_folds: break logger.info(f"CV FOLD {fold_index + 1} / {num_cv_folds}") # Initialize and train model if cv_scheme == 'train on child': pass else: utils.validateCvFold(train_idxs, test_idxs) selectTrain = functools.partial(utils.select, train_idxs) # train_trial_ids = selectTrain(trial_ids) train_assembly_seqs = selectTrain(assembly_keyframe_seqs) hmm = models.EmpiricalImageHmm(**model_config['init_kwargs']) logger.info(f" Training model on {len(train_idxs)} sequences...") hmm.fit(train_assembly_seqs, **model_config['fit_kwargs']) logger.info(f' Model trained on {hmm.num_states} unique assembly states') saveToWorkingDir(hmm, f'hmm-fold{fold_index}') # Decode on the test set logger.info(f" Testing model on {len(test_idxs)} sequences...") for i, test_index in 
enumerate(test_idxs): trial_id = trial_ids[test_index] rgb_frame_seq = rgb_keyframe_seqs[test_index] depth_frame_seq = depth_keyframe_seqs[test_index] seg_frame_seq = seg_keyframe_seqs[test_index] background_plane_seq = background_keyframe_seqs[test_index] true_assembly_seq = assembly_keyframe_seqs[test_index] true_assembly_seq_orig = assembly_seqs[test_index] label_frame_seq = label_keyframe_seqs[test_index] foreground_mask_seq = foreground_mask_seqs[test_index] if data_scores_dir is not None: try: data_scores = joblib.load( os.path.join(data_scores_dir, f"trial={trial_id}_data-scores.pkl") ) except FileNotFoundError: logger.info(" Skipping trial {trial_id} --- scores file not found") continue else: data_scores = None rgb_frame_seq = tuple( imageprocessing.saturateImage( rgb_image, background_mask=~foreground_mask, remove_background=remove_background ) for rgb_image, foreground_mask in zip(rgb_frame_seq, foreground_mask_seq) ) depth_bkgrd_frame_seq = tuple( render.renderPlane( background_plane, camera_params=render.intrinsic_matrix, camera_pose=render.camera_pose, plane_appearance=render.object_colors[0] )[1] for background_plane in background_plane_seq ) depth_frame_seq = tuple( removeBackground(depth_image, foreground_mask, replace_with=depth_bkgrd) for depth_image, foreground_mask, depth_bkgrd in zip(depth_frame_seq, foreground_mask_seq, depth_bkgrd_frame_seq) ) # FIXME: This is a really hacky way of dealing with the fact that # fitScene takes a background plane but stateLogLikelihood takes # a background plane IMAGE if legacy_mode: background_seq = depth_bkgrd_frame_seq else: background_seq = background_plane_seq logger.info(f' Decoding video {trial_id}...') num_oov = sum(int(s not in hmm.states) for s in true_assembly_seq) logger.info(f" {num_oov} out-of-vocabulary states in ground-truth") start_time = time.process_time() ret = hmm.predictSeq( rgb_frame_seq, depth_frame_seq, seg_frame_seq, label_frame_seq, background_seq, log_likelihoods=data_scores, **model_config['decode_kwargs'] ) pred_assembly_seq, pred_idx_seq, max_log_probs, log_likelihoods, poses_seq = ret end_time = time.process_time() logger.info(utils.makeProcessTimeStr(end_time - start_time)) if data_scores_dir is not None: # FIXME: I only save the pose of the best sequence, but I should # save all of them # poses_seq = joblib.load( # os.path.join(data_scores_dir, f"trial-{trial_id}_poses-seq.pkl") # ) if legacy_mode: poses_seq = tuple( ((0, np.zeros(2)),) * len(s.connected_components) for s in pred_assembly_seq ) else: poses_seq = tuple( ((np.eye(3), np.zeros(3)),) * len(s.connected_components) for s in pred_assembly_seq ) if len(pred_assembly_seq) == len(true_assembly_seq): num_correct, num_total = metrics.numberCorrect(true_assembly_seq, pred_assembly_seq) logger.info(f' ACCURACY: {num_correct} / {num_total}') total_correct += num_correct total_items += num_total else: logger.info( f" Skipping accuracy computation: " f"{len(pred_assembly_seq)} pred states != " f"{len(true_assembly_seq)} gt states" ) # Save intermediate results logger.info(f"Saving output...") trial_str = f"trial-{trial_id}" saveToWorkingDir(true_assembly_seq_orig, f'{trial_str}_true-state-seq-orig') saveToWorkingDir(true_assembly_seq, f'{trial_str}_true-state-seq') saveToWorkingDir(pred_assembly_seq, f'{trial_str}_pred-state-seq') saveToWorkingDir(poses_seq, f'{trial_str}_poses-seq') saveToWorkingDir(max_log_probs, f'{trial_str}_viterbi-scores') saveToWorkingDir(log_likelihoods, f'{trial_str}_data-scores') # Save figures if legacy_mode: renders = tuple( 
render.makeFinalRender( p, assembly=a, rgb_background=np.zeros_like(rgb), depth_background=depth_bkgrd, camera_pose=render.camera_pose, camera_params=render.intrinsic_matrix, block_colors=render.object_colors ) for p, a, rgb, depth, depth_bkgrd in zip( poses_seq, pred_assembly_seq, rgb_frame_seq, depth_frame_seq, depth_bkgrd_frame_seq ) ) rgb_rendered_seq, depth_rendered_seq, label_rendered_seq = tuple( zip(*renders) ) gt_poses_seq = tuple( ((0, np.zeros(2)),) * len(s.connected_components) for s in true_assembly_seq ) renders = tuple( render.makeFinalRender( p, assembly=a, rgb_background=np.zeros_like(rgb), depth_background=depth_bkgrd, camera_pose=render.camera_pose, camera_params=render.intrinsic_matrix, block_colors=render.object_colors ) for p, a, rgb, depth, depth_bkgrd in zip( gt_poses_seq, true_assembly_seq, rgb_frame_seq, depth_frame_seq, depth_bkgrd_frame_seq ) ) rgb_rendered_seq_gt, depth_rendered_seq_gt, label_rendered_seq_gt = tuple( zip(*renders) ) else: rgb_rendered_seq, depth_rendered_seq, label_rendered_seq = utils.batchProcess( render.renderScene, background_plane_seq, pred_assembly_seq, poses_seq, static_kwargs={ 'camera_pose': render.camera_pose, 'camera_params': render.intrinsic_matrix, 'object_appearances': render.object_colors }, unzip=True ) gt_poses_seq = tuple( ((np.eye(3), np.zeros(3)),) * len(s.connected_components) for s in true_assembly_seq ) renders = utils.batchProcess( render.renderScene, background_plane_seq, true_assembly_seq, gt_poses_seq, static_kwargs={ 'camera_pose': render.camera_pose, 'camera_params': render.intrinsic_matrix, 'object_appearances': render.object_colors }, unzip=True ) rgb_rendered_seq_gt, depth_rendered_seq_gt, label_rendered_seq_gt = renders if utils.in_ipython_console(): file_path = None else: trial_str = f"trial-{trial_id}" file_path = os.path.join(fig_dir, f'{trial_str}_best-frames.png') diff_images = tuple(np.abs(f - r) for f, r in zip(rgb_frame_seq, rgb_rendered_seq)) imageprocessing.displayImages( *rgb_frame_seq, *diff_images, *rgb_rendered_seq, *rgb_rendered_seq_gt, *seg_frame_seq, *label_frame_seq, num_rows=6, file_path=file_path ) logger.info(f'AVG ACCURACY: {total_correct / total_items * 100: .1f}%') if __name__ == '__main__': # Parse command line arguments parser = argparse.ArgumentParser() parser.add_argument('--config_file') parser.add_argument('--out_dir') parser.add_argument('--data_dir') parser.add_argument('--preprocess_dir') parser.add_argument('--detections_dir') parser.add_argument('--keyframes_dir') parser.add_argument('--data_scores_dir') args = vars(parser.parse_args()) args = {k: v for k, v in args.items() if v is not None} # Load config file and override with any provided command line args config_file_path = args.pop('config_file', None) if config_file_path is None: file_basename = utils.stripExtension(__file__) config_fn = f"{file_basename}.yaml" config_file_path = os.path.expanduser( os.path.join( '~', 'repo', 'blocks', 'blocks', 'estimation', 'scripts', 'config', config_fn ) ) else: config_fn = os.path.basename(config_file_path) with open(config_file_path, 'rt') as config_file: config = yaml.safe_load(config_file) config.update(args) # Create output directory, instantiate log file and write config options out_dir = os.path.expanduser(config['out_dir']) if not os.path.exists(out_dir): os.makedirs(out_dir) logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt')) with open(os.path.join(out_dir, config_fn), 'w') as outfile: yaml.dump(config, outfile) utils.copyFile(__file__, out_dir) 
utils.autoreload_ipython() main(**config)
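# Hypothetical illustration (not used by the script) of the cv_folds structure
# assembled in main(): under the 'leave one out' scheme each fold is a
# (train_indices, test_indices) pair over sequence positions, e.g. for three
# sequences: [((1, 2), (0,)), ((0, 2), (1,)), ((0, 1), (2,))].
def _example_leave_one_out_folds(num_seqs=3):
    folds = []
    for i in range(num_seqs):
        test_fold = (i,)
        train_fold = tuple(range(0, i)) + tuple(range(i + 1, num_seqs))
        folds.append((train_fold, test_fold))
    return folds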
the-stack_106_17083
#!/usr/bin/env python # encoding: utf-8 import pickle from flask import current_app import application.models as Models from application.services.user import create_user DEFAULT_BACKUP_FILE = '/tmp/posts.bak' def import_posts(): backup_file = current_app.config.get('BACKUP_FILE', DEFAULT_BACKUP_FILE) with open(backup_file, "rb") as f: data = f.read() users = Models.User.objects.all() if not users: user = create_user() else: user = users[0] records = pickle.loads(data) for post in records['posts']: post.author = user post.save() for cate in records['categories']: cate.save() for tag in records['tags']: tag.save() for image in records['images']: image.save() return True def export_posts(): post_bak = {"posts": [], "categories": [], "tags": [], "images": []} for post in Models.Post.objects.all(): post_bak["posts"].append(post) for cate in Models.Category.objects.all(): post_bak["categories"].append(cate) for tag in Models.Tag.objects.all(): post_bak["tags"].append(tag) for image in Models.Images.objects.all(): post_bak["images"].append(image) with open("/tmp/posts.bak", "wb") as f: f.write(pickle.dumps(post_bak)) return True
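# Hypothetical usage sketch (not part of the module): import_posts() reads
# current_app.config, so both helpers are expected to run inside an application
# context. The `app` argument is whatever Flask app this code belongs to; no
# factory name is assumed here.
def example_backup_roundtrip(app):
    with app.app_context():
        export_posts()   # writes the pickle backup to /tmp/posts.bak
        import_posts()   # reads BACKUP_FILE (default /tmp/posts.bak) back in and re-saves the records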
the-stack_106_17084
import io import os from distutils.file_util import copy_file from setuptools import setup, find_packages from gremlinapi.util import get_version __version__ = get_version() def getRequires(): deps = ['requests>=2.22.0', 'urllib3>=1.25.8'] return deps dir_path = os.path.abspath(os.path.dirname(__file__)) readme = io.open(os.path.join(dir_path, 'README.md'), encoding='utf-8').read() setup( name='gremlinapi', version=str(__version__), author='Kyle Hultman', author_email='[email protected]', url='https://github.com/gremlin/gremlin-python/', packages=find_packages(exclude=["temp*.py", "test"]), include_package_data=True, license='Apache 2.0', description='Gremlin library for Python', long_description=readme, long_description_content_type="text/markdown", install_requires=getRequires(), python_requires='>=3.6', entry_points={"console_scripts": ["pgremlin = gremlinapi.cli:main"]}, classifiers=[ 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ] )
the-stack_106_17087
from typing import List


# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def constructMaximumBinaryTree(self, nums: List[int]) -> TreeNode:
        St = []
        for num in nums:
            node = TreeNode(num)
            while St and St[-1].val < num:
                node.left = St.pop()
            if St:
                St[-1].right = node
            St.append(node)
        return St[0]
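# Hypothetical quick check (not part of the LeetCode submission): for the
# classic example [3,2,1,6,0,5] the maximum element 6 becomes the root, with 3
# rooting the left part and 5 rooting the right part.
if __name__ == "__main__":
    root = Solution().constructMaximumBinaryTree([3, 2, 1, 6, 0, 5])
    print(root.val, root.left.val, root.right.val)  # 6 3 5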
the-stack_106_17088
import asyncio import pytest async def _sleep_override(interval): pass asyncio.sleep = _sleep_override from receptor_satellite.worker import Host, Run # noqa: E402 from receptor_satellite.response_queue import ResponseQueue # noqa: E402 from fake_logger import FakeLogger # noqa: E402 class FakeQueue: def __init__(self): self.messages = [] def put(self, message): self.messages.append(message) class FakeSatelliteAPI: def __init__(self): self.requests = [] def record_request(self, request_type, data): self.requests.append((request_type, data)) def real_output(self, job_id, host_id, since): return {"error": None} async def output(self, job_id, host_id, since): self.record_request("output", (job_id, host_id, since)) return self.real_output(job_id, host_id, since) @pytest.fixture def base_scenario(request): queue = FakeQueue() logger = FakeLogger() satellite_api = FakeSatelliteAPI() run = Run( ResponseQueue(queue), "rem_id", "play_id", "account_no", ["host1"], "playbook", {}, satellite_api, logger, ) yield (queue, logger, satellite_api, run) def test_mark_as_failed(base_scenario): queue, logger, satellite_api, run = base_scenario host = Host(run, None, "host1") host.mark_as_failed("controlled failure") assert queue.messages == [ { "type": "playbook_run_update", "playbook_run_id": "play_id", "sequence": 0, "host": host.name, "console": "controlled failure", }, { "type": "playbook_run_finished", "playbook_run_id": "play_id", "host": host.name, "status": ResponseQueue.RESULT_FAILURE, }, ] class PollWithRetriesTestCase: def __init__( self, host_id=1, api_output=None, result=None, api_requests=[], queue_messages=[], ): self.host_id = host_id self.api_output = api_output self.result = result self.api_requests = api_requests self.queue_messages = queue_messages POLL_WITH_RETRIES_TEST_CASES = [ # Polling loop does not loop if there is no error when talking to # the API PollWithRetriesTestCase( result={"error": None, "key": "value"}, api_output={"error": None, "key": "value"}, api_requests=[("output", (None, 1, None))], ), PollWithRetriesTestCase( result={"error": True}, api_output={"error": "controlled failure"}, api_requests=[("output", (None, 1, None)) for _x in range(5)], queue_messages=[ { "type": "playbook_run_update", "playbook_run_id": "play_id", "sequence": 0, "host": "host1", "console": "controlled failure", }, { "type": "playbook_run_finished", "playbook_run_id": "play_id", "host": "host1", "status": ResponseQueue.RESULT_FAILURE, }, ], ), ] @pytest.fixture(params=POLL_WITH_RETRIES_TEST_CASES) def poll_with_retries_scenario(request, base_scenario): # host_id, output_value, result, api_requests, queue_messages = request.param param = request.param queue, logger, satellite_api, run = base_scenario host = Host(run, param.host_id, "host1") yield (queue, host, param) @pytest.mark.asyncio async def test_poll_with_retries(poll_with_retries_scenario): (queue, host, param,) = poll_with_retries_scenario satellite_api = host.run.satellite_api satellite_api.real_output = lambda j, h, s: param.api_output result = await host.poll_with_retries() assert result == param.result assert satellite_api.requests == param.api_requests assert queue.messages == param.queue_messages class PollingLoopTestCase(PollWithRetriesTestCase): def __init__(self, cancelled=False, **kwargs): super().__init__(**kwargs) self.cancelled = cancelled POLLING_LOOP_TEST_CASES = [ # If the host doesn't have an ID, it is assumed to be not known by # Satellite and is marked as failed PollingLoopTestCase( host_id=None, queue_messages=[ { 
"type": "playbook_run_update", "playbook_run_id": "play_id", "sequence": 0, "host": "host1", "console": "This host is not known by Satellite", }, { "type": "playbook_run_finished", "playbook_run_id": "play_id", "host": "host1", "status": ResponseQueue.RESULT_FAILURE, }, ], ), # If the polling loop receives an error from the API, it marks the # host as failed PollingLoopTestCase( api_output={"error": "controlled failure"}, api_requests=[("output", (None, 1, None)) for _x in range(5)], queue_messages=[ { "type": "playbook_run_update", "playbook_run_id": "play_id", "sequence": 0, "host": "host1", "console": "controlled failure", }, { "type": "playbook_run_finished", "playbook_run_id": "play_id", "host": "host1", "status": ResponseQueue.RESULT_FAILURE, }, ], ), # If the last output from the API ends with Exit status: 0, mark # the run on the host as success PollingLoopTestCase( api_output={ "error": None, "body": {"complete": True, "output": [{"output": "Exit status: 0"}]}, }, api_requests=[("output", (None, 1, None))], queue_messages=[ { "type": "playbook_run_update", "playbook_run_id": "play_id", "sequence": 0, "host": "host1", "console": "Exit status: 0", }, { "type": "playbook_run_finished", "playbook_run_id": "play_id", "host": "host1", "status": ResponseQueue.RESULT_SUCCESS, }, ], ), # If the run was cancelled, but the host managed to finish # successfully, mark it as success PollingLoopTestCase( cancelled=True, api_output={ "error": None, "body": {"complete": True, "output": [{"output": "Exit status: 0"}]}, }, api_requests=[("output", (None, 1, None))], queue_messages=[ { "type": "playbook_run_update", "playbook_run_id": "play_id", "sequence": 0, "host": "host1", "console": "Exit status: 0", }, { "type": "playbook_run_finished", "playbook_run_id": "play_id", "host": "host1", "status": ResponseQueue.RESULT_SUCCESS, }, ], ), # If the host failed, mark it as failed PollingLoopTestCase( api_output={ "error": None, "body": {"complete": True, "output": [{"output": "Exit status: 123"}]}, }, api_requests=[("output", (None, 1, None))], queue_messages=[ { "type": "playbook_run_update", "playbook_run_id": "play_id", "sequence": 0, "host": "host1", "console": "Exit status: 123", }, { "type": "playbook_run_finished", "playbook_run_id": "play_id", "host": "host1", "status": ResponseQueue.RESULT_FAILURE, }, ], ), # If the run was cancelled and the run on the host failed, mark it # as cancelled PollingLoopTestCase( cancelled=True, api_output={ "error": None, "body": {"complete": True, "output": [{"output": "Exit status: 123"}]}, }, api_requests=[("output", (None, 1, None))], queue_messages=[ { "type": "playbook_run_update", "playbook_run_id": "play_id", "sequence": 0, "host": "host1", "console": "Exit status: 123", }, { "type": "playbook_run_finished", "playbook_run_id": "play_id", "host": "host1", "status": ResponseQueue.RESULT_CANCEL, }, ], ), ] @pytest.fixture(params=POLLING_LOOP_TEST_CASES) def polling_loop_scenario(request, base_scenario): queue, logger, satellite_api, run = base_scenario run.cancelled = request.param.cancelled host = Host(run, request.param.host_id, "host1") yield (queue, host, request.param) @pytest.mark.asyncio async def test_polling_loop(polling_loop_scenario): (queue, host, param,) = polling_loop_scenario satellite_api = host.run.satellite_api satellite_api.real_output = lambda j, h, s: param.api_output result = await host.polling_loop() assert result == param.result assert satellite_api.requests == param.api_requests assert queue.messages == param.queue_messages def 
test_hostname_sanity(): hosts = ["good", "fine", "not,really,good", "ok"] logger = FakeLogger() fake_queue = FakeQueue() playbook_id = "play_id" run = Run( ResponseQueue(fake_queue), "rem_id", playbook_id, "acc_num", hosts, "playbook", {}, None, # No need for SatelliteAPI in this test logger, ) assert logger.warnings == ["Hostname 'not,really,good' contains a comma, skipping"] assert fake_queue.messages == [ { "type": "playbook_run_update", "playbook_run_id": playbook_id, "sequence": 0, "host": "not,really,good", "console": "Hostname contains a comma, skipping", }, { "type": "playbook_run_finished", "playbook_run_id": playbook_id, "host": "not,really,good", "status": "failure", }, ] assert list(map(lambda h: h.name, run.hosts)) == ["good", "fine", "ok"]
the-stack_106_17089
# optical_density_MEC.py v1.0 # by William H. Grover, Department of Bioengineering # University of California, Riverside # [email protected] # http://groverlab.org # Copyright (c) 2016 Regents of the University of California. Permission is # hereby granted, free of charge, to any person obtaining a copy of this # software and associated documentation files (the "Software"), to deal in the # Software without restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do # so, subject to the following conditions: The above copyright notice and this # permission notice shall be included in all copies or substantial portions of # the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO # EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES # OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # Usage: This program expects to be run in the same directory as the folder # named "yeast data from optical density MEC" containing binary data from the # optical density Multifluidic Evolutionary Component (MEC). When this program # is run, it reads in this raw data, filters it using a median filter, and shows # a plot of voltage vs. time. # Dependencies: # Python 2.X (http://www.python.org) # Numpy (http://www.numpy.org) # Scipy (http://www.scipy.org) # Matplotlib (http://matplotlib.org) # The latest version of this program is available at # https://github.com/groverlab/optical_density_MEC import numpy, pylab, os.path, scipy.signal # read raw data from files containing paired measurements of voltage and time dir = "yeast data from optical density MEC" voltages = numpy.array([]) # empty array to store voltage data times = numpy.array([]) # empty array to store time data for candidate in os.listdir(dir): if candidate.endswith("V"): # if file contains voltage data file = os.path.join(dir, candidate) voltages = numpy.append(voltages, numpy.fromfile(file, dtype='>f8')) elif candidate.endswith("T"): # if file contains time data file = os.path.join(dir, candidate) times = numpy.append(times, numpy.fromfile(file, dtype='>f8')) # median filter of voltage data to reduce noise voltages = scipy.signal.medfilt(voltages, kernel_size=101) # plot a subset of the data centered on growth period voltages = voltages[100000:700000] times = times[100000:700000] # create and customize the plot of voltage vs. time pylab.figure(figsize=(5,4)) # good size for paper pylab.plot(times/(60*60), voltages, "k-", linewidth=2) pylab.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.15) pylab.xlabel("Time (hours)", fontweight='bold') pylab.ylabel("""Optical absorbance MEC voltage (V) (proportional to optical density)""", fontweight='bold', multialignment='center') pylab.xlim(3,24) pylab.yticks(numpy.arange(1.2, 1.6, 0.1)) pylab.gca().invert_yaxis() pylab.show()
the-stack_106_17090
""" https://leetcode.com/problems/maximum-product-subarray/ Given an integer array nums, find the contiguous subarray within an array (containing at least one number) which has the largest product. Example 1: Input: [2,3,-2,4] Output: 6 Explanation: [2,3] has the largest product 6. Example 2: Input: [-2,0,-1] Output: 0 Explanation: The result cannot be 2, because [-2,-1] is not a subarray. """ # time complexity: O(n), space complexity: O(1) # this solution is inspired by @mzchen in the discussion area. # Using dynamic programming, we can log the min and max at every position, representing the min and max we can get at every position. # The trick here is when we meet a negative element, we should swap the min and max. # At every position, we should choose to use the prior result (max or min) or not. If we don't use the prior result and we can get better result, then we should not use the prior result. class Solution: def maxProduct(self, nums: List[int]) -> int: if not nums: return 0 result = nums[0] tempmax = tempmin = nums[0] for i in range(1, len(nums)): if nums[i] < 0: tempmax,tempmin = tempmin, tempmax tempmax = max(tempmax*nums[i], nums[i]) tempmin = min(tempmin*nums[i], nums[i]) result = max(result, tempmax) return result
the-stack_106_17091
# -*- coding: utf-8 -*- ''' The core behaviors used by minion and master ''' # pylint: disable=W0232 # pylint: disable=3rd-party-module-not-gated from __future__ import absolute_import # Import python libs import time import os import multiprocessing import logging from salt.ext.six.moves import range # Import salt libs import salt.daemons.flo import salt.daemons.masterapi from raet import raeting from raet.lane.stacking import LaneStack from raet.lane.yarding import RemoteYard import salt.utils.kinds as kinds # Import ioflo libs import ioflo.base.deeding log = logging.getLogger(__name__) # convert to set once list is larger than about 3 because set hashes INHIBIT_RETURN = [] # ['_return'] # cmd for which we should not send return @ioflo.base.deeding.deedify( 'SaltRaetWorkerFork', ioinits={ 'opts': '.salt.opts', 'proc_mgr': '.salt.usr.proc_mgr', 'worker_verify': '.salt.var.worker_verify', 'access_keys': '.salt.access_keys', 'mkey': '.salt.var.zmq.master_key', 'aes': '.salt.var.zmq.aes'}) def worker_fork(self): ''' Fork off the worker procs FloScript: do salt raet worker fork at enter ''' for index in range(int(self.opts.value['worker_threads'])): time.sleep(0.01) self.proc_mgr.value.add_process( Worker, args=( self.opts.value, index + 1, self.worker_verify.value, self.access_keys.value, self.mkey.value, self.aes.value ) ) class Worker(multiprocessing.Process): ''' Create an ioflo worker in a separate process ''' def __init__(self, opts, windex, worker_verify, access_keys, mkey, aes): super(Worker, self).__init__() self.opts = opts self.windex = windex self.worker_verify = worker_verify self.access_keys = access_keys self.mkey = mkey self.aes = aes def run(self): ''' Spin up a worker, do this in multiprocess windex is worker index ''' self.opts['__worker'] = True behaviors = ['salt.daemons.flo'] preloads = [('.salt.opts', dict(value=self.opts)), ('.salt.var.worker_verify', dict(value=self.worker_verify))] preloads.append(('.salt.var.fork.worker.windex', dict(value=self.windex))) preloads.append(('.salt.var.zmq.master_key', dict(value=self.mkey))) preloads.append(('.salt.var.zmq.aes', dict(value=self.aes))) preloads.append( ('.salt.access_keys', dict(value=self.access_keys))) preloads.extend(salt.daemons.flo.explode_opts(self.opts)) console_logdir = self.opts.get('ioflo_console_logdir', '') if console_logdir: consolepath = os.path.join(console_logdir, "worker_{0}.log".format(self.windex)) else: # empty means log to std out consolepath = '' ioflo.app.run.start( name='worker{0}'.format(self.windex), period=float(self.opts['ioflo_period']), stamp=0.0, real=self.opts['ioflo_realtime'], filepath=self.opts['worker_floscript'], behaviors=behaviors, username='', password='', mode=None, houses=None, metas=None, preloads=preloads, verbose=int(self.opts['ioflo_verbose']), consolepath=consolepath, ) class SaltRaetWorkerSetup(ioflo.base.deeding.Deed): ''' FloScript: do salt raet worker setup at enter ''' Ioinits = { 'opts': '.salt.opts', 'windex': '.salt.var.fork.worker.windex', 'access_keys': '.salt.access_keys', 'remote_loader': '.salt.loader.remote', 'local_loader': '.salt.loader.local', 'inode': '.salt.lane.manor.', 'stack': 'stack', 'local': {'ipath': 'local', 'ival': {'lanename': 'master'}} } def action(self): ''' Set up the uxd stack and behaviors ''' name = "worker{0}".format(self.windex.value) # master application kind kind = self.opts.value['__role'] if kind not in kinds.APPL_KINDS: emsg = ("Invalid application kind = '{0}' for Master Worker.".format(kind)) log.error(emsg + "\n") raise 
ValueError(emsg) if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master], kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]: lanename = 'master' else: # workers currently are only supported for masters emsg = ("Invalid application kind '{0}' for Master Worker.".format(kind)) log.error(emsg + '\n') raise ValueError(emsg) sockdirpath = self.opts.value['sock_dir'] self.stack.value = LaneStack( name=name, lanename=lanename, sockdirpath=sockdirpath) self.stack.value.Pk = raeting.PackKind.pack.value manor_yard = RemoteYard( stack=self.stack.value, name='manor', lanename=lanename, dirpath=sockdirpath) self.stack.value.addRemote(manor_yard) self.remote_loader.value = salt.daemons.masterapi.RemoteFuncs( self.opts.value) self.local_loader.value = salt.daemons.masterapi.LocalFuncs( self.opts.value, self.access_keys.value) init = {} init['route'] = { 'src': (None, self.stack.value.local.name, None), 'dst': (None, manor_yard.name, 'worker_req') } self.stack.value.transmit(init, self.stack.value.fetchUidByName(manor_yard.name)) self.stack.value.serviceAll() def __del__(self): self.stack.server.close() class SaltRaetWorkerRouter(ioflo.base.deeding.Deed): ''' FloScript: do salt raet worker router ''' Ioinits = { 'lane_stack': '.salt.lane.manor.stack', 'road_stack': '.salt.road.manor.stack', 'opts': '.salt.opts', 'worker_verify': '.salt.var.worker_verify', 'remote_loader': '.salt.loader.remote', 'local_loader': '.salt.loader.local', } def action(self): ''' Read in a command and execute it, send the return back up to the main master process ''' self.lane_stack.value.serviceAll() while self.lane_stack.value.rxMsgs: msg, sender = self.lane_stack.value.rxMsgs.popleft() try: s_estate, s_yard, s_share = msg['route']['src'] d_estate, d_yard, d_share = msg['route']['dst'] except (ValueError, IndexError): log.error('Received invalid message: {0}'.format(msg)) return log.debug("**** Worker Router rxMsg\n msg= {0}\n".format(msg)) if 'load' in msg: cmd = msg['load'].get('cmd') if not cmd: continue elif cmd.startswith('__'): continue ret = {} if d_share == 'remote_cmd': if hasattr(self.remote_loader.value, cmd): ret['return'] = getattr(self.remote_loader.value, cmd)(msg['load']) elif d_share == 'local_cmd': if hasattr(self.local_loader.value, cmd): ret['return'] = getattr(self.local_loader.value, cmd)(msg['load']) else: ret = {'error': 'Invalid request'} if cmd == 'publish' and 'pub' in ret.get('return', {}): r_share = 'pub_ret' ret['__worker_verify'] = self.worker_verify.value else: r_share = s_share if cmd not in INHIBIT_RETURN: ret['route'] = { 'src': (None, self.lane_stack.value.local.name, None), 'dst': (s_estate, s_yard, r_share) } self.lane_stack.value.transmit(ret, self.lane_stack.value.fetchUidByName('manor')) self.lane_stack.value.serviceAll()
the-stack_106_17092
""" """ from typing import List import numpy as np import pandas as pd import statsmodels.api as sm import src.features.build_features as bf import src.data.download as dwn def reducto_explain_downloads( log_y: bool = False, log_x: bool = True, drop_columns: List[str] = None ): """Linear regression model to explain the total number of downloads per package. Given the variables source_lines, docstring_lines, comment_lines and blank_lines are a perfectly collinear, one of them must be removed, in this case blank_lines is chosen to be removed. Prints the model summary. Parameters ---------- log_y : bool Apply logs to endogenous variable. log_x : bool Apply logs to columns lines, average_function_length, number_of_functions and source_files. drop_columns : List[str] Columns to be dropped from explanatory variables. Due to high multicollinearity some variables may be better removed """ guide = bf.get_reducto_reports_relative(log=log_x) columns_ = [ 'lines', 'source_lines', 'docstring_lines', 'comment_lines', 'average_function_length', 'number_of_functions', 'source_files' ] guide = guide[columns_] downloads = dwn.get_downloads_per_package_root(guide=list(guide.index)) # downloads = dwn.get_downloads_per_package(guide=list(guide.index)) y = list(downloads.values()) X = guide.values X = sm.add_constant(X, prepend=True) columns = ['constant'] columns.extend(list(guide.columns)) X = pd.DataFrame(X, columns=columns) if drop_columns is not None: X.drop(drop_columns, axis=1, inplace=True) if log_y: y = np.log(y) # Fit and summarize OLS model mod = sm.OLS(y, X) res = mod.fit(cov_type='HC1') print(res.summary()) return res
the-stack_106_17096
# qubit number=3 # total number=12 import numpy as np from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ import networkx as nx from qiskit.visualization import plot_histogram from typing import * from pprint import pprint from math import log2 from collections import Counter from qiskit.test.mock import FakeVigo, FakeYorktown kernel = 'circuit/bernstein' def make_circuit(n:int) -> QuantumCircuit: # circuit begin input_qubit = QuantumRegister(n,"qc") prog = QuantumCircuit(input_qubit) prog.h(input_qubit[0]) # number=1 prog.cx(input_qubit[3],input_qubit[0]) # number=9 prog.z(input_qubit[3]) # number=10 prog.cx(input_qubit[3],input_qubit[0]) # number=11 prog.h(input_qubit[1]) # number=2 prog.z(input_qubit[1]) # number=8 prog.h(input_qubit[2]) # number=3 prog.h(input_qubit[3]) # number=4 for edge in E: k = edge[0] l = edge[1] prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1]) prog.p(gamma, k) prog.p(gamma, l) prog.rx(2 * beta, range(len(V))) prog.swap(input_qubit[3],input_qubit[0]) # number=5 prog.swap(input_qubit[3],input_qubit[0]) # number=6 # circuit end return prog if __name__ == '__main__': n = 4 V = np.arange(0, n, 1) E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)] G = nx.Graph() G.add_nodes_from(V) G.add_weighted_edges_from(E) step_size = 0.1 a_gamma = np.arange(0, np.pi, step_size) a_beta = np.arange(0, np.pi, step_size) a_gamma, a_beta = np.meshgrid(a_gamma, a_beta) F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * ( 1 + np.cos(4 * a_gamma) ** 2) result = np.where(F1 == np.amax(F1)) a = list(zip(result[0], result[1]))[0] gamma = a[0] * step_size beta = a[1] * step_size prog = make_circuit(4) sample_shot =5600 writefile = open("../data/startQiskit_Class223.csv", "w") # prog.draw('mpl', filename=(kernel + '.png')) backend = BasicAer.get_backend('statevector_simulator') circuit1 = transpile(prog, FakeYorktown()) prog = circuit1 info = execute(prog,backend=backend, shots=sample_shot).result().get_counts() print(info, file=writefile) print("results end", file=writefile) print(circuit1.depth(), file=writefile) print(circuit1, file=writefile) writefile.close()
the-stack_106_17098
import boto3 import dagster._check as check MAX_KEYS = 1000 def get_s3_keys(bucket, prefix="", since_key=None, s3_session=None): check.str_param(bucket, "bucket") check.str_param(prefix, "prefix") check.opt_str_param(since_key, "since_key") if not s3_session: s3_session = boto3.resource("s3", use_ssl=True, verify=True).meta.client cursor = "" contents = [] while True: response = s3_session.list_objects_v2( Bucket=bucket, Delimiter="", MaxKeys=MAX_KEYS, Prefix=prefix, StartAfter=cursor, ) contents.extend(response.get("Contents", [])) if response["KeyCount"] < MAX_KEYS: break cursor = response["Contents"][-1]["Key"] sorted_keys = [obj["Key"] for obj in sorted(contents, key=lambda x: x["LastModified"])] if not since_key or since_key not in sorted_keys: return sorted_keys for idx, key in enumerate(sorted_keys): if key == since_key: return sorted_keys[idx + 1 :] return []
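
# Illustrative sketch, assuming the function above is in scope: exercising get_s3_keys
# without AWS access by passing a stub that mimics only the parts of the
# list_objects_v2 response the function relies on (Contents, Key, LastModified,
# KeyCount). Bucket and key names are made up.
if __name__ == '__main__':
    import datetime

    class _StubS3Client:
        def __init__(self, keys):
            self._keys = keys

        def list_objects_v2(self, Bucket, Delimiter, MaxKeys, Prefix, StartAfter):
            contents = [
                {"Key": k, "LastModified": datetime.datetime(2020, 1, 1, 0, 0, i)}
                for i, k in enumerate(sorted(self._keys))
                if k.startswith(Prefix) and k > StartAfter
            ][:MaxKeys]
            return {"Contents": contents, "KeyCount": len(contents)}

    stub = _StubS3Client(["logs/a.json", "logs/b.json", "logs/c.json"])
    print(get_s3_keys("my-bucket", prefix="logs/", s3_session=stub))
    print(get_s3_keys("my-bucket", prefix="logs/", since_key="logs/b.json", s3_session=stub))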
the-stack_106_17100
#!/usr/bin/env python """ Reads a list of intervals and a maf. Produces a new maf containing the blocks or parts of blocks in the original that overlapped the intervals. If a MAF file, not UID, is provided the MAF file is indexed before being processed. NOTE: If two intervals overlap the same block it will be written twice. usage: %prog maf_file [options] -d, --dbkey=d: Database key, ie hg17 -c, --chromCol=c: Column of Chr -s, --startCol=s: Column of Start -e, --endCol=e: Column of End -S, --strandCol=S: Column of Strand -t, --mafType=t: Type of MAF source to use -m, --mafFile=m: Path of source MAF file, if not using cached version -I, --mafIndex=I: Path of precomputed source MAF file index, if not using cached version -i, --interval_file=i: Input interval file -o, --output_file=o: Output MAF file -p, --species=p: Species to include in output -P, --split_blocks_by_species=P: Split blocks by species -r, --remove_all_gap_columns=r: Remove all Gap columns -l, --indexLocation=l: Override default maf_index.loc file -z, --mafIndexFile=z: Directory of local maf index file ( maf_index.loc or maf_pairwise.loc ) """ # Dan Blankenberg from __future__ import print_function import bx.align.maf import bx.intervals.io from bx.cookbook import doc_optparse from galaxy.tools.util import maf_utilities def __main__(): index = index_filename = None # Parse Command Line options, args = doc_optparse.parse(__doc__) if options.dbkey: dbkey = options.dbkey else: dbkey = None if dbkey in [None, "?"]: maf_utilities.tool_fail("You must specify a proper build in order to extract alignments. You can specify your genome build by clicking on the pencil icon associated with your interval file.") species = maf_utilities.parse_species_option(options.species) if options.chromCol: chromCol = int(options.chromCol) - 1 else: maf_utilities.tool_fail("Chromosome column not set, click the pencil icon in the history item to set the metadata attributes.") if options.startCol: startCol = int(options.startCol) - 1 else: maf_utilities.tool_fail("Start column not set, click the pencil icon in the history item to set the metadata attributes.") if options.endCol: endCol = int(options.endCol) - 1 else: maf_utilities.tool_fail("End column not set, click the pencil icon in the history item to set the metadata attributes.") if options.strandCol: strandCol = int(options.strandCol) - 1 else: strandCol = -1 if options.interval_file: interval_file = options.interval_file else: maf_utilities.tool_fail("Input interval file has not been specified.") if options.output_file: output_file = options.output_file else: maf_utilities.tool_fail("Output file has not been specified.") split_blocks_by_species = remove_all_gap_columns = False if options.split_blocks_by_species and options.split_blocks_by_species == 'split_blocks_by_species': split_blocks_by_species = True if options.remove_all_gap_columns and options.remove_all_gap_columns == 'remove_all_gap_columns': remove_all_gap_columns = True else: remove_all_gap_columns = True # Finish parsing command line # Open indexed access to MAFs if options.mafType: if options.indexLocation: index = maf_utilities.maf_index_by_uid(options.mafType, options.indexLocation) else: index = maf_utilities.maf_index_by_uid(options.mafType, options.mafIndexFile) if index is None: maf_utilities.tool_fail("The MAF source specified (%s) appears to be invalid." 
                                 % (options.mafType))
    elif options.mafFile:
        index, index_filename = maf_utilities.open_or_build_maf_index(options.mafFile, options.mafIndex, species=[dbkey])
        if index is None:
            maf_utilities.tool_fail("Your MAF file appears to be malformed.")
    else:
        maf_utilities.tool_fail("Desired source MAF type has not been specified.")

    # Create MAF writer
    out = bx.align.maf.Writer(open(output_file, "w"))

    # Iterate over input regions
    num_blocks = 0
    num_regions = None
    for num_regions, region in enumerate(bx.intervals.io.NiceReaderWrapper(open(interval_file, 'r'), chrom_col=chromCol, start_col=startCol, end_col=endCol, strand_col=strandCol, fix_strand=True, return_header=False, return_comments=False)):
        src = maf_utilities.src_merge(dbkey, region.chrom)
        for block in index.get_as_iterator(src, region.start, region.end):
            if split_blocks_by_species:
                blocks = [new_block for new_block in maf_utilities.iter_blocks_split_by_species(block) if maf_utilities.component_overlaps_region(new_block.get_component_by_src_start(dbkey), region)]
            else:
                blocks = [block]
            for block in blocks:
                block = maf_utilities.chop_block_by_region(block, src, region)
                if block is not None:
                    if species is not None:
                        block = block.limit_to_species(species)
                    block = maf_utilities.orient_block_by_region(block, src, region)
                    if remove_all_gap_columns:
                        block.remove_all_gap_columns()
                    out.write(block)
                    num_blocks += 1

    # Close output MAF
    out.close()

    # remove index file if created during run
    maf_utilities.remove_temp_index_file(index_filename)

    if num_blocks:
        print("%i MAF blocks extracted for %i regions." % (num_blocks, (num_regions + 1)))
    elif num_regions is not None:
        print("No MAF blocks could be extracted for %i regions." % (num_regions + 1))
    else:
        print("No valid regions have been provided.")


if __name__ == "__main__":
    __main__()
the-stack_106_17101
#learning to modify the attributes of the turtle and window objects
#Modify this program so that before it creates the window, it prompts the user to enter the desired background color. It should store the user's response in a variable, and modify the color of the window according to the user's wishes.
#Do similar changes to allow the user, at runtime, to set tess' color.
#Do the same for the width of tess' pen. Hint: your dialog with the user will return a string, but tess' pensize method expects its argument to be an int. So you'll need to convert the string to an int before you pass it to pensize.

import turtle

background = input("What background color should the window have? ")
turtlecolor = input("What color should Tess have? ")
pensize = int(input("And what should the width of Tess' pen be? "))

wn = turtle.Screen()
wn.bgcolor(background)          # Set the window background color
wn.title("Hello, Tess!")        # Set the window title

tess = turtle.Turtle()
tess.color(turtlecolor)         # Tell tess to change her color
tess.pensize(pensize)           # Tell tess to set her pen width to the value the user entered

tess.forward(50)
tess.left(120)
tess.forward(50)

wn.mainloop()
the-stack_106_17107
import os from datetime import datetime from logging import getLogger, Logger from typing import Dict, Any, List import json import jwt import requests from requests.exceptions import HTTPError BASE_URL: str = 'https://api.liquid.com' """API Base URL""" SIDE_BUY: str = 'buy' """Side: buy""" SIDE_SELL: str = 'sell' """Side: sell""" MIN_ORDER_QUANTITY: float = 0.001 """minimum order quantity""" PRODUCT_ID_BTCJPY: int = 5 """Product ID: BTC/JPY""" PRODUCT_ID_ETHJPY: int = 29 """Product ID: ETH/JPY""" PRODUCT_ID_XRPJPY: int = 83 """Product ID: XRP/JPY""" PRODUCT_ID_BCHJPY: int = 41 """Product ID: BCH/JPY""" PRODUCT_ID_QASHJPY: int = 50 """Product ID: QASH/JPY""" ORDER_STATUS_LIVE: str = 'live' """Order status: live""" ORDER_STATUS_CANCELLED: str = 'cancelled' """Order status: cancelled""" ORDER_STATUS_FILLED: str = 'filled' """Order status: filled""" ORDER_STATUS_FILLED: str = 'partially_filled' """Order status: partially_filled""" logger: Logger = getLogger(__name__) def privateapi(func): def wrapper(self, *args, **kwargs): if not self.api_key or not self.api_secret: raise ValueError('api_key and api_secret are required.') return func(self, *args, **kwargs) return wrapper class Liquid(object): ''' Liquid REST API Client ''' def __init__(self, api_key: str = None, api_secret: str = None): self.api_key = api_key if api_key else os.getenv('LIQUID_API_KEY', '') self.api_secret = api_secret if api_secret else os.getenv('LIQUID_API_SECRET', '') self.s = requests.Session() def __del__(self): self.s.close() def _create_auth_headers(self, path: str) -> dict: ''' _create_auth_headers creates authentication header to call private API Parameters ---------- path: str API path included in URI Returns ------- dict Authentication headers to use private API ''' payload = { 'path': path, 'nonce': int(datetime.now().timestamp() * 1000), 'token_id': self.api_key } return { 'X-Quoine-Auth': jwt.encode(payload, self.api_secret, algorithm='HS256'), 'X-Quoine-API-Version': '2', 'Content-Type': 'application/json' } def get_products(self, product_id: int = 0) -> Dict[str, Any]: path = '/products' + (f'/{product_id}' if product_id else '') res = self.s.get(BASE_URL + path) if not res.ok: logger.error(f'Failed to get products.') raise HTTPError(f'status: {res.status_code}, text: {res.text}') return json.loads(res.text) @privateapi def get_accounts_balance(self) -> Dict[str, Any]: path = '/accounts/balance' res = self.s.get(BASE_URL + path, headers=self._create_auth_headers(path)) if not res.ok: logger.error(f'Failed to get accounts balance.') raise HTTPError(f'status: {res.status_code}, text: {res.text}') return json.loads(res.text) @privateapi def get_orders(self, status: str = None) -> List[Dict[str, Any]]: path = '/orders' + (f'?status={status}' if status else "") res = self.s.get(BASE_URL + path, headers=self._create_auth_headers(path)) if not res.ok: raise HTTPError(f'status: {res.status_code}, text: {res.text}') return json.loads(res.text)['models'] @privateapi def cancel_order(self, id: str) -> None: path = f"/orders/{id}/cancel" res = self.s.put(BASE_URL + path, headers=self._create_auth_headers(path)) if not res.ok: logger.error(f'Failed to cancel order. [id={id}]') raise HTTPError(f'status: {res.status_code}, text: {res.text}') logger.info(f'Order has been cancelled. [id={id}]') @privateapi def create_order(self, product_id: int, side: str, quantity: float, price: int = None) -> Dict[str, Any]: if quantity < MIN_ORDER_QUANTITY: raise ValueError(f'Order quantity {quantity:.8f} is too small. 
Specify {MIN_ORDER_QUANTITY} or more.')

        order = {
            'product_id': product_id,
            'side': side,
            'quantity': quantity
        }
        if price:
            order.update({
                'order_type': 'limit',
                'price': price
            })
        else:
            order.update({
                'order_type': 'market'
            })

        headers = self._create_auth_headers('/orders/')
        res = self.s.post(
            BASE_URL + '/orders/',
            data=json.dumps({'order': order}),
            headers=headers)
        if not res.ok:
            logger.error(f'Failed to create an order. [product_id={product_id}, side={side}, price={price}, quantity={quantity}]')
            raise HTTPError(f'status: {res.status_code}: text: {res.text}')
        body = json.loads(res.text)
        logger.info(f"Order has been created. [order_id={body['id']}, product_id={product_id}, side={side}, price={price}, quantity={quantity}]")
        return body

    @privateapi
    def cancel_all_orders(self) -> None:
        for o in self.get_orders(status=ORDER_STATUS_LIVE):
            self.cancel_order(id=o['id'])

    @privateapi
    def get_fiat_deposit_requests(self, currency: str = 'JPY') -> Dict[str, Any]:
        path = f'/fund_infos?currency={currency}'
        res = self.s.get(BASE_URL + path, headers=self._create_auth_headers(path))
        if not res.ok:
            logger.error(f'Failed to get fiat deposit requests.')
            raise HTTPError(f'status: {res.status_code}, text: {res.text}')
        return json.loads(res.text)

    @privateapi
    def get_fiat_deposits_history(self, currency: str = 'JPY', page: int = 0, limit: int = 20) -> Dict[str, Any]:
        path = f'/transactions?transaction_type=funding&currency={currency}'
        path += f'&page={page}' if page else ''
        path += f'&limit={limit}' if limit else ''
        res = self.s.get(BASE_URL + path, headers=self._create_auth_headers(path))
        if not res.ok:
            logger.error(f'Failed to get fiat deposits history.')
            raise HTTPError(f'status: {res.status_code}, text: {res.text}')
        return json.loads(res.text)

    @privateapi
    def get_executions_me(self, product_id: str, timestamp: int = None, page: int = 1, limit: int = 200) -> List[Dict[str, Any]]:
        path = f'/executions/me?product_id={product_id}&page={page}&limit={limit}' + (f'&timestamp={timestamp}' if timestamp else '')
        res = self.s.get(BASE_URL + path, headers=self._create_auth_headers(path))
        if not res.ok:
            logger.error(f'Failed to get execution history.')
            raise HTTPError(f'status: {res.status_code}, text: {res.text}')
        return json.loads(res.text)
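
# Illustrative usage sketch for the client above. Running it requires valid
# LIQUID_API_KEY / LIQUID_API_SECRET values in the environment for the private
# endpoints; the price and quantity are placeholders, not a recommendation.
if __name__ == '__main__':
    client = Liquid()

    # Public endpoint: product info for BTC/JPY.
    print(client.get_products(product_id=PRODUCT_ID_BTCJPY))

    # Private endpoints: place a minimal limit order, then cancel it.
    order = client.create_order(product_id=PRODUCT_ID_BTCJPY,
                                side=SIDE_BUY,
                                quantity=MIN_ORDER_QUANTITY,
                                price=1000000)
    client.cancel_order(id=order['id'])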
the-stack_106_17112
# -*- coding: utf-8 -*- import os import logging import shutil import hashlib import collections import numpy as np import torch from PIL import Image LOGGER = logging.getLogger(__name__) class Pad: def __init__(self, border, mode='reflect'): self.border = border self.mode = mode def __call__(self, image): img = np.pad(image, [(self.border, self.border), (self.border, self.border), (0, 0)], mode=self.mode) return Image.fromarray(img) class Cutout: def __init__(self, height, width): self.height = height self.width = width def __call__(self, image): h, w = image.size(1), image.size(2) mask = np.ones((h, w), np.float32) y = np.random.choice(range(h)) x = np.random.choice(range(w)) y1 = np.clip(y - self.height // 2, 0, h) y2 = np.clip(y + self.height // 2, 0, h) x1 = np.clip(x - self.width // 2, 0, w) x2 = np.clip(x + self.width // 2, 0, w) mask[y1: y2, x1: x2] = 0. mask = torch.from_numpy(mask).to(device=image.device, dtype=image.dtype) mask = mask.expand_as(image) image *= mask return image def __repr__(self): return self.__class__.__name__ + '(height={0}, width={1})'.format(self.height, self.width) class TensorRandomHorizontalFlip: def __call__(self, tensor): choice = np.random.choice([True, False]) return torch.flip(tensor, dims=[-1]) if choice else tensor class TensorRandomCrop: def __init__(self, height, width): self.height = height self.width = width def __call__(self, tensor): C, H, W = tensor.shape h = np.random.choice(range(H + 1 - self.height)) w = np.random.choice(range(W + 1 - self.width)) return tensor[:, h:h+self.height, w:w+self.width] class ImageWriter: def __init__(self, root, delete_folder_exists=True): self.root = root if delete_folder_exists and os.path.exists(self.root): shutil.rmtree(self.root) os.makedirs(self.root, exist_ok=True) def __call__(self, image): filename = hashlib.md5(image.tobytes()).hexdigest() filepath = os.path.join(self.root, filename + '.jpg') with open(filepath, 'wb') as f: image.save(f, format='jpeg') return image
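
# Illustrative pipeline chaining the helpers above on a random image. The sizes, the
# output directory and the scaling back to uint8 are arbitrary choices for this sketch.
if __name__ == '__main__':
    rgb = np.random.randint(0, 255, size=(32, 32, 3), dtype=np.uint8)

    padded = Pad(border=4)(Image.fromarray(rgb))                      # 40x40 PIL image
    tensor = torch.from_numpy(np.array(padded)).permute(2, 0, 1).float() / 255.0
    tensor = TensorRandomCrop(32, 32)(tensor)                         # back to 3x32x32
    tensor = TensorRandomHorizontalFlip()(tensor)
    tensor = Cutout(height=8, width=8)(tensor)

    out = Image.fromarray((tensor.permute(1, 2, 0).numpy() * 255).astype(np.uint8))
    ImageWriter('augmented_samples')(out)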
the-stack_106_17114
import numpy as np from pyscf import lib, gto from kspies import wy import matplotlib.pyplot as plt from scipy.linalg import toeplitz from scipy.linalg import eigh #Define system x = np.linspace(-10, 10, 201) #Domain h = (x[-1]-x[0])/(len(x)-1) #grid spacing n = len(x) #Dimension of basis a = np.zeros(len(x)) a[0] = 2. a[1] = -1. T = toeplitz(a,a)/(2*h**2) #Kinetic energy matrix by 2nd order FD S = np.identity(len(x)) #Overlap matrix k = 0.25 V = np.diag(0.5*k*x**2) #Harmonic potential matrix l = 0.5 #1D e-e soft repulsion parameter def deno(l): b = np.expand_dims(x, axis=0) dist = abs(b-b.T) return 1./np.sqrt(dist**2+l**2) def get_J(dm): J = np.diag(np.einsum('ii,ik->k', dm, deno(l))) return J def get_K(dm): K = np.einsum('il,il->il', dm, deno(l)) return K #Pass to mole object mol = gto.M() mol.nelectron = 4 mol.verbose = 0 mol.incore_anyway = True #Solve HF equation F = T+V for i in range(15): e,C = eigh(F,S) dm = 2*np.einsum('ik,jk->ij', C[:,:mol.nelectron//2], C[:,:mol.nelectron//2]) J = get_J(dm) K = get_K(dm) F = T+V+J-0.5*K print("EHF = ",np.einsum('ij,ji', T+V+0.5*J-0.25*K, dm)) dm_tar = dm plt.plot(x, 10*np.diag(dm_tar)/h, label='den(HF)', color='black') # x10 scaled density #Three-center overlap integral Sijt = np.zeros((n,n,n)) for i in range(n): Sijt[i,i,i] = 1. #Run WY mw = wy.RWY(mol, dm_tar, Sijt=Sijt) mw.tol = 1e-7 mw.method = 'bfgs' mw.T = T #Kinetic energy matrix - finite difference in this example mw.Tp = T #Kinetic energy matrix in potential basis mw.V = V #External potential matrix mw.S = S #Overlap matrix mw.guide = None mw.run() mw.info() mw.time_profile() #Plotting Vb = np.diag(mw.b) #-mw.b[50]) #KS potential is unique up to a constant. plt.plot(x, 10*np.diag(mw.dm)/h, label='den(WY)', color='red', linestyle='--') # x10 scaled density plt.plot(x, np.diag(V), label=r'$v_{ext}$(r)') plt.plot(x, np.diag(V+Vb), label=r'$v_{S}$(r)') plt.plot(x, 1e+6*np.diag(mw.dm-dm_tar)/h,label='den(WY-HF)', color='blue', linestyle='--') # x10^6 scaled diff plt.xlim(-10, 10) plt.ylim(-0.5, 10) plt.tight_layout() plt.legend() plt.show()
the-stack_106_17115
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision import torchvision.transforms as transforms from nni.nas.pytorch.mutables import LayerChoice, InputChoice from nni.nas.pytorch.darts import DartsTrainer class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = LayerChoice([nn.Conv2d(3, 6, 3, padding=1), nn.Conv2d(3, 6, 5, padding=2)]) self.pool = nn.MaxPool2d(2, 2) self.conv2 = LayerChoice([nn.Conv2d(6, 16, 3, padding=1), nn.Conv2d(6, 16, 5, padding=2)]) self.conv3 = nn.Conv2d(16, 16, 1) self.skipconnect = InputChoice(n_candidates=1) self.bn = nn.BatchNorm2d(16) self.gap = nn.AdaptiveAvgPool2d(4) self.fc1 = nn.Linear(16 * 4 * 4, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): bs = x.size(0) x = self.pool(F.relu(self.conv1(x))) x0 = F.relu(self.conv2(x)) x1 = F.relu(self.conv3(x0)) x0 = self.skipconnect([x0]) if x0 is not None: x1 += x0 x = self.pool(self.bn(x1)) x = self.gap(x).view(bs, -1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def accuracy(output, target): batch_size = target.size(0) _, predicted = torch.max(output.data, 1) return {"acc1": (predicted == target).sum().item() / batch_size} if __name__ == "__main__": transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) dataset_train = torchvision.datasets.CIFAR10(root="./data", train=True, download=True, transform=transform) dataset_valid = torchvision.datasets.CIFAR10(root="./data", train=False, download=True, transform=transform) net = Net() criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) trainer = DartsTrainer(net, loss=criterion, metrics=accuracy, optimizer=optimizer, num_epochs=2, dataset_train=dataset_train, dataset_valid=dataset_valid, batch_size=64, log_frequency=10) trainer.train() trainer.export("checkpoint.json")
the-stack_106_17116
import numbers from . import meter import numpy as np import torch class AUCMeter(meter.Meter): """ The AUCMeter measures the area under the receiver-operating characteristic (ROC) curve for binary classification problems. The area under the curve (AUC) can be interpreted as the probability that, given a randomly selected positive example and a randomly selected negative example, the positive example is assigned a higher score by the classification model than the negative example. The AUCMeter is designed to operate on one-dimensional Tensors `output` and `target`, where (1) the `output` contains model output scores that ought to be higher when the model is more convinced that the example should be positively labeled, and smaller when the model believes the example should be negatively labeled (for instance, the output of a signoid function); and (2) the `target` contains only values 0 (for negative examples) and 1 (for positive examples). """ def __init__(self): super(AUCMeter, self).__init__() self.reset() def reset(self): self.scores = torch.DoubleTensor(torch.DoubleStorage()).numpy() self.targets = torch.LongTensor(torch.LongStorage()).numpy() def add(self, output, target): if torch.is_tensor(output): output = output.cpu().squeeze().numpy() if torch.is_tensor(target): target = target.cpu().squeeze().numpy() elif isinstance(target, numbers.Number): target = np.asarray([target]) if np.ndim(output) != 1: raise AssertionError('wrong output size (1D expected)') if np.ndim(target) != 1: raise AssertionError('wrong target size (1D expected)') if output.shape[0] != target.shape[0]: raise AssertionError('number of outputs and targets does not match') if not np.all(np.add(np.equal(target, 1), np.equal(target, 0))): raise AssertionError('targets should be binary (0, 1)') self.scores = np.append(self.scores, output) self.targets = np.append(self.targets, target) def value(self): # case when number of elements added are 0 if self.scores.shape[0] == 0: return 0.5 # sorting the arrays scores, sortind = torch.sort(torch.from_numpy(self.scores), dim=0, descending=True) scores = scores.numpy() sortind = sortind.numpy() # creating the roc curve tpr = np.zeros(shape=(scores.size + 1), dtype=np.float64) fpr = np.zeros(shape=(scores.size + 1), dtype=np.float64) for i in range(1, scores.size + 1): if self.targets[sortind[i - 1]] == 1: tpr[i] = tpr[i - 1] + 1 fpr[i] = fpr[i - 1] else: tpr[i] = tpr[i - 1] fpr[i] = fpr[i - 1] + 1 tpr /= (self.targets.sum() * 1.0) fpr /= ((self.targets - 1.0).sum() * -1.0) # calculating area under curve using trapezoidal rule n = tpr.shape[0] h = fpr[1:n] - fpr[0:n - 1] sum_h = np.zeros(fpr.shape) sum_h[0:n - 1] = h sum_h[1:n] += h area = (sum_h * tpr).sum() / 2.0 return (area, tpr, fpr)
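
# Minimal usage sketch, assuming AUCMeter has been imported from its package (the
# relative import above means this module is not run directly). With a perfectly
# ranked toy batch the returned AUC is 1.0.
#
#   meter_ = AUCMeter()
#   meter_.add(torch.tensor([0.9, 0.8, 0.3, 0.2]), torch.tensor([1, 1, 0, 0]))
#   auc, tpr, fpr = meter_.value()
#   print(auc)  # 1.0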
the-stack_106_17117
# MIT License # # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. #basic modules import time import sys import os import numpy as np #matplotlib import matplotlib as mpl mpl.use('agg') from matplotlib.backends.backend_pdf import PdfPages import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap #basemap from mpl_toolkits.basemap import Basemap class CamVisualizer(object): def __init__(self): # Create figre lats = np.linspace(-90,90,768) longs = np.linspace(-180,180,1152) self.my_map = Basemap(projection='gall', llcrnrlat=min(lats), llcrnrlon=min(longs), urcrnrlat=max(lats), urcrnrlon=max(longs), resolution = 'i') xx, yy = np.meshgrid(longs, lats) self.x_map, self.y_map = self.my_map(xx,yy) # Create new colormap colors_1 = [(252-32*i,252-32*i,252-32*i,i*1/16) for i in np.linspace(0, 1, 32)] colors_2 = [(220-60*i,220-60*i,220,i*1/16+1/16) for i in np.linspace(0, 1, 32)] colors_3 = [(160-20*i,160+30*i,220,i*3/8+1/8) for i in np.linspace(0, 1, 96)] colors_4 = [(140+80*i,190+60*i,220+30*i,i*4/8+4/8) for i in np.linspace(0, 1, 96)] colors = colors_1 + colors_2 + colors_3 + colors_4 colors = list(map(lambda c: (c[0]/256,c[1]/256,c[2]/256,c[3]), colors)) self.my_cmap = mpl.colors.LinearSegmentedColormap.from_list('mycmap', colors, N=64) #print once so that everything is set up self.my_map.bluemarble() self.my_map.drawcoastlines() def plot(self, filename, title_prefix, data, label, year, month, day, hour): # Get data tstart = time.time() data = np.roll(data,[0,int(1152/2)]) # Get labels label = np.roll(label, [0,int(1152/2)]) l1 = (label == 1) l2 = (label == 2) print("extract data: {}".format(time.time() - tstart)) #pdf #with PdfPages(filename+'.pdf') as pdf: #get figure fig = plt.figure(figsize=(100,20), dpi=100) #draw stuff tstart = time.time() self.my_map.bluemarble() self.my_map.drawcoastlines() print("draw background: {}".format(time.time() - tstart)) # Plot data tstart = time.time() self.my_map.contourf(self.x_map, self.y_map, data, 128, vmin=0, vmax=89, cmap=self.my_cmap, levels=np.arange(0,89,2)) print("draw data: {}".format(time.time() - tstart)) # Plot colorbar tstart = time.time() cbar = self.my_map.colorbar(ticks=np.arange(0,89,11)) cbar.ax.set_ylabel('Integrated Water Vapor kg $m^{-2}$',size=32) cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=28) print("draw colorbar: {}".format(time.time() - tstart)) # Draw Tropical Cyclones & Atmospheric Rivers tstart = time.time() tc_contour = 
self.my_map.contour(self.x_map, self.y_map, l1, [0.5], linewidths=3, colors='orange')
        ar_contour = self.my_map.contour(self.x_map, self.y_map, l2, [0.5], linewidths=3, colors='magenta')
        print("draw contours: {}".format(time.time() - tstart))

        tstart = time.time()
        self.my_map.drawmeridians(np.arange(-180, 180, 60), labels=[0,0,0,1])
        self.my_map.drawparallels(np.arange(-90, 90, 30), labels =[1,0,0,0])
        print("draw meridians: {}".format(time.time() - tstart))

        # Plot legend and title
        tstart = time.time()
        lines = [tc_contour.collections[0], ar_contour.collections[0]]
        labels = ['Tropical Cyclone', "Atmospheric River"]
        plt.legend(lines, labels, loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=2)
        plt.setp(plt.gca().get_legend().get_texts(), fontsize='38')
        plt.title("{} Extreme Weather Patterns {:04d}-{:02d}-{:02d}".format(title_prefix, int(year), int(month), int(day)), fontdict={'fontsize': 44})
        print("draw legend/title: {}".format(time.time() - tstart))

        tstart = time.time()
        #pdf.savefig(bbox_inches='tight')
        #mask_ex = plt.gcf()
        #mask_ex.savefig(filename, bbox_inches='tight')
        plt.gcf().savefig(filename, format="PNG", bbox_inches='tight')
        plt.clf()
        print("save plot: {}".format(time.time() - tstart))
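
# Illustrative driver for the visualizer above. It assumes the basemap stack is
# installed and feeds random values on the expected 768 x 1152 lat/lon grid in place
# of real integrated-water-vapour data and 0/1/2 (background / tropical cyclone /
# atmospheric river) labels; the output file name is arbitrary.
if __name__ == '__main__':
    viz = CamVisualizer()
    data = np.random.uniform(0.0, 89.0, size=(768, 1152))
    label = np.random.choice([0, 1, 2], size=(768, 1152))
    viz.plot('example_iwv.png', 'Sample', data, label, 2010, 1, 1, 0)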
the-stack_106_17118
from numpy.distutils.core import setup, Extension import platform import sys import os conf = { 'fortran_lib' : None, 'fortran_library_dir' : None } if (os.environ.has_key('DFLAGS') == True): if (os.environ['DFLAGS'] == ''): import distutils.sysconfig old_str = distutils.sysconfig._config_vars['CFLAGS'] distutils.sysconfig._config_vars['CFLAGS'] = old_str.replace('-g','') old_str = distutils.sysconfig._config_vars['OPT'] distutils.sysconfig._config_vars['OPT'] = old_str.replace('-g','') if (os.environ.has_key('OCFLAGS') == True): oflag = os.environ['OCFLAGS'] import distutils.sysconfig old_str = distutils.sysconfig._config_vars['CFLAGS'] distutils.sysconfig._config_vars['CFLAGS'] = old_str.replace('-O ','').replace('-O0','').replace('-O1','').replace('-O2','').replace('-O3','').replace('-O4','').replace('-O5','') + ' ' + oflag old_str = distutils.sysconfig._config_vars['OPT'] distutils.sysconfig._config_vars['OPT'] = old_str.replace('-O ','').replace('-O0','').replace('-O1','').replace('-O2','').replace('-O3','').replace('-O4','').replace('-O5','') + ' ' + oflag needf90 = False if platform.system() == 'Darwin': needf90 = True from numpy.distutils.fcompiler.gnu import GnuFCompiler GnuFCompiler.old_get_libraries = GnuFCompiler.get_libraries GnuFCompiler.old_get_library_dirs = GnuFCompiler.get_library_dirs # Have also found that on OS X, keeping cc_dynamic on the # link line prevents linkage--fortunately it appears that # none of our code requires cc_dynamic anyway. def get_libraries_gnuf(self): from numpy.distutils.fcompiler.gnu import Gnu95FCompiler libs = self.old_get_libraries() if ('cc_dynamic' in libs and isinstance(self, Gnu95FCompiler) == False): libs.remove('cc_dynamic') return libs def get_libraries_g95(self): libs = self.old_get_libraries() new_libs = ['SystemStubs'] if (conf['fortran_lib'] != None): new_libs = [conf['fortran_lib'], 'SystemStubs'] for l in new_libs: if l not in libs: libs.append(l) return libs # Need to add path to library for g2c, even though # g2c itself *is* already in list of libraries def get_library_dirs(self): dirs = self.old_get_library_dirs() if (conf['fortran_library_dir'] != None and conf['fortran_library_dir'] != './'): if conf['fortran_library_dir'] not in dirs: dirs.append(conf['fortran_library_dir']) else: # Try a likely path if fortran_library_dir points nowhere if '/sw/lib' not in dirs: dirs.append('/sw/lib') return dirs GnuFCompiler.get_libraries = get_libraries_gnuf GnuFCompiler.get_library_dirs = get_library_dirs # # Block gfortran from adding superfluous -arch flags # def _universal_flags(self, cmd): # # These need to come from CCEXTRA_ARGS, without setting # # LDFLAGS # return ['-arch', 'i386', '-arch', 'x86_64'] from numpy.distutils.fcompiler.gnu import Gnu95FCompiler # Gnu95FCompiler._universal_flags = _universal_flags # If on the Intel Mac, must use g95 to compile Fortran # Use gcc as linker, because g95 just isn't helping us make # bundles on OS X from numpy.distutils.fcompiler.g95 import G95FCompiler G95FCompiler.old_get_library_dirs = G95FCompiler.get_library_dirs G95FCompiler.old_get_libraries = G95FCompiler.get_libraries G95FCompiler.get_library_dirs = get_library_dirs G95FCompiler.get_libraries = get_libraries_g95 G95FCompiler.executables['linker_so'] = ["gcc","-undefined dynamic_lookup -bundle"] extension_modules = [ Extension('_model', ['beta.f', 'gauss.f']) ] setup(name='_model', ext_modules=extension_modules)
the-stack_106_17119
#!/usr/bin/env python3 from pprint import pprint from flask import Flask, request, jsonify app = Flask(__name__) @app.route("/webhooks/inbound-message", methods=['POST']) def inbound_message(): data = request.get_json() pprint(data) return "200" if __name__ == '__main__': app.run(host="www.example.org", port=3000)
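
# One way to exercise the endpoint above from another process, assuming the server is
# running and reachable at the host/port it binds to. The payload is arbitrary -- the
# handler simply pprints whatever JSON it receives.
#
#   import requests
#   requests.post("http://www.example.org:3000/webhooks/inbound-message",
#                 json={"msisdn": "447700900001", "text": "Hello"})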
the-stack_106_17120
# !/usr/bin/env python # -*- coding: UTF-8 -*- """ ONTOSPY Copyright (c) __Michele Pasin__ <http://www.michelepasin.org>. All rights reserved. """ from __future__ import print_function from colorama import Fore, Style import sys import os, os.path import time import optparse import shutil import requests import platform import subprocess import rdflib import datetime try: import cPickle except ImportError: import pickle as cPickle try: import urllib2 except ImportError: # print("python3") import urllib.request from urllib.request import urlopen try: from urllib import quote # Python 2.X except ImportError: from urllib.parse import quote # Python 3+ try: from ConfigParser import SafeConfigParser except ImportError: # python3 from configparser import SafeConfigParser # Fix Python 2.x. try: input = raw_input except NameError: pass from . import * from .ontospy import Ontospy from .utils import * from .manager import * # =========== # ACTIONS FIRED FROM THE SHELL OR COMMAND LINE # note: all actions are loaded in ontospy.py and called from other modules as 'ontospy.action_bootstrap' etc... # =========== def action_analyze(sources, endpoint=None, print_opts=False, verbose=False, extra=False, raw=False): """ Load up a model into ontospy and analyze it """ for x in sources: printDebug("Parsing %s..." % str(x), fg='white') if extra: hide_base_schemas = False hide_implicit_types = False hide_implicit_preds = False else: hide_base_schemas = True hide_implicit_types = True hide_implicit_preds = True if raw: o = Ontospy(uri_or_path=sources, verbose=verbose, build_all=False) s = o.serialize() printInfo(s) return elif endpoint: g = Ontospy( sparql_endpoint=sources[0], verbose=verbose, hide_base_schemas=hide_base_schemas, hide_implicit_types=hide_implicit_types, hide_implicit_preds=hide_implicit_preds) printDebug("Extracting classes info") g.build_classes() printDebug("..done") printDebug("Extracting properties info") g.build_properties() printDebug("..done") else: g = Ontospy( uri_or_path=sources, verbose=verbose, hide_base_schemas=hide_base_schemas, hide_implicit_types=hide_implicit_types, hide_implicit_preds=hide_implicit_preds) shellPrintOverview(g, print_opts) def action_reveal_library(): path = get_home_location() if platform.system() == "Windows": os.startfile(path) elif platform.system() == "Darwin": subprocess.Popen(["open", path]) else: subprocess.Popen(["xdg-open", path]) def action_serialize(source, out_fmt="turtle", verbose=False): """ Util: render RDF into a different serialization valid options are: xml, n3, turtle, nt, pretty-xml, json-ld """ o = Ontospy(uri_or_path=source, verbose=verbose, build_all=False) s = o.serialize(out_fmt) printInfo(s) def action_jsonld_playground(source_path, verbose=False): """ Util: sends a json-ld file to the awesome https://json-ld.org/playground/ """ import webbrowser BASE_URL = "https://json-ld.org/playground/#startTab=tab-expanded&json-ld=" my_file_handle = None printDebug("Preparing... : %s" % str(source_path), "comment") try: my_file_handle = open(source_path) except IOError: printDebug("------------------\nFile not found or path is incorrect", "important") if my_file_handle: webbrowser.open(BASE_URL + quote(my_file_handle.read())) def action_listlocal(all_details=True): " select a file from the local repo " options = get_localontologies() counter = 1 # printDebug("------------------", 'comment') if not options: printDebug( "Your local library is empty. Use 'ontospy lib --bootstrap' to add some ontologies to it." 
) return else: if all_details: _print_table_ontologies() else: _print2cols_ontologies() while True: printDebug( "------------------\nSelect a model by typing its number: (enter=quit)", "important") var = input() if var == "" or var == "q": return None else: try: _id = int(var) ontouri = options[_id - 1] # printDebug("\nYou selected:", "comment") printDebug( "---------\nYou selected: " + ontouri + "\n---------", "green") return ontouri except: printDebug("Please enter a valid option.", "comment") continue def _print2cols_ontologies(): ontologies = get_localontologies() if ontologies: printDebug("------------", "tip") counter = 0 out = [] for x in ontologies: counter += 1 out += ["[%s] %s" % (str(counter), x)] pprint2columns(out, max_length=60) def _print_table_ontologies(): """ list all local files 2015-10-18: removed 'cached' from report 2016-06-17: made a subroutine of action_listlocal() """ ontologies = get_localontologies() ONTOSPY_LOCAL_MODELS = get_home_location() if ontologies: printDebug("") temp = [] from collections import namedtuple Row = namedtuple('Row', ['N', 'Added', 'File']) # Row = namedtuple('Row',['N','Added','Cached', 'File']) counter = 0 for file in ontologies: counter += 1 _counter = str(counter) # name = Style.BRIGHT + file + Style.RESET_ALL name = click.style(file, fg='green') try: mtime = os.path.getmtime(ONTOSPY_LOCAL_MODELS + "/" + file) except OSError: mtime = 0 last_modified_date = str(datetime.datetime.fromtimestamp(mtime)) # cached = str(os.path.exists(ONTOSPY_LOCAL_CACHE + "/" + file + ".pickle")) temp += [Row(_counter, last_modified_date, name)] pprinttable(temp) printDebug("") return def action_import(location, verbose=True): """ Import files into the local repo """ location = str(location) # prevent errors from unicode being passed # 1) extract file from location and save locally ONTOSPY_LOCAL_MODELS = get_home_location() fullpath = "" try: if location.startswith("www."): #support for lazy people location = "http://%s" % str(location) if location.startswith("http"): headers = {'Accept': "application/rdf+xml"} try: # Py2 req = urllib2.Request(location, headers=headers) res = urllib2.urlopen(req) except: # Py3 req = urllib.request.Request(location, headers=headers) res = urlopen(req) final_location = res.geturl() # after 303 redirects printDebug("Saving data from <%s>" % final_location, "green") # filename = final_location.split("/")[-1] or final_location.split("/")[-2] filename = location.replace("http://", "").replace("/", "_") if not filename.lower().endswith( ('.rdf', '.owl', '.rdfs', '.ttl', '.n3')): filename = filename + ".rdf" fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename # 2016-04-08 # fullpath = ONTOSPY_LOCAL_MODELS + filename # print("==DEBUG", final_location, "**", filename,"**", fullpath) file_ = open(fullpath, 'wb') file_.write(res.read()) file_.close() else: if os.path.isfile(location): filename = location.split("/")[-1] or location.split("/")[-2] fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename shutil.copy(location, fullpath) else: raise ValueError('The location specified is not a file.') except: printDebug( "Error retrieving file. Please make sure <%s> is a valid location." % location, "important") if os.path.exists(fullpath): os.remove(fullpath) return None try: g = Ontospy(fullpath, verbose=verbose) # printDebug("----------") except: g = None if os.path.exists(fullpath): os.remove(fullpath) printDebug( "Error parsing file. Please make sure %s contains valid RDF." 
% location, "important") if g: printDebug("Caching...", "red") do_pickle_ontology(filename, g) printDebug("----------\n...completed!", "important") # finally... return g def action_import_folder(location): """Try to import all files from a local folder""" if os.path.isdir(location): onlyfiles = [ f for f in os.listdir(location) if os.path.isfile(os.path.join(location, f)) ] for file in onlyfiles: if not file.startswith("."): filepath = os.path.join(location, file) printDebug( "\n---------\n" + filepath + "\n---------", fg='red') return action_import(filepath) else: printDebug("Not a valid directory", "important") return None def action_webimport(hrlinetop=False): """ select from the available online directories for import """ DIR_OPTIONS = {1: "http://lov.okfn.org", 2: "http://prefix.cc/popular/"} selection = None while True: if hrlinetop: printDebug("----------") text = "Please select which online directory to scan: (enter=quit)\n" for x in DIR_OPTIONS: text += "%d) %s\n" % (x, DIR_OPTIONS[x]) var = input(text + "> ") if var == "q" or var == "": return None else: try: selection = int(var) test = DIR_OPTIONS[selection] #throw exception if number wrong break except: printDebug("Invalid selection. Please try again.", "important") continue printDebug("----------") text = "Search for a specific keyword? (enter=show all)\n" var = input(text + "> ") keyword = var try: if selection == 1: _import_LOV(keyword=keyword) elif selection == 2: _import_PREFIXCC(keyword=keyword) except: printDebug("Sorry, the online repository seems to be unreachable.") return True def _import_LOV( baseuri="http://lov.okfn.org/dataset/lov/api/v2/vocabulary/list", keyword=""): """ 2016-03-02: import from json list """ printDebug("----------\nReading source... <%s>" % baseuri) query = requests.get(baseuri, params={}) all_options = query.json() options = [] # pre-filter if necessary if keyword: for x in all_options: if keyword in x['uri'].lower() or keyword in x['titles'][0][ 'value'].lower() or keyword in x['nsp'].lower(): options.append(x) else: options = all_options printDebug("----------\n%d results found.\n----------" % len(options)) if options: # display: counter = 1 for x in options: uri, title, ns = x['uri'], x['titles'][0]['value'], x['nsp'] # print("%s ==> %s" % (d['titles'][0]['value'], d['uri'])) printDebug( click.style("[%d]" % counter, fg='blue') + click.style(uri + " ==> ", fg='black') + click.style(title, fg='red'), err=True) counter += 1 while True: var = input(Style.BRIGHT + "=====\nSelect ID to import: (q=quit)\n" + Style.RESET_ALL) if var == "q": break else: try: _id = int(var) ontouri = options[_id - 1]['uri'] printDebug(Fore.RED + "\n---------\n" + ontouri + "\n---------" + Style.RESET_ALL) action_analyze([ontouri]) if click.confirm( '=====\nDo you want to save to your local library?' ): action_import(ontouri) return except: printDebug("Error retrieving file. 
Import failed.") continue def _import_PREFIXCC(keyword=""): """ List models from web catalog (prefix.cc) and ask which one to import 2015-10-10: originally part of main ontospy; now standalone only 2016-06-19: eliminated dependency on extras.import_web """ SOURCE = "http://prefix.cc/popular/all.file.vann" options = [] printDebug("----------\nReading source...") g = Ontospy(SOURCE, verbose=False) for x in g.all_ontologies: if keyword: if keyword in unicode(x.prefix).lower() or keyword in unicode( x.uri).lower(): options += [(unicode(x.prefix), unicode(x.uri))] else: options += [(unicode(x.prefix), unicode(x.uri))] printDebug("----------\n%d results found." % len(options)) counter = 1 for x in options: printDebug(Fore.BLUE + Style.BRIGHT + "[%d]" % counter + Style.RESET_ALL + x[0] + " ==> " + Fore.RED + x[1] + Style.RESET_ALL) # printDebug(Fore.BLUE + x[0] + " ==> " + x[1]) counter += 1 while True: var = input(Style.BRIGHT + "=====\nSelect ID to import: (q=quit)\n" + Style.RESET_ALL) if var == "q": break else: try: _id = int(var) ontouri = options[_id - 1][1] printDebug(Fore.RED + "\n---------\n" + ontouri + "\n---------" + Style.RESET_ALL) action_analyze([ontouri]) if click.confirm( '=====\nDo you want to save to your local library?'): action_import(ontouri) return except: printDebug("Error retrieving file. Import failed.") continue def action_bootstrap(verbose=False): """Bootstrap the local REPO with a few cool ontologies""" printDebug("The following ontologies will be imported:") printDebug("--------------") count = 0 for s in BOOTSTRAP_ONTOLOGIES: count += 1 printInfo(count, "<%s>" % s) printDebug("--------------") printDebug("Note: this operation may take several minutes.") printDebug("Proceed? [Y/N]") var = input() if var == "y" or var == "Y": for uri in BOOTSTRAP_ONTOLOGIES: try: printDebug("--------------") action_import(uri, verbose) except: printDebug( "OPS... An Unknown Error Occurred - Aborting Installation") printDebug("\n==========\n" + "Bootstrap command completed.", "important") return True else: printDebug("--------------") printDebug("Goodbye") return False def action_update_library_location(_location): """ Sets the folder that contains models for the local library @todo: add options to move things over etc.. note: this is called from 'manager' """ # if not(os.path.exists(_location)): # os.mkdir(_location) # printDebug("Creating new folder..", "comment") printDebug("Old location: '%s'" % get_home_location(), "comment") if os.path.isdir(_location): config = SafeConfigParser() config_filename = ONTOSPY_LOCAL + '/config.ini' config.read(config_filename) if not config.has_section('models'): config.add_section('models') config.set('models', 'dir', _location) with open(config_filename, 'w') as f: config.write( f) # note: this does not remove previously saved settings return _location else: return None def action_cache_reset(): """ Delete all contents from cache folder Then re-generate cached version of all models in the local repo """ printDebug("""The existing cache will be erased and recreated.""") printDebug( """This operation may take several minutes, depending on how many files exist in your local library.""" ) ONTOSPY_LOCAL_MODELS = get_home_location() # https://stackoverflow.com/questions/185936/how-to-delete-the-contents-of-a-folder-in-python # NOTE This will not only delete the contents but the folder itself as well. shutil.rmtree(ONTOSPY_LOCAL_CACHE_TOP) var = input(Style.BRIGHT + "=====\nProceed? 
(y/n) " + Style.RESET_ALL) if var == "y": repo_contents = get_localontologies() printInfo(Style.BRIGHT + "\n=====\n%d ontologies available in the local library\n=====" % len(repo_contents) + Style.RESET_ALL) for onto in repo_contents: fullpath = ONTOSPY_LOCAL_MODELS + "/" + onto try: printInfo(Fore.RED + "\n=====\n" + onto + Style.RESET_ALL) printInfo("Loading graph...") g = Ontospy(fullpath) printInfo("Loaded ", fullpath) except: g = None printDebug( "Error parsing file. Please make sure %s contains valid RDF." % fullpath) if g: printInfo("Caching...") do_pickle_ontology(onto, g) printDebug(Style.BRIGHT + "===Completed===" + Style.RESET_ALL) else: printDebug("Goodbye") def actions_delete(): """ DEPRECATED (v 1.9.4) delete an ontology from the local repo """ filename = action_listlocal() ONTOSPY_LOCAL_MODELS = get_home_location() if filename: fullpath = ONTOSPY_LOCAL_MODELS + filename if os.path.exists(fullpath): var = input("Are you sure you want to delete this file? (y/n)") if var == "y": os.remove(fullpath) printDebug("Deleted %s" % fullpath, "important") cachepath = ONTOSPY_LOCAL_CACHE + filename + ".pickle" # @todo: do this operation in /cache... if os.path.exists(cachepath): os.remove(cachepath) printDebug("---------") printDebug("File deleted [%s]" % cachepath, "important") return True else: printDebug("Goodbye") return False def action_erase(): """ DEPRECATED (v 1.9.4) just a wrapper.. possibly to be extended in the future """ get_or_create_home_repo(reset=True) return True def action_visualize(args, fromshell=False, path=None, title="", viztype="", theme="", preflabel="", preflang="", verbose=False): """ export model into another format eg html, d3 etc... <fromshell> : the local name is being passed from ontospy shell """ from ..ontodocs.builder import ask_visualization, select_visualization, VISUALIZATIONS_LIST, build_visualization if fromshell: ontouri = args islocal = True else: ontouri = args[0] islocal = False # select a visualization if viztype: viztype = select_visualization(viztype) else: viztype = ask_visualization() if viztype == "": return None # raise SystemExit, 1 # 2017-01-23: bypass pickled stuff as it has wrong counts etc.. # get ontospy graph printDebug("Loading graph...", dim=True) g = Ontospy(ontouri, verbose=verbose, pref_title=preflabel, pref_lang=preflang) # put viz in home folder by default: <ontouri>/<viztype>/files.. if not path: from os.path import expanduser home = expanduser("~") onto_path = slugify(unicode(ontouri)) viz_path = slugify(unicode(VISUALIZATIONS_LIST[viztype]['Title'])) path = os.path.join(home, "ontospy-viz/" + onto_path + "/" + viz_path) if not os.path.exists(path): os.makedirs(path) # url = build_viz(ontouri, g, viztype, path) printDebug("Building visualization...", dim=True) url = build_visualization(ontouri, g, viztype, path, title, theme) return url
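
# Brief usage sketch for the action helpers above, assuming they are imported from the
# ontospy package (the relative imports mean this module is not run directly). The
# FOAF URI is just a convenient public ontology to analyze.
#
#   action_analyze(["http://xmlns.com/foaf/0.1/"], print_opts=True)
#   action_import("http://xmlns.com/foaf/0.1/")
#   action_listlocal(all_details=True)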
the-stack_106_17121
import tweepy import databasehandler as db import json from userio import say, ok, warn, error say("Establishing connection to Twitter...") auth = tweepy.OAuthHandler(db.get_authentication()["consumer_key"], db.get_authentication()["consumer_secret"]) auth.set_access_token(db.get_authentication()["access_token"], db.get_authentication()["access_token_secret"]) api = tweepy.API(auth) ok("Connection established!") #Twitter only allows access to a users most recent 3240 tweets with this method def get_all_tweets(id): tweets = [] new_tweets = api.user_timeline(user_id=id, count=200) tweets.extend(new_tweets) oldest = tweets[-1].id - 1 while len(new_tweets) > 0: say("getting tweets before " + str(oldest)) new_tweets = api.user_timeline(user_id=id, count=200, max_id=oldest) tweets.extend(new_tweets) oldest = tweets[-1].id - 1 say("..." + str(len(tweets)) + " tweets downloaded so far") return [tweet._json for tweet in tweets] # this makes me sad, but is there a better way? submit a PR. def get_account_data(account): return api.get_user(account)._json def get_latest_tweets(id): tweets = api.user_timeline(user_id=id, count=200) return [tweet._json for tweet in tweets] # this makes me sad, but is there a better way? submit a PR. def does_status_exist(id): try: api.get_status(id) return True except tweepy.TweepError as err: if err[0][0]["code"] == 144: return False else: raise err
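
# Illustrative calls for the helpers above. They need the credentials stored by
# databasehandler to be valid, and the screen name is a placeholder.
#
#   account = get_account_data("some_screen_name")
#   recent = get_latest_tweets(account["id"])
#   say("Fetched " + str(len(recent)) + " recent tweets")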
the-stack_106_17123
# coding: utf-8 """ Python SDK for Opsgenie REST API Python SDK for Opsgenie REST API # noqa: E501 The version of the OpenAPI document: 2.0.0 Contact: [email protected] Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six class AlertAttachment(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'name': 'str', 'url': 'str' } attribute_map = { 'name': 'name', 'url': 'url' } def __init__(self, name=None, url=None): # noqa: E501 """AlertAttachment - a model defined in OpenAPI""" # noqa: E501 self._name = None self._url = None self.discriminator = None if name is not None: self.name = name if url is not None: self.url = url @property def name(self): """Gets the name of this AlertAttachment. # noqa: E501 :return: The name of this AlertAttachment. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this AlertAttachment. :param name: The name of this AlertAttachment. # noqa: E501 :type: str """ self._name = name @property def url(self): """Gets the url of this AlertAttachment. # noqa: E501 :return: The url of this AlertAttachment. # noqa: E501 :rtype: str """ return self._url @url.setter def url(self, url): """Sets the url of this AlertAttachment. :param url: The url of this AlertAttachment. # noqa: E501 :type: str """ self._url = url def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, AlertAttachment): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
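
# Minimal usage sketch for the generated model above; the attachment name and URL are
# placeholders.
if __name__ == '__main__':
    attachment = AlertAttachment(name='screenshot.png',
                                 url='https://example.com/screenshot.png')
    print(attachment.to_dict())
    print(attachment.to_str())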
the-stack_106_17124
# Copyright 2019-2020 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pandas as pd

from asreview.io.utils import standardize_dataframe


def read_csv(data_fp):
    """CSV file reader.

    Parameters
    ----------
    data_fp: str, pathlib.Path
        File path to the CSV file.

    Returns
    -------
    The result of asreview.io.utils.standardize_dataframe applied to the
    loaded dataframe.
    """
    try:
        df = pd.read_csv(data_fp)
    except UnicodeDecodeError:
        # Fall back to Latin-1 for files that are not valid UTF-8.
        df = pd.read_csv(data_fp, encoding="ISO-8859-1")

    return standardize_dataframe(df)
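Sketch of a typical call to the reader above; "records.csv" is a placeholder path, the import path is assumed, and the exact shape of the result is whatever standardize_dataframe produces.

from asreview.io.csv_reader import read_csv  # import path assumed

records = read_csv("records.csv")
print(len(records), "records loaded")

The ISO-8859-1 fallback in the reader means Latin-1 encoded exports load without passing an encoding explicitly.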
the-stack_106_17126
from loguru import logger import matplotlib.patheffects as PathEffects from matplotlib import gridspec from matplotlib import transforms as mtransforms import matplotlib.pyplot as plt def plot_curves( fig, curves, index, tracks="auto", ): if not isinstance(index, dict): index = {id(c): index for c in curves} if tracks == "auto": units = {} for c in curves: if not c.unit in units: units[c.unit] = [c.mnemonic] else: units[c.unit].append(c.mnemonic) tracks = [units[u] for u in units.keys()] logger.debug(f"tracks = {tracks}") track_titles = [", ".join(track) for track in tracks] max_taxiss = max([len(track) for track in tracks]) taxis_height = 0.03 top_axes_figy = 1 - (taxis_height * max_taxiss) gs = gridspec.GridSpec( 1, len(tracks), wspace=0, left=0.03, right=1, top=top_axes_figy, bottom=0.0 ) curve_title_fontsize = "small" taxis_lim_fontsize = "small" transforms = {} axes = {} for i, track in enumerate(tracks): if i == 0: ax = fig.add_subplot(gs[i]) plt.setp(ax.get_yticklabels(), fontsize="small") elif i > 0: ax = fig.add_subplot(gs[i], sharey=ax) plt.setp(ax.get_yticklabels(), visible=False) tr = { "x_Axes_y_Fig": mtransforms.blended_transform_factory( ax.transAxes, fig.transFigure ) } for j, curve_mnemonic in enumerate(track): taxis_xoffset = 0.02 taxis_bottom = top_axes_figy + (j * taxis_height) taxis_top = top_axes_figy + (j * taxis_height) + taxis_height taxis_mid = taxis_bottom + (taxis_height / 2) taxis_33pc = taxis_bottom + (taxis_height / 3) curve = [c for c in curves if c.mnemonic == curve_mnemonic][0] curve_index = index[id(curve)] (line,) = ax.plot(curve.data, curve_index, lw=0.8) ax.plot( [taxis_xoffset, 1 - taxis_xoffset], [taxis_mid, taxis_mid], color=line.get_color(), lw=0.4, clip_on=False, transform=tr["x_Axes_y_Fig"], ) for x in (0, 1): ax.plot( (x, x), [taxis_bottom, taxis_top], color="k", transform=tr["x_Axes_y_Fig"], clip_on=False, lw=0.4, ) ax.text( 0.5, taxis_mid, curve_mnemonic, color=line.get_color(), ha="center", va="center", fontsize=curve_title_fontsize, transform=tr["x_Axes_y_Fig"], path_effects=[PathEffects.withStroke(linewidth=4, foreground="w")], ) taxis_left_label = ax.text( 0 + taxis_xoffset, taxis_mid, f"{ax.get_xlim()[0]:.0f}", color=line.get_color(), ha="left", va="center", fontsize=taxis_lim_fontsize, transform=tr["x_Axes_y_Fig"], path_effects=[PathEffects.withStroke(linewidth=4, foreground="w")], ) taxis_right_label = ax.text( 1 - taxis_xoffset, taxis_mid, f"{ax.get_xlim()[1]:.0f}", color=line.get_color(), ha="right", va="center", fontsize=taxis_lim_fontsize, path_effects=[PathEffects.withStroke(linewidth=4, foreground="w")], transform=tr["x_Axes_y_Fig"], ) logger.debug(f"Top axis left label: {taxis_left_label}") for side in ("left", "right", "top", "bottom"): ax.spines[side].set_linewidth(0.5) ax.spines[side].set_color("k") transforms[i] = tr axes[i] = ax y0, y1 = axes[0].get_ylim() axes[0].set_ylim(y1, y0) logger.debug(f"top_axes_figy {top_axes_figy}") return axes
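A hedged example of driving plot_curves with stand-in curve objects. Real callers presumably pass lasio-style curves; DummyCurve below only mimics the three attributes the function reads (mnemonic, unit, data), all values are made up, and the call assumes plot_curves is importable from (or defined in) the current module.

import numpy as np
import matplotlib.pyplot as plt


class DummyCurve:
    def __init__(self, mnemonic, unit, data):
        self.mnemonic, self.unit, self.data = mnemonic, unit, data


depth = np.linspace(1000, 1500, 200)
gr = DummyCurve("GR", "gAPI", 75 + 20 * np.sin(depth / 15.0))
rhob = DummyCurve("RHOB", "g/cc", 2.3 + 0.1 * np.cos(depth / 25.0))

fig = plt.figure(figsize=(6, 8))
axes = plot_curves(fig, [gr, rhob], index=depth, tracks="auto")  # "auto" groups curves by unit, one track each
fig.savefig("tracks.png", dpi=150)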
the-stack_106_17129
import jinja2 from . import abc_pdf __all__ = [ "DefaultTemplateManager" ] default_template = """ <html> <head> <style type="text/css"> table { font-family: "Courier New", monospace; text-align: left; border-collapse: separate; border-spacing: 5px; background: #ECE9E0; color: #262726; border: 16px solid #ECE9E0; border-radius: 20px; width: 100%; } th { font-size: 30px; padding: 10px; text-align: center; } td { background: #F5D7BF; padding: 10px; font-size: 23px; } </style> </head> <body> {{table}} </body> </html> """ class DefaultTemplateManager(abc_pdf.AbstractTemplateManager): """Class provides rendering data from template""" template = default_template def __init__(self, template=None): self.template = template or self.template def render(self, **kwargs) -> str: template_loader = jinja2.BaseLoader template = jinja2.Environment(loader=template_loader).from_string(self.template) output_text = template.render(**kwargs) return output_text
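A minimal rendering sketch for the manager above. The {{table}} placeholder is the only variable the bundled default template expects; the HTML snippet and file name here are placeholders.

manager = DefaultTemplateManager()
html = manager.render(table="<table><tr><th>Report</th></tr><tr><td>42</td></tr></table>")
with open("report.html", "w") as fh:
    fh.write(html)

# Any Jinja2 string template can be swapped in per instance:
custom = DefaultTemplateManager(template="<p>{{greeting}}, {{name}}!</p>")
print(custom.render(greeting="Hello", name="world"))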
the-stack_106_17130
#!/usr/bin/env python ############################################################################## # Copyright 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. ############################################################################## from __future__ import absolute_import, division, print_function, unicode_literals import os import sys import unittest from unittest.mock import patch BENCHMARK_DIR = os.path.abspath( os.path.join( os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir ) ) sys.path.append(BENCHMARK_DIR) from platforms.ios.idb import IDB class IDBTest(unittest.TestCase): def setUp(self): pass def test_set_bundle_id(self): idb = IDB() idb.setBundleId("TEST") self.assertEqual(idb.bundle_id, "TEST") def _util_base_run(self, *args, **kwargs): self.assertEqual(args[0], ["ios-deploy"]) def test_run(self): idb = IDB() with patch( "platforms.platform_util_base.PlatformUtilBase.run", side_effect=self._util_base_run, ): idb.run() def _ios_run_for_push(self, *args, **kwargs): return args def test_push(self): src = os.path.abspath( os.path.join( BENCHMARK_DIR, os.pardir, "specifications/models/caffe2/squeezenet/squeezenet.json", ) ) tgt = "TEST_TGT" idb = IDB() with patch("platforms.ios.idb.IDB.run", side_effect=self._ios_run_for_push): push_res = idb.push(src, tgt) self.assertEqual(push_res, ("--upload", src, "--to", tgt)) def _ios_run_for_reboot(self, *args, **kwargs): self.assertTrue(args[0] == "idevicepair" or args[0] == "idevicediagnostics") self.assertEqual(args[1], "-u") self.assertEqual(args[2], "TEST_DEVICE") self.assertTrue(args[3] == "pair" or args[3] == "restart") def test_reboot(self): idb = IDB(device="TEST_DEVICE") with patch( "platforms.platform_util_base.PlatformUtilBase.run", side_effect=self._ios_run_for_reboot, ): push_res = idb.reboot() self.assertTrue(push_res) if __name__ == "__main__": unittest.main()
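The tests above stub out the real ios-deploy/idevice tooling by patching PlatformUtilBase.run with a side_effect that simply echoes or checks its arguments. Below is a self-contained sketch of that same pattern, with Target standing in for any command-line wrapper class; it is not part of the benchmark code.

import unittest
from unittest.mock import patch


class Target:
    @staticmethod
    def run(*args):
        raise RuntimeError("would shell out to a real binary")


class EchoPatchTest(unittest.TestCase):
    def test_run_is_stubbed(self):
        # side_effect's return value becomes the mock's return value.
        with patch.object(Target, "run", side_effect=lambda *a: a):
            self.assertEqual(Target.run("--upload", "model.json"),
                             ("--upload", "model.json"))


if __name__ == "__main__":
    unittest.main()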
the-stack_106_17131
import paddle
import paddle.nn as nn

from model.losses import SigmoidBinaryCrossEntropyLoss


class BRSMaskLoss(nn.Layer):
    def __init__(self, eps=1e-5):
        super().__init__()
        self._eps = eps

    def forward(self, result, pos_mask, neg_mask):
        pos_diff = (1 - result) * pos_mask
        pos_target = paddle.sum(pos_diff ** 2)
        pos_target = pos_target / (paddle.sum(pos_mask) + self._eps)

        neg_diff = result * neg_mask
        neg_target = paddle.sum(neg_diff ** 2)
        neg_target = neg_target / (paddle.sum(neg_mask) + self._eps)

        loss = pos_target + neg_target

        with paddle.no_grad():
            f_max_pos = paddle.max(paddle.abs(pos_diff))
            f_max_neg = paddle.max(paddle.abs(neg_diff))

        return loss, f_max_pos, f_max_neg


class OracleMaskLoss(nn.Layer):
    def __init__(self):
        super().__init__()
        self.gt_mask = None
        self.loss = SigmoidBinaryCrossEntropyLoss(from_sigmoid=True)
        self.predictor = None
        self.history = []

    def set_gt_mask(self, gt_mask):
        self.gt_mask = gt_mask
        self.history = []

    def forward(self, result, pos_mask, neg_mask):
        gt_mask = self.gt_mask
        if self.predictor.object_roi is not None:
            r1, r2, c1, c2 = self.predictor.object_roi[:4]
            gt_mask = gt_mask[:, :, r1:r2 + 1, c1:c2 + 1]
            # Use .shape (paddle's equivalent of torch's size()) to get the spatial dims.
            gt_mask = paddle.nn.functional.interpolate(gt_mask, result.shape[2:],
                                                       mode='bilinear', align_corners=True)

        if result.shape[0] == 2:
            gt_mask_flipped = paddle.flip(gt_mask, axis=[3])
            gt_mask = paddle.concat([gt_mask, gt_mask_flipped], axis=0)

        loss = self.loss(result, gt_mask)
        self.history.append(loss.detach().cpu().numpy()[0])

        if len(self.history) > 5 and abs(self.history[-5] - self.history[-1]) < 1e-5:
            return 0, 0, 0

        return loss, 1.0, 1.0
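A rough smoke-test for BRSMaskLoss with random tensors; the shapes follow the NCHW layout the losses appear to expect and the click positions are arbitrary.

import paddle

pred = paddle.nn.functional.sigmoid(paddle.randn([1, 1, 64, 64]))
pos_clicks = paddle.zeros([1, 1, 64, 64])
neg_clicks = paddle.zeros([1, 1, 64, 64])
pos_clicks[0, 0, 10, 10] = 1.0   # one simulated positive click
neg_clicks[0, 0, 50, 50] = 1.0   # one simulated negative click

criterion = BRSMaskLoss()
loss, max_pos_err, max_neg_err = criterion(pred, pos_clicks, neg_clicks)
print(loss.numpy(), max_pos_err.numpy(), max_neg_err.numpy())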
the-stack_106_17133
#!/usr/bin/env python3 # Copyright 2017 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Statically analyze stack usage of EC firmware. Example: extra/stack_analyzer/stack_analyzer.py \ --export_taskinfo ./build/elm/util/export_taskinfo.so \ --section RW \ ./build/elm/RW/ec.RW.elf """ from __future__ import print_function import argparse import collections import ctypes import os import re import subprocess import yaml SECTION_RO = 'RO' SECTION_RW = 'RW' # Default size of extra stack frame needed by exception context switch. # This value is for cortex-m with FPU enabled. DEFAULT_EXCEPTION_FRAME_SIZE = 224 class StackAnalyzerError(Exception): """Exception class for stack analyzer utility.""" class TaskInfo(ctypes.Structure): """Taskinfo ctypes structure. The structure definition is corresponding to the "struct taskinfo" in "util/export_taskinfo.so.c". """ _fields_ = [('name', ctypes.c_char_p), ('routine', ctypes.c_char_p), ('stack_size', ctypes.c_uint32)] class Task(object): """Task information. Attributes: name: Task name. routine_name: Routine function name. stack_max_size: Max stack size. routine_address: Resolved routine address. None if it hasn't been resolved. """ def __init__(self, name, routine_name, stack_max_size, routine_address=None): """Constructor. Args: name: Task name. routine_name: Routine function name. stack_max_size: Max stack size. routine_address: Resolved routine address. """ self.name = name self.routine_name = routine_name self.stack_max_size = stack_max_size self.routine_address = routine_address def __eq__(self, other): """Task equality. Args: other: The compared object. Returns: True if equal, False if not. """ if not isinstance(other, Task): return False return (self.name == other.name and self.routine_name == other.routine_name and self.stack_max_size == other.stack_max_size and self.routine_address == other.routine_address) class Symbol(object): """Symbol information. Attributes: address: Symbol address. symtype: Symbol type, 'O' (data, object) or 'F' (function). size: Symbol size. name: Symbol name. """ def __init__(self, address, symtype, size, name): """Constructor. Args: address: Symbol address. symtype: Symbol type. size: Symbol size. name: Symbol name. """ assert symtype in ['O', 'F'] self.address = address self.symtype = symtype self.size = size self.name = name def __eq__(self, other): """Symbol equality. Args: other: The compared object. Returns: True if equal, False if not. """ if not isinstance(other, Symbol): return False return (self.address == other.address and self.symtype == other.symtype and self.size == other.size and self.name == other.name) class Callsite(object): """Function callsite. Attributes: address: Address of callsite location. None if it is unknown. target: Callee address. None if it is unknown. is_tail: A bool indicates that it is a tailing call. callee: Resolved callee function. None if it hasn't been resolved. """ def __init__(self, address, target, is_tail, callee=None): """Constructor. Args: address: Address of callsite location. None if it is unknown. target: Callee address. None if it is unknown. is_tail: A bool indicates that it is a tailing call. (function jump to another function without restoring the stack frame) callee: Resolved callee function. """ # It makes no sense that both address and target are unknown. 
assert not (address is None and target is None) self.address = address self.target = target self.is_tail = is_tail self.callee = callee def __eq__(self, other): """Callsite equality. Args: other: The compared object. Returns: True if equal, False if not. """ if not isinstance(other, Callsite): return False if not (self.address == other.address and self.target == other.target and self.is_tail == other.is_tail): return False if self.callee is None: return other.callee is None elif other.callee is None: return False # Assume the addresses of functions are unique. return self.callee.address == other.callee.address class Function(object): """Function. Attributes: address: Address of function. name: Name of function from its symbol. stack_frame: Size of stack frame. callsites: Callsite list. stack_max_usage: Max stack usage. None if it hasn't been analyzed. stack_max_path: Max stack usage path. None if it hasn't been analyzed. """ def __init__(self, address, name, stack_frame, callsites): """Constructor. Args: address: Address of function. name: Name of function from its symbol. stack_frame: Size of stack frame. callsites: Callsite list. """ self.address = address self.name = name self.stack_frame = stack_frame self.callsites = callsites self.stack_max_usage = None self.stack_max_path = None def __eq__(self, other): """Function equality. Args: other: The compared object. Returns: True if equal, False if not. """ if not isinstance(other, Function): return False if not (self.address == other.address and self.name == other.name and self.stack_frame == other.stack_frame and self.callsites == other.callsites and self.stack_max_usage == other.stack_max_usage): return False if self.stack_max_path is None: return other.stack_max_path is None elif other.stack_max_path is None: return False if len(self.stack_max_path) != len(other.stack_max_path): return False for self_func, other_func in zip(self.stack_max_path, other.stack_max_path): # Assume the addresses of functions are unique. if self_func.address != other_func.address: return False return True def __hash__(self): return id(self) class AndesAnalyzer(object): """Disassembly analyzer for Andes architecture. Public Methods: AnalyzeFunction: Analyze stack frame and callsites of the function. """ GENERAL_PURPOSE_REGISTER_SIZE = 4 # Possible condition code suffixes. CONDITION_CODES = [ 'eq', 'eqz', 'gez', 'gtz', 'lez', 'ltz', 'ne', 'nez', 'eqc', 'nec', 'nezs', 'nes', 'eqs'] CONDITION_CODES_RE = '({})'.format('|'.join(CONDITION_CODES)) IMM_ADDRESS_RE = r'([0-9A-Fa-f]+)\s+<([^>]+)>' # Branch instructions. JUMP_OPCODE_RE = re.compile(r'^(b{0}|j|jr|jr.|jrnez)(\d?|\d\d)$' \ .format(CONDITION_CODES_RE)) # Call instructions. CALL_OPCODE_RE = re.compile \ (r'^(jal|jral|jral.|jralnez|beqzal|bltzal|bgezal)(\d)?$') CALL_OPERAND_RE = re.compile(r'^{}$'.format(IMM_ADDRESS_RE)) # Ignore lp register because it's for return. INDIRECT_CALL_OPERAND_RE = re.compile \ (r'^\$r\d{1,}$|\$fp$|\$gp$|\$ta$|\$sp$|\$pc$') # TODO: Handle other kinds of store instructions. PUSH_OPCODE_RE = re.compile(r'^push(\d{1,})$') PUSH_OPERAND_RE = re.compile(r'^\$r\d{1,}, \#\d{1,} \! \{([^\]]+)\}') SMW_OPCODE_RE = re.compile(r'^smw(\.\w\w|\.\w\w\w)$') SMW_OPERAND_RE = re.compile(r'^(\$r\d{1,}|\$\wp), \[\$\wp\], ' r'(\$r\d{1,}|\$\wp), \#\d\w\d \! 
\{([^\]]+)\}') OPERANDGROUP_RE = re.compile(r'^\$r\d{1,}\~\$r\d{1,}') LWI_OPCODE_RE = re.compile(r'^lwi(\.\w\w)$') LWI_PC_OPERAND_RE = re.compile(r'^\$pc, \[([^\]]+)\]') # Example: "34280: 3f c8 0f ec addi.gp $fp, #0xfec" # Assume there is always a "\t" after the hex data. DISASM_REGEX_RE = re.compile(r'^(?P<address>[0-9A-Fa-f]+):\s+' r'(?P<words>[0-9A-Fa-f ]+)' r'\t\s*(?P<opcode>\S+)(\s+(?P<operand>[^;]*))?') def ParseInstruction(self, line, function_end): """Parse the line of instruction. Args: line: Text of disassembly. function_end: End address of the current function. None if unknown. Returns: (address, words, opcode, operand_text): The instruction address, words, opcode, and the text of operands. None if it isn't an instruction line. """ result = self.DISASM_REGEX_RE.match(line) if result is None: return None address = int(result.group('address'), 16) # Check if it's out of bound. if function_end is not None and address >= function_end: return None opcode = result.group('opcode').strip() operand_text = result.group('operand') words = result.group('words') if operand_text is None: operand_text = '' else: operand_text = operand_text.strip() return (address, words, opcode, operand_text) def AnalyzeFunction(self, function_symbol, instructions): stack_frame = 0 callsites = [] for address, words, opcode, operand_text in instructions: is_jump_opcode = self.JUMP_OPCODE_RE.match(opcode) is not None is_call_opcode = self.CALL_OPCODE_RE.match(opcode) is not None if is_jump_opcode or is_call_opcode: is_tail = is_jump_opcode result = self.CALL_OPERAND_RE.match(operand_text) if result is None: if (self.INDIRECT_CALL_OPERAND_RE.match(operand_text) is not None): # Found an indirect call. callsites.append(Callsite(address, None, is_tail)) else: target_address = int(result.group(1), 16) # Filter out the in-function target (branches and in-function calls, # which are actually branches). if not (function_symbol.size > 0 and function_symbol.address < target_address < (function_symbol.address + function_symbol.size)): # Maybe it is a callsite. callsites.append(Callsite(address, target_address, is_tail)) elif self.LWI_OPCODE_RE.match(opcode) is not None: result = self.LWI_PC_OPERAND_RE.match(operand_text) if result is not None: # Ignore "lwi $pc, [$sp], xx" because it's usually a return. if result.group(1) != '$sp': # Found an indirect call. callsites.append(Callsite(address, None, True)) elif self.PUSH_OPCODE_RE.match(opcode) is not None: # Example: fc 20 push25 $r8, #0 ! {$r6~$r8, $fp, $gp, $lp} if self.PUSH_OPERAND_RE.match(operand_text) is not None: # capture fc 20 imm5u = int(words.split(' ')[1], 16) # sp = sp - (imm5u << 3) imm8u = (imm5u<<3) & 0xff stack_frame += imm8u result = self.PUSH_OPERAND_RE.match(operand_text) operandgroup_text = result.group(1) # capture $rx~$ry if self.OPERANDGROUP_RE.match(operandgroup_text) is not None: # capture number & transfer string to integer oprandgrouphead = operandgroup_text.split(',')[0] rx=int(''.join(filter(str.isdigit, oprandgrouphead.split('~')[0]))) ry=int(''.join(filter(str.isdigit, oprandgrouphead.split('~')[1]))) stack_frame += ((len(operandgroup_text.split(','))+ry-rx) * self.GENERAL_PURPOSE_REGISTER_SIZE) else: stack_frame += (len(operandgroup_text.split(',')) * self.GENERAL_PURPOSE_REGISTER_SIZE) elif self.SMW_OPCODE_RE.match(opcode) is not None: # Example: smw.adm $r6, [$sp], $r10, #0x2 ! 
{$r6~$r10, $lp} if self.SMW_OPERAND_RE.match(operand_text) is not None: result = self.SMW_OPERAND_RE.match(operand_text) operandgroup_text = result.group(3) # capture $rx~$ry if self.OPERANDGROUP_RE.match(operandgroup_text) is not None: # capture number & transfer string to integer oprandgrouphead = operandgroup_text.split(',')[0] rx=int(''.join(filter(str.isdigit, oprandgrouphead.split('~')[0]))) ry=int(''.join(filter(str.isdigit, oprandgrouphead.split('~')[1]))) stack_frame += ((len(operandgroup_text.split(','))+ry-rx) * self.GENERAL_PURPOSE_REGISTER_SIZE) else: stack_frame += (len(operandgroup_text.split(',')) * self.GENERAL_PURPOSE_REGISTER_SIZE) return (stack_frame, callsites) class ArmAnalyzer(object): """Disassembly analyzer for ARM architecture. Public Methods: AnalyzeFunction: Analyze stack frame and callsites of the function. """ GENERAL_PURPOSE_REGISTER_SIZE = 4 # Possible condition code suffixes. CONDITION_CODES = ['', 'eq', 'ne', 'cs', 'hs', 'cc', 'lo', 'mi', 'pl', 'vs', 'vc', 'hi', 'ls', 'ge', 'lt', 'gt', 'le'] CONDITION_CODES_RE = '({})'.format('|'.join(CONDITION_CODES)) # Assume there is no function name containing ">". IMM_ADDRESS_RE = r'([0-9A-Fa-f]+)\s+<([^>]+)>' # Fuzzy regular expressions for instruction and operand parsing. # Branch instructions. JUMP_OPCODE_RE = re.compile( r'^(b{0}|bx{0})(\.\w)?$'.format(CONDITION_CODES_RE)) # Call instructions. CALL_OPCODE_RE = re.compile( r'^(bl{0}|blx{0})(\.\w)?$'.format(CONDITION_CODES_RE)) CALL_OPERAND_RE = re.compile(r'^{}$'.format(IMM_ADDRESS_RE)) CBZ_CBNZ_OPCODE_RE = re.compile(r'^(cbz|cbnz)(\.\w)?$') # Example: "r0, 1009bcbe <host_cmd_motion_sense+0x1d2>" CBZ_CBNZ_OPERAND_RE = re.compile(r'^[^,]+,\s+{}$'.format(IMM_ADDRESS_RE)) # Ignore lr register because it's for return. INDIRECT_CALL_OPERAND_RE = re.compile(r'^r\d+|sb|sl|fp|ip|sp|pc$') # TODO(cheyuw): Handle conditional versions of following # instructions. # TODO(cheyuw): Handle other kinds of pc modifying instructions (e.g. mov pc). LDR_OPCODE_RE = re.compile(r'^ldr(\.\w)?$') # Example: "pc, [sp], #4" LDR_PC_OPERAND_RE = re.compile(r'^pc, \[([^\]]+)\]') # TODO(cheyuw): Handle other kinds of stm instructions. PUSH_OPCODE_RE = re.compile(r'^push$') STM_OPCODE_RE = re.compile(r'^stmdb$') # Stack subtraction instructions. SUB_OPCODE_RE = re.compile(r'^sub(s|w)?(\.\w)?$') SUB_OPERAND_RE = re.compile(r'^sp[^#]+#(\d+)') # Example: "44d94: f893 0068 ldrb.w r0, [r3, #104] ; 0x68" # Assume there is always a "\t" after the hex data. DISASM_REGEX_RE = re.compile(r'^(?P<address>[0-9A-Fa-f]+):\s+[0-9A-Fa-f ]+' r'\t\s*(?P<opcode>\S+)(\s+(?P<operand>[^;]*))?') def ParseInstruction(self, line, function_end): """Parse the line of instruction. Args: line: Text of disassembly. function_end: End address of the current function. None if unknown. Returns: (address, opcode, operand_text): The instruction address, opcode, and the text of operands. None if it isn't an instruction line. """ result = self.DISASM_REGEX_RE.match(line) if result is None: return None address = int(result.group('address'), 16) # Check if it's out of bound. if function_end is not None and address >= function_end: return None opcode = result.group('opcode').strip() operand_text = result.group('operand') if operand_text is None: operand_text = '' else: operand_text = operand_text.strip() return (address, opcode, operand_text) def AnalyzeFunction(self, function_symbol, instructions): """Analyze function, resolve the size of stack frame and callsites. Args: function_symbol: Function symbol. instructions: Instruction list. 
Returns: (stack_frame, callsites): Size of stack frame, callsite list. """ stack_frame = 0 callsites = [] for address, opcode, operand_text in instructions: is_jump_opcode = self.JUMP_OPCODE_RE.match(opcode) is not None is_call_opcode = self.CALL_OPCODE_RE.match(opcode) is not None is_cbz_cbnz_opcode = self.CBZ_CBNZ_OPCODE_RE.match(opcode) is not None if is_jump_opcode or is_call_opcode or is_cbz_cbnz_opcode: is_tail = is_jump_opcode or is_cbz_cbnz_opcode if is_cbz_cbnz_opcode: result = self.CBZ_CBNZ_OPERAND_RE.match(operand_text) else: result = self.CALL_OPERAND_RE.match(operand_text) if result is None: # Failed to match immediate address, maybe it is an indirect call. # CBZ and CBNZ can't be indirect calls. if (not is_cbz_cbnz_opcode and self.INDIRECT_CALL_OPERAND_RE.match(operand_text) is not None): # Found an indirect call. callsites.append(Callsite(address, None, is_tail)) else: target_address = int(result.group(1), 16) # Filter out the in-function target (branches and in-function calls, # which are actually branches). if not (function_symbol.size > 0 and function_symbol.address < target_address < (function_symbol.address + function_symbol.size)): # Maybe it is a callsite. callsites.append(Callsite(address, target_address, is_tail)) elif self.LDR_OPCODE_RE.match(opcode) is not None: result = self.LDR_PC_OPERAND_RE.match(operand_text) if result is not None: # Ignore "ldr pc, [sp], xx" because it's usually a return. if result.group(1) != 'sp': # Found an indirect call. callsites.append(Callsite(address, None, True)) elif self.PUSH_OPCODE_RE.match(opcode) is not None: # Example: "{r4, r5, r6, r7, lr}" stack_frame += (len(operand_text.split(',')) * self.GENERAL_PURPOSE_REGISTER_SIZE) elif self.SUB_OPCODE_RE.match(opcode) is not None: result = self.SUB_OPERAND_RE.match(operand_text) if result is not None: stack_frame += int(result.group(1)) else: # Unhandled stack register subtraction. assert not operand_text.startswith('sp') elif self.STM_OPCODE_RE.match(opcode) is not None: if operand_text.startswith('sp!'): # Subtract and writeback to stack register. # Example: "sp!, {r4, r5, r6, r7, r8, r9, lr}" # Get the text of pushed register list. unused_sp, unused_sep, parameter_text = operand_text.partition(',') stack_frame += (len(parameter_text.split(',')) * self.GENERAL_PURPOSE_REGISTER_SIZE) return (stack_frame, callsites) class RiscvAnalyzer(object): """Disassembly analyzer for RISC-V architecture. Public Methods: AnalyzeFunction: Analyze stack frame and callsites of the function. """ # Possible condition code suffixes. CONDITION_CODES = [ 'eqz', 'nez', 'lez', 'gez', 'ltz', 'gtz', 'gt', 'le', 'gtu', 'leu', 'eq', 'ne', 'ge', 'lt', 'ltu', 'geu'] CONDITION_CODES_RE = '({})'.format('|'.join(CONDITION_CODES)) # Branch instructions. JUMP_OPCODE_RE = re.compile(r'^(b{0}|j|jr)$'.format(CONDITION_CODES_RE)) # Call instructions. CALL_OPCODE_RE = re.compile(r'^(jal|jalr)$') # Example: "j 8009b318 <set_state_prl_hr>" or # "jal ra,800a4394 <power_get_signals>" or # "bltu t0,t1,80080300 <data_loop>" JUMP_ADDRESS_RE = r'((\w(\w|\d\d),){0,2})([0-9A-Fa-f]+)\s+<([^>]+)>' CALL_OPERAND_RE = re.compile(r'^{}$'.format(JUMP_ADDRESS_RE)) # Capture address, Example: 800a4394 CAPTURE_ADDRESS = re.compile(r'[0-9A-Fa-f]{8}') # Indirect jump, Example: jalr a5 INDIRECT_CALL_OPERAND_RE = re.compile(r'^t\d+|s\d+|a\d+$') # Example: addi ADDI_OPCODE_RE = re.compile(r'^addi$') # Allocate stack instructions. 
ADDI_OPERAND_RE = re.compile(r'^(sp,sp,-\d+)$') # Example: "800804b6: 1101 addi sp,sp,-32" DISASM_REGEX_RE = re.compile(r'^(?P<address>[0-9A-Fa-f]+):\s+[0-9A-Fa-f ]+' r'\t\s*(?P<opcode>\S+)(\s+(?P<operand>[^;]*))?') def ParseInstruction(self, line, function_end): """Parse the line of instruction. Args: line: Text of disassembly. function_end: End address of the current function. None if unknown. Returns: (address, opcode, operand_text): The instruction address, opcode, and the text of operands. None if it isn't an instruction line. """ result = self.DISASM_REGEX_RE.match(line) if result is None: return None address = int(result.group('address'), 16) # Check if it's out of bound. if function_end is not None and address >= function_end: return None opcode = result.group('opcode').strip() operand_text = result.group('operand') if operand_text is None: operand_text = '' else: operand_text = operand_text.strip() return (address, opcode, operand_text) def AnalyzeFunction(self, function_symbol, instructions): stack_frame = 0 callsites = [] for address, opcode, operand_text in instructions: is_jump_opcode = self.JUMP_OPCODE_RE.match(opcode) is not None is_call_opcode = self.CALL_OPCODE_RE.match(opcode) is not None if is_jump_opcode or is_call_opcode: is_tail = is_jump_opcode result = self.CALL_OPERAND_RE.match(operand_text) if result is None: if (self.INDIRECT_CALL_OPERAND_RE.match(operand_text) is not None): # Found an indirect call. callsites.append(Callsite(address, None, is_tail)) else: # Capture address form operand_text and then convert to string address_str = "".join(self.CAPTURE_ADDRESS.findall(operand_text)) # String to integer target_address = int(address_str, 16) # Filter out the in-function target (branches and in-function calls, # which are actually branches). if not (function_symbol.size > 0 and function_symbol.address < target_address < (function_symbol.address + function_symbol.size)): # Maybe it is a callsite. callsites.append(Callsite(address, target_address, is_tail)) elif self.ADDI_OPCODE_RE.match(opcode) is not None: # Example: sp,sp,-32 if self.ADDI_OPERAND_RE.match(operand_text) is not None: stack_frame += abs(int(operand_text.split(",")[2])) return (stack_frame, callsites) class StackAnalyzer(object): """Class to analyze stack usage. Public Methods: Analyze: Run the stack analysis. """ C_FUNCTION_NAME = r'_A-Za-z0-9' # Assume there is no ":" in the path. # Example: "driver/accel_kionix.c:321 (discriminator 3)" ADDRTOLINE_RE = re.compile( r'^(?P<path>[^:]+):(?P<linenum>\d+)(\s+\(discriminator\s+\d+\))?$') # To eliminate the suffix appended by compilers, try to extract the # C function name from the prefix of symbol name. # Example: "SHA256_transform.constprop.28" FUNCTION_PREFIX_NAME_RE = re.compile( r'^(?P<name>[{0}]+)([^{0}].*)?$'.format(C_FUNCTION_NAME)) # Errors of annotation resolving. ANNOTATION_ERROR_INVALID = 'invalid signature' ANNOTATION_ERROR_NOTFOUND = 'function is not found' ANNOTATION_ERROR_AMBIGUOUS = 'signature is ambiguous' def __init__(self, options, symbols, rodata, tasklist, annotation): """Constructor. Args: options: Namespace from argparse.parse_args(). symbols: Symbol list. rodata: Content of .rodata section (offset, data) tasklist: Task list. annotation: Annotation config. """ self.options = options self.symbols = symbols self.rodata_offset = rodata[0] self.rodata = rodata[1] self.tasklist = tasklist self.annotation = annotation self.address_to_line_cache = {} def AddressToLine(self, address, resolve_inline=False): """Convert address to line. 
Args: address: Target address. resolve_inline: Output the stack of inlining. Returns: lines: List of the corresponding lines. Raises: StackAnalyzerError: If addr2line is failed. """ cache_key = (address, resolve_inline) if cache_key in self.address_to_line_cache: return self.address_to_line_cache[cache_key] try: args = [self.options.addr2line, '-f', '-e', self.options.elf_path, '{:x}'.format(address)] if resolve_inline: args.append('-i') line_text = subprocess.check_output(args, encoding='utf-8') except subprocess.CalledProcessError: raise StackAnalyzerError('addr2line failed to resolve lines.') except OSError: raise StackAnalyzerError('Failed to run addr2line.') lines = [line.strip() for line in line_text.splitlines()] # Assume the output has at least one pair like "function\nlocation\n", and # they always show up in pairs. # Example: "handle_request\n # common/usb_pd_protocol.c:1191\n" assert len(lines) >= 2 and len(lines) % 2 == 0 line_infos = [] for index in range(0, len(lines), 2): (function_name, line_text) = lines[index:index + 2] if line_text in ['??:0', ':?']: line_infos.append(None) else: result = self.ADDRTOLINE_RE.match(line_text) # Assume the output is always well-formed. assert result is not None line_infos.append((function_name.strip(), os.path.realpath(result.group('path').strip()), int(result.group('linenum')))) self.address_to_line_cache[cache_key] = line_infos return line_infos def AnalyzeDisassembly(self, disasm_text): """Parse the disassembly text, analyze, and build a map of all functions. Args: disasm_text: Disassembly text. Returns: function_map: Dict of functions. """ disasm_lines = [line.strip() for line in disasm_text.splitlines()] if 'nds' in disasm_lines[1]: analyzer = AndesAnalyzer() elif 'arm' in disasm_lines[1]: analyzer = ArmAnalyzer() elif 'riscv' in disasm_lines[1]: analyzer = RiscvAnalyzer() else: raise StackAnalyzerError('Unsupported architecture.') # Example: "08028c8c <motion_lid_calc>:" function_signature_regex = re.compile( r'^(?P<address>[0-9A-Fa-f]+)\s+<(?P<name>[^>]+)>:$') def DetectFunctionHead(line): """Check if the line is a function head. Args: line: Text of disassembly. Returns: symbol: Function symbol. None if it isn't a function head. """ result = function_signature_regex.match(line) if result is None: return None address = int(result.group('address'), 16) symbol = symbol_map.get(address) # Check if the function exists and matches. if symbol is None or symbol.symtype != 'F': return None return symbol # Build symbol map, indexed by symbol address. symbol_map = {} for symbol in self.symbols: # If there are multiple symbols with same address, keeping any of them is # good enough. symbol_map[symbol.address] = symbol # Parse the disassembly text. We update the variable "line" to next line # when needed. There are two steps of parser: # # Step 1: Searching for the function head. Once reach the function head, # move to the next line, which is the first line of function body. # # Step 2: Parsing each instruction line of function body. Once reach a # non-instruction line, stop parsing and analyze the parsed instructions. # # Finally turn back to the step 1 without updating the line, because the # current non-instruction line can be another function head. function_map = {} # The following three variables are the states of the parsing processing. # They will be initialized properly during the state changes. function_symbol = None function_end = None instructions = [] # Remove heading and tailing spaces for each line. 
line_index = 0 while line_index < len(disasm_lines): # Get the current line. line = disasm_lines[line_index] if function_symbol is None: # Step 1: Search for the function head. function_symbol = DetectFunctionHead(line) if function_symbol is not None: # Assume there is no empty function. If the function head is followed # by EOF, it is an empty function. assert line_index + 1 < len(disasm_lines) # Found the function head, initialize and turn to the step 2. instructions = [] # If symbol size exists, use it as a hint of function size. if function_symbol.size > 0: function_end = function_symbol.address + function_symbol.size else: function_end = None else: # Step 2: Parse the function body. instruction = analyzer.ParseInstruction(line, function_end) if instruction is not None: instructions.append(instruction) if instruction is None or line_index + 1 == len(disasm_lines): # Either the invalid instruction or EOF indicates the end of the # function, finalize the function analysis. # Assume there is no empty function. assert len(instructions) > 0 (stack_frame, callsites) = analyzer.AnalyzeFunction(function_symbol, instructions) # Assume the function addresses are unique in the disassembly. assert function_symbol.address not in function_map function_map[function_symbol.address] = Function( function_symbol.address, function_symbol.name, stack_frame, callsites) # Initialize and turn back to the step 1. function_symbol = None # If the current line isn't an instruction, it can be another function # head, skip moving to the next line. if instruction is None: continue # Move to the next line. line_index += 1 # Resolve callees of functions. for function in function_map.values(): for callsite in function.callsites: if callsite.target is not None: # Remain the callee as None if we can't resolve it. callsite.callee = function_map.get(callsite.target) return function_map def MapAnnotation(self, function_map, signature_set): """Map annotation signatures to functions. Args: function_map: Function map. signature_set: Set of annotation signatures. Returns: Map of signatures to functions, map of signatures which can't be resolved. """ # Build the symbol map indexed by symbol name. If there are multiple symbols # with the same name, add them into a set. (e.g. symbols of static function # with the same name) symbol_map = collections.defaultdict(set) for symbol in self.symbols: if symbol.symtype == 'F': # Function symbol. result = self.FUNCTION_PREFIX_NAME_RE.match(symbol.name) if result is not None: function = function_map.get(symbol.address) # Ignore the symbol not in disassembly. if function is not None: # If there are multiple symbol with the same name and point to the # same function, the set will deduplicate them. symbol_map[result.group('name').strip()].add(function) # Build the signature map indexed by annotation signature. signature_map = {} sig_error_map = {} symbol_path_map = {} for sig in signature_set: (name, path, _) = sig functions = symbol_map.get(name) if functions is None: sig_error_map[sig] = self.ANNOTATION_ERROR_NOTFOUND continue if name not in symbol_path_map: # Lazy symbol path resolving. Since the addr2line isn't fast, only # resolve needed symbol paths. group_map = collections.defaultdict(list) for function in functions: line_info = self.AddressToLine(function.address)[0] if line_info is None: continue (_, symbol_path, _) = line_info # Group the functions with the same symbol signature (symbol name + # symbol path). 
Assume they are the same copies and do the same # annotation operations of them because we don't know which copy is # indicated by the users. group_map[symbol_path].append(function) symbol_path_map[name] = group_map # Symbol matching. function_group = None group_map = symbol_path_map[name] if len(group_map) > 0: if path is None: if len(group_map) > 1: # There is ambiguity but the path isn't specified. sig_error_map[sig] = self.ANNOTATION_ERROR_AMBIGUOUS continue # No path signature but all symbol signatures of functions are same. # Assume they are the same functions, so there is no ambiguity. (function_group,) = group_map.values() else: function_group = group_map.get(path) if function_group is None: sig_error_map[sig] = self.ANNOTATION_ERROR_NOTFOUND continue # The function_group is a list of all the same functions (according to # our assumption) which should be annotated together. signature_map[sig] = function_group return (signature_map, sig_error_map) def LoadAnnotation(self): """Load annotation rules. Returns: Map of add rules, set of remove rules, set of text signatures which can't be parsed. """ # Assume there is no ":" in the path. # Example: "get_range.lto.2501[driver/accel_kionix.c:327]" annotation_signature_regex = re.compile( r'^(?P<name>[^\[]+)(\[(?P<path>[^:]+)(:(?P<linenum>\d+))?\])?$') def NormalizeSignature(signature_text): """Parse and normalize the annotation signature. Args: signature_text: Text of the annotation signature. Returns: (function name, path, line number) of the signature. The path and line number can be None if not exist. None if failed to parse. """ result = annotation_signature_regex.match(signature_text.strip()) if result is None: return None name_result = self.FUNCTION_PREFIX_NAME_RE.match( result.group('name').strip()) if name_result is None: return None path = result.group('path') if path is not None: path = os.path.realpath(path.strip()) linenum = result.group('linenum') if linenum is not None: linenum = int(linenum.strip()) return (name_result.group('name').strip(), path, linenum) def ExpandArray(dic): """Parse and expand a symbol array Args: dic: Dictionary for the array annotation Returns: array of (symbol name, None, None). """ # TODO(drinkcat): This function is quite inefficient, as it goes through # the symbol table multiple times. begin_name = dic['name'] end_name = dic['name'] + "_end" offset = dic['offset'] if 'offset' in dic else 0 stride = dic['stride'] begin_address = None end_address = None for symbol in self.symbols: if (symbol.name == begin_name): begin_address = symbol.address if (symbol.name == end_name): end_address = symbol.address if (not begin_address or not end_address): return None output = [] # TODO(drinkcat): This is inefficient as we go from address to symbol # object then to symbol name, and later on we'll go back from symbol name # to symbol object. for addr in range(begin_address+offset, end_address, stride): # TODO(drinkcat): Not all architectures need to drop the first bit. 
val = self.rodata[(addr-self.rodata_offset) // 4] & 0xfffffffe name = None for symbol in self.symbols: if (symbol.address == val): result = self.FUNCTION_PREFIX_NAME_RE.match(symbol.name) name = result.group('name') break if not name: raise StackAnalyzerError('Cannot find function for address %s.', hex(val)) output.append((name, None, None)) return output add_rules = collections.defaultdict(set) remove_rules = list() invalid_sigtxts = set() if 'add' in self.annotation and self.annotation['add'] is not None: for src_sigtxt, dst_sigtxts in self.annotation['add'].items(): src_sig = NormalizeSignature(src_sigtxt) if src_sig is None: invalid_sigtxts.add(src_sigtxt) continue for dst_sigtxt in dst_sigtxts: if isinstance(dst_sigtxt, dict): dst_sig = ExpandArray(dst_sigtxt) if dst_sig is None: invalid_sigtxts.add(str(dst_sigtxt)) else: add_rules[src_sig].update(dst_sig) else: dst_sig = NormalizeSignature(dst_sigtxt) if dst_sig is None: invalid_sigtxts.add(dst_sigtxt) else: add_rules[src_sig].add(dst_sig) if 'remove' in self.annotation and self.annotation['remove'] is not None: for sigtxt_path in self.annotation['remove']: if isinstance(sigtxt_path, str): # The path has only one vertex. sigtxt_path = [sigtxt_path] if len(sigtxt_path) == 0: continue # Generate multiple remove paths from all the combinations of the # signatures of each vertex. sig_paths = [[]] broken_flag = False for sigtxt_node in sigtxt_path: if isinstance(sigtxt_node, str): # The vertex has only one signature. sigtxt_set = {sigtxt_node} elif isinstance(sigtxt_node, list): # The vertex has multiple signatures. sigtxt_set = set(sigtxt_node) else: # Assume the format of annotation is verified. There should be no # invalid case. assert False sig_set = set() for sigtxt in sigtxt_set: sig = NormalizeSignature(sigtxt) if sig is None: invalid_sigtxts.add(sigtxt) broken_flag = True elif not broken_flag: sig_set.add(sig) if broken_flag: continue # Append each signature of the current node to the all previous # remove paths. sig_paths = [path + [sig] for path in sig_paths for sig in sig_set] if not broken_flag: # All signatures are normalized. The remove path has no error. remove_rules.extend(sig_paths) return (add_rules, remove_rules, invalid_sigtxts) def ResolveAnnotation(self, function_map): """Resolve annotation. Args: function_map: Function map. Returns: Set of added call edges, list of remove paths, set of eliminated callsite addresses, set of annotation signatures which can't be resolved. """ def StringifySignature(signature): """Stringify the tupled signature. Args: signature: Tupled signature. Returns: Signature string. """ (name, path, linenum) = signature bracket_text = '' if path is not None: path = os.path.relpath(path) if linenum is None: bracket_text = '[{}]'.format(path) else: bracket_text = '[{}:{}]'.format(path, linenum) return name + bracket_text (add_rules, remove_rules, invalid_sigtxts) = self.LoadAnnotation() signature_set = set() for src_sig, dst_sigs in add_rules.items(): signature_set.add(src_sig) signature_set.update(dst_sigs) for remove_sigs in remove_rules: signature_set.update(remove_sigs) # Map signatures to functions. (signature_map, sig_error_map) = self.MapAnnotation(function_map, signature_set) # Build the indirect callsite map indexed by callsite signature. indirect_map = collections.defaultdict(set) for function in function_map.values(): for callsite in function.callsites: if callsite.target is not None: continue # Found an indirect callsite. 
line_info = self.AddressToLine(callsite.address)[0] if line_info is None: continue (name, path, linenum) = line_info result = self.FUNCTION_PREFIX_NAME_RE.match(name) if result is None: continue indirect_map[(result.group('name').strip(), path, linenum)].add( (function, callsite.address)) # Generate the annotation sets. add_set = set() remove_list = list() eliminated_addrs = set() for src_sig, dst_sigs in add_rules.items(): src_funcs = set(signature_map.get(src_sig, [])) # Try to match the source signature to the indirect callsites. Even if it # can't be found in disassembly. indirect_calls = indirect_map.get(src_sig) if indirect_calls is not None: for function, callsite_address in indirect_calls: # Add the caller of the indirect callsite to the source functions. src_funcs.add(function) # Assume each callsite can be represented by a unique address. eliminated_addrs.add(callsite_address) if src_sig in sig_error_map: # Assume the error is always the not found error. Since the signature # found in indirect callsite map must be a full signature, it can't # happen the ambiguous error. assert sig_error_map[src_sig] == self.ANNOTATION_ERROR_NOTFOUND # Found in inline stack, remove the not found error. del sig_error_map[src_sig] for dst_sig in dst_sigs: dst_funcs = signature_map.get(dst_sig) if dst_funcs is None: continue # Duplicate the call edge for all the same source and destination # functions. for src_func in src_funcs: for dst_func in dst_funcs: add_set.add((src_func, dst_func)) for remove_sigs in remove_rules: # Since each signature can be mapped to multiple functions, generate # multiple remove paths from all the combinations of these functions. remove_paths = [[]] skip_flag = False for remove_sig in remove_sigs: # Transform each signature to the corresponding functions. remove_funcs = signature_map.get(remove_sig) if remove_funcs is None: # There is an unresolved signature in the remove path. Ignore the # whole broken remove path. skip_flag = True break else: # Append each function of the current signature to the all previous # remove paths. remove_paths = [p + [f] for p in remove_paths for f in remove_funcs] if skip_flag: # Ignore the broken remove path. continue for remove_path in remove_paths: # Deduplicate the remove paths. if remove_path not in remove_list: remove_list.append(remove_path) # Format the error messages. failed_sigtxts = set() for sigtxt in invalid_sigtxts: failed_sigtxts.add((sigtxt, self.ANNOTATION_ERROR_INVALID)) for sig, error in sig_error_map.items(): failed_sigtxts.add((StringifySignature(sig), error)) return (add_set, remove_list, eliminated_addrs, failed_sigtxts) def PreprocessAnnotation(self, function_map, add_set, remove_list, eliminated_addrs): """Preprocess the annotation and callgraph. Add the missing call edges, and delete simple remove paths (the paths have one or two vertices) from the function_map. Eliminate the annotated indirect callsites. Return the remaining remove list. Args: function_map: Function map. add_set: Set of missing call edges. remove_list: List of remove paths. eliminated_addrs: Set of eliminated callsite addresses. Returns: List of remaining remove paths. """ def CheckEdge(path): """Check if all edges of the path are on the callgraph. Args: path: Path. Returns: True or False. """ for index in range(len(path) - 1): if (path[index], path[index + 1]) not in edge_set: return False return True for src_func, dst_func in add_set: # TODO(cheyuw): Support tailing call annotation. 
src_func.callsites.append( Callsite(None, dst_func.address, False, dst_func)) # Delete simple remove paths. remove_simple = set(tuple(p) for p in remove_list if len(p) <= 2) edge_set = set() for function in function_map.values(): cleaned_callsites = [] for callsite in function.callsites: if ((callsite.callee,) in remove_simple or (function, callsite.callee) in remove_simple): continue if callsite.target is None and callsite.address in eliminated_addrs: continue cleaned_callsites.append(callsite) if callsite.callee is not None: edge_set.add((function, callsite.callee)) function.callsites = cleaned_callsites return [p for p in remove_list if len(p) >= 3 and CheckEdge(p)] def AnalyzeCallGraph(self, function_map, remove_list): """Analyze callgraph. It will update the max stack size and path for each function. Args: function_map: Function map. remove_list: List of remove paths. Returns: List of function cycles. """ def Traverse(curr_state): """Traverse the callgraph and calculate the max stack usages of functions. Args: curr_state: Current state. Returns: SCC lowest link. """ scc_index = scc_index_counter[0] scc_index_counter[0] += 1 scc_index_map[curr_state] = scc_index scc_lowlink = scc_index scc_stack.append(curr_state) # Push the current state in the stack. We can use a set to maintain this # because the stacked states are unique; otherwise we will find a cycle # first. stacked_states.add(curr_state) (curr_address, curr_positions) = curr_state curr_func = function_map[curr_address] invalid_flag = False new_positions = list(curr_positions) for index, position in enumerate(curr_positions): remove_path = remove_list[index] # The position of each remove path in the state is the length of the # longest matching path between the prefix of the remove path and the # suffix of the current traversing path. We maintain this length when # appending the next callee to the traversing path. And it can be used # to check if the remove path appears in the traversing path. # TODO(cheyuw): Implement KMP algorithm to match remove paths # efficiently. if remove_path[position] is curr_func: # Matches the current function, extend the length. new_positions[index] = position + 1 if new_positions[index] == len(remove_path): # The length of the longest matching path is equal to the length of # the remove path, which means the suffix of the current traversing # path matches the remove path. invalid_flag = True break else: # We can't get the new longest matching path by extending the previous # one directly. Fallback to search the new longest matching path. # If we can't find any matching path in the following search, reset # the matching length to 0. new_positions[index] = 0 # We want to find the new longest matching prefix of remove path with # the suffix of the current traversing path. Because the new longest # matching path won't be longer than the prevous one now, and part of # the suffix matches the prefix of remove path, we can get the needed # suffix from the previous matching prefix of the invalid path. suffix = remove_path[:position] + [curr_func] for offset in range(1, len(suffix)): length = position - offset if remove_path[:length] == suffix[offset:]: new_positions[index] = length break new_positions = tuple(new_positions) # If the current suffix is invalid, set the max stack usage to 0. max_stack_usage = 0 max_callee_state = None self_loop = False if not invalid_flag: # Max stack usage is at least equal to the stack frame. 
max_stack_usage = curr_func.stack_frame for callsite in curr_func.callsites: callee = callsite.callee if callee is None: continue callee_state = (callee.address, new_positions) if callee_state not in scc_index_map: # Unvisited state. scc_lowlink = min(scc_lowlink, Traverse(callee_state)) elif callee_state in stacked_states: # The state is shown in the stack. There is a cycle. sub_stack_usage = 0 scc_lowlink = min(scc_lowlink, scc_index_map[callee_state]) if callee_state == curr_state: self_loop = True done_result = done_states.get(callee_state) if done_result is not None: # Already done this state and use its result. If the state reaches a # cycle, reusing the result will cause inaccuracy (the stack usage # of cycle depends on where the entrance is). But it's fine since we # can't get accurate stack usage under this situation, and we rely # on user-provided annotations to break the cycle, after which the # result will be accurate again. (sub_stack_usage, _) = done_result if callsite.is_tail: # For tailing call, since the callee reuses the stack frame of the # caller, choose the larger one directly. stack_usage = max(curr_func.stack_frame, sub_stack_usage) else: stack_usage = curr_func.stack_frame + sub_stack_usage if stack_usage > max_stack_usage: max_stack_usage = stack_usage max_callee_state = callee_state if scc_lowlink == scc_index: group = [] while scc_stack[-1] != curr_state: scc_state = scc_stack.pop() stacked_states.remove(scc_state) group.append(scc_state) scc_stack.pop() stacked_states.remove(curr_state) # If the cycle is not empty, record it. if len(group) > 0 or self_loop: group.append(curr_state) cycle_groups.append(group) # Store the done result. done_states[curr_state] = (max_stack_usage, max_callee_state) if curr_positions == initial_positions: # If the current state is initial state, we traversed the callgraph by # using the current function as start point. Update the stack usage of # the function. # If the function matches a single vertex remove path, this will set its # max stack usage to 0, which is not expected (we still calculate its # max stack usage, but prevent any function from calling it). However, # all the single vertex remove paths have been preprocessed and removed. curr_func.stack_max_usage = max_stack_usage # Reconstruct the max stack path by traversing the state transitions. max_stack_path = [curr_func] callee_state = max_callee_state while callee_state is not None: # The first element of state tuple is function address. max_stack_path.append(function_map[callee_state[0]]) done_result = done_states.get(callee_state) # All of the descendants should be done. assert done_result is not None (_, callee_state) = done_result curr_func.stack_max_path = max_stack_path return scc_lowlink # The state is the concatenation of the current function address and the # state of matching position. initial_positions = (0,) * len(remove_list) done_states = {} stacked_states = set() scc_index_counter = [0] scc_index_map = {} scc_stack = [] cycle_groups = [] for function in function_map.values(): if function.stack_max_usage is None: Traverse((function.address, initial_positions)) cycle_functions = [] for group in cycle_groups: cycle = set(function_map[state[0]] for state in group) if cycle not in cycle_functions: cycle_functions.append(cycle) return cycle_functions def Analyze(self): """Run the stack analysis. Raises: StackAnalyzerError: If disassembly fails. """ def OutputInlineStack(address, prefix=''): """Output beautiful inline stack. Args: address: Address. 
prefix: Prefix of each line. Returns: Key for sorting, output text """ line_infos = self.AddressToLine(address, True) if line_infos[0] is None: order_key = (None, None) else: (_, path, linenum) = line_infos[0] order_key = (linenum, path) line_texts = [] for line_info in reversed(line_infos): if line_info is None: (function_name, path, linenum) = ('??', '??', 0) else: (function_name, path, linenum) = line_info line_texts.append('{}[{}:{}]'.format(function_name, os.path.relpath(path), linenum)) output = '{}-> {} {:x}\n'.format(prefix, line_texts[0], address) for depth, line_text in enumerate(line_texts[1:]): output += '{} {}- {}\n'.format(prefix, ' ' * depth, line_text) # Remove the last newline character. return (order_key, output.rstrip('\n')) # Analyze disassembly. try: disasm_text = subprocess.check_output([self.options.objdump, '-d', self.options.elf_path], encoding='utf-8') except subprocess.CalledProcessError: raise StackAnalyzerError('objdump failed to disassemble.') except OSError: raise StackAnalyzerError('Failed to run objdump.') function_map = self.AnalyzeDisassembly(disasm_text) result = self.ResolveAnnotation(function_map) (add_set, remove_list, eliminated_addrs, failed_sigtxts) = result remove_list = self.PreprocessAnnotation(function_map, add_set, remove_list, eliminated_addrs) cycle_functions = self.AnalyzeCallGraph(function_map, remove_list) # Print the results of task-aware stack analysis. extra_stack_frame = self.annotation.get('exception_frame_size', DEFAULT_EXCEPTION_FRAME_SIZE) for task in self.tasklist: routine_func = function_map[task.routine_address] print('Task: {}, Max size: {} ({} + {}), Allocated size: {}'.format( task.name, routine_func.stack_max_usage + extra_stack_frame, routine_func.stack_max_usage, extra_stack_frame, task.stack_max_size)) print('Call Trace:') max_stack_path = routine_func.stack_max_path # Assume the routine function is resolved. assert max_stack_path is not None for depth, curr_func in enumerate(max_stack_path): line_info = self.AddressToLine(curr_func.address)[0] if line_info is None: (path, linenum) = ('??', 0) else: (_, path, linenum) = line_info print(' {} ({}) [{}:{}] {:x}'.format(curr_func.name, curr_func.stack_frame, os.path.relpath(path), linenum, curr_func.address)) if depth + 1 < len(max_stack_path): succ_func = max_stack_path[depth + 1] text_list = [] for callsite in curr_func.callsites: if callsite.callee is succ_func: indent_prefix = ' ' if callsite.address is None: order_text = (None, '{}-> [annotation]'.format(indent_prefix)) else: order_text = OutputInlineStack(callsite.address, indent_prefix) text_list.append(order_text) for _, text in sorted(text_list, key=lambda item: item[0]): print(text) print('Unresolved indirect callsites:') for function in function_map.values(): indirect_callsites = [] for callsite in function.callsites: if callsite.target is None: indirect_callsites.append(callsite.address) if len(indirect_callsites) > 0: print(' In function {}:'.format(function.name)) text_list = [] for address in indirect_callsites: text_list.append(OutputInlineStack(address, ' ')) for _, text in sorted(text_list, key=lambda item: item[0]): print(text) print('Unresolved annotation signatures:') for sigtxt, error in failed_sigtxts: print(' {}: {}'.format(sigtxt, error)) if len(cycle_functions) > 0: print('There are cycles in the following function sets:') for functions in cycle_functions: print('[{}]'.format(', '.join(function.name for function in functions))) def ParseArgs(): """Parse commandline arguments. 
Returns: options: Namespace from argparse.parse_args(). """ parser = argparse.ArgumentParser(description="EC firmware stack analyzer.") parser.add_argument('elf_path', help="the path of EC firmware ELF") parser.add_argument('--export_taskinfo', required=True, help="the path of export_taskinfo.so utility") parser.add_argument('--section', required=True, help='the section.', choices=[SECTION_RO, SECTION_RW]) parser.add_argument('--objdump', default='objdump', help='the path of objdump') parser.add_argument('--addr2line', default='addr2line', help='the path of addr2line') parser.add_argument('--annotation', default=None, help='the path of annotation file') # TODO(cheyuw): Add an option for dumping stack usage of all functions. return parser.parse_args() def ParseSymbolText(symbol_text): """Parse the content of the symbol text. Args: symbol_text: Text of the symbols. Returns: symbols: Symbol list. """ # Example: "10093064 g F .text 0000015c .hidden hook_task" symbol_regex = re.compile(r'^(?P<address>[0-9A-Fa-f]+)\s+[lwg]\s+' r'((?P<type>[OF])\s+)?\S+\s+' r'(?P<size>[0-9A-Fa-f]+)\s+' r'(\S+\s+)?(?P<name>\S+)$') symbols = [] for line in symbol_text.splitlines(): line = line.strip() result = symbol_regex.match(line) if result is not None: address = int(result.group('address'), 16) symtype = result.group('type') if symtype is None: symtype = 'O' size = int(result.group('size'), 16) name = result.group('name') symbols.append(Symbol(address, symtype, size, name)) return symbols def ParseRoDataText(rodata_text): """Parse the content of rodata Args: symbol_text: Text of the rodata dump. Returns: symbols: Symbol list. """ # Examples: 8018ab0 00040048 00010000 10020000 4b8e0108 ...H........K... # 100a7294 00000000 00000000 01000000 ............ base_offset = None offset = None rodata = [] for line in rodata_text.splitlines(): line = line.strip() space = line.find(' ') if space < 0: continue try: address = int(line[0:space], 16) except ValueError: continue if not base_offset: base_offset = address offset = address elif address != offset: raise StackAnalyzerError('objdump of rodata not contiguous.') for i in range(0, 4): num = line[(space + 1 + i*9):(space + 9 + i*9)] if len(num.strip()) > 0: val = int(num, 16) else: val = 0 # TODO(drinkcat): Not all platforms are necessarily big-endian rodata.append((val & 0x000000ff) << 24 | (val & 0x0000ff00) << 8 | (val & 0x00ff0000) >> 8 | (val & 0xff000000) >> 24) offset = offset + 4*4 return (base_offset, rodata) def LoadTasklist(section, export_taskinfo, symbols): """Load the task information. Args: section: Section (RO | RW). export_taskinfo: Handle of export_taskinfo.so. symbols: Symbol list. Returns: tasklist: Task list. """ TaskInfoPointer = ctypes.POINTER(TaskInfo) taskinfos = TaskInfoPointer() if section == SECTION_RO: get_taskinfos_func = export_taskinfo.get_ro_taskinfos else: get_taskinfos_func = export_taskinfo.get_rw_taskinfos taskinfo_num = get_taskinfos_func(ctypes.pointer(taskinfos)) tasklist = [] for index in range(taskinfo_num): taskinfo = taskinfos[index] tasklist.append(Task(taskinfo.name.decode('utf-8'), taskinfo.routine.decode('utf-8'), taskinfo.stack_size)) # Resolve routine address for each task. It's more efficient to resolve all # routine addresses of tasks together. routine_map = dict((task.routine_name, None) for task in tasklist) for symbol in symbols: # Resolve task routine address. if symbol.name in routine_map: # Assume the symbol of routine is unique. 
assert routine_map[symbol.name] is None routine_map[symbol.name] = symbol.address for task in tasklist: address = routine_map[task.routine_name] # Assume we have resolved all routine addresses. assert address is not None task.routine_address = address return tasklist def main(): """Main function.""" try: options = ParseArgs() # Load annotation config. if options.annotation is None: annotation = {} elif not os.path.exists(options.annotation): print('Warning: Annotation file {} does not exist.' .format(options.annotation)) annotation = {} else: try: with open(options.annotation, 'r') as annotation_file: annotation = yaml.safe_load(annotation_file) except yaml.YAMLError: raise StackAnalyzerError('Failed to parse annotation file {}.' .format(options.annotation)) except IOError: raise StackAnalyzerError('Failed to open annotation file {}.' .format(options.annotation)) # TODO(cheyuw): Do complete annotation format verification. if not isinstance(annotation, dict): raise StackAnalyzerError('Invalid annotation file {}.' .format(options.annotation)) # Generate and parse the symbols. try: symbol_text = subprocess.check_output([options.objdump, '-t', options.elf_path], encoding='utf-8') rodata_text = subprocess.check_output([options.objdump, '-s', '-j', '.rodata', options.elf_path], encoding='utf-8') except subprocess.CalledProcessError: raise StackAnalyzerError('objdump failed to dump symbol table or rodata.') except OSError: raise StackAnalyzerError('Failed to run objdump.') symbols = ParseSymbolText(symbol_text) rodata = ParseRoDataText(rodata_text) # Load the tasklist. try: export_taskinfo = ctypes.CDLL(options.export_taskinfo) except OSError: raise StackAnalyzerError('Failed to load export_taskinfo.') tasklist = LoadTasklist(options.section, export_taskinfo, symbols) analyzer = StackAnalyzer(options, symbols, rodata, tasklist, annotation) analyzer.Analyze() except StackAnalyzerError as e: print('Error: {}'.format(e)) if __name__ == '__main__': main()
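

# --- Usage sketch (not part of the original tool) ---
# A minimal, hypothetical example of how ParseSymbolText interprets one line of
# `objdump -t` output. The sample line mirrors the comment inside
# ParseSymbolText; `_demo_parse_symbols` is an added name used only for
# illustration and is never called by the analyzer.
def _demo_parse_symbols():
  sample = '10093064 g     F .text  0000015c .hidden hook_task'
  symbols = ParseSymbolText(sample)
  # Expect one Symbol entry with address 0x10093064, type 'F' (function),
  # size 0x15c and name 'hook_task'.
  return symbols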
the-stack_106_17134
import rlkit.misc.hyperparameter as hyp from rlkit.demos.source.dict_to_mdp_path_loader import EncoderDictToMDPPathLoader from rlkit.launchers.experiments.ashvin.awac_rig import awac_rig_experiment from rlkit.launchers.launcher_util import run_experiment from rlkit.launchers.arglauncher import run_variants from rlkit.torch.sac.policies import GaussianPolicy, GaussianMixturePolicy from rlkit.envs.encoder_wrappers import PresamplingEncoderWrappedEnv from sawyer_control.envs.sawyer_grip import SawyerGripEnv # from sawyer_control.envs.sawyer_grip_stub import SawyerGripEnv from rlkit.torch.networks import Clamp from rlkit.torch.vae.vq_vae import VQ_VAE from rlkit.torch.vae.vq_vae_trainer import VQ_VAETrainer from rlkit.torch.grill.common import train_vqvae mini_demos = [ dict(path='/home/ashvin/data/real_world_val/fixed_fixed_drawer_demos.npy', obs_dict=True, is_demo=True,) ] #ASHVIN: MAKE THIS SPECIFIC TO THE TASK YOURE DOING!! all_demos = [ dict(path='/home/ashvin/data/real_world_val/fixed_fixed_drawer_demos.npy', obs_dict=True, is_demo=True,), dict(path='/home/ashvin/data/real_world_val/fixed_fixed_pot_demos.npy', obs_dict=True, is_demo=True,), dict(path='/home/ashvin/data/real_world_val/fixed_fixed_pnp_demos.npy', obs_dict=True, is_demo=True,), dict(path='/home/ashvin/data/real_world_val/fixed_fixed_tray_demos.npy', obs_dict=True, is_demo=True,), ] if __name__ == "__main__": variant = dict( imsize=48, env_class=SawyerGripEnv, env_kwargs=dict( action_mode='position', config_name='ashvin_config', reset_free=False, position_action_scale=0.05, max_speed=0.4, step_sleep_time=0.2, crop_version_str="crop_val_torch", ), policy_class=GaussianPolicy, policy_kwargs=dict( hidden_sizes=[256, 256, 256, 256, ], max_log_std=0, min_log_std=-6, std_architecture="values", ), qf_kwargs=dict( hidden_sizes=[256, 256], ), trainer_kwargs=dict( discount=0.99, soft_target_tau=5e-3, target_update_period=1, policy_lr=3e-4, qf_lr=3E-4, reward_scale=1, beta=1, use_automatic_entropy_tuning=False, alpha=0, bc_num_pretrain_steps=0, q_num_pretrain1_steps=0, q_num_pretrain2_steps=0, #25001 #HERE policy_weight_decay=1e-4, q_weight_decay=0, rl_weight=1.0, use_awr_update=True, use_reparam_update=False, compute_bc=True, reparam_weight=0.0, awr_weight=1.0, bc_weight=0.0, reward_transform_kwargs=None, terminal_transform_kwargs=None, ), max_path_length=75, #50 algo_kwargs=dict( batch_size=1024, #1024 num_epochs=101, #1001 num_eval_steps_per_epoch=600, #500 num_expl_steps_per_train_loop=600, #500 num_trains_per_train_loop=600, #500 min_num_steps_before_training=150, #4000 ), replay_buffer_kwargs=dict( fraction_future_context=0.6, fraction_distribution_context=0.0, # TODO: Try less? max_size=int(5E5), # HERE# HERE# HERE# HERE# HERE# HERE# HERE# HERE# HERE (DOUBLE CHECK THAT DEMOS FIT!!!!) ), demo_replay_buffer_kwargs=dict( fraction_future_context=0.6, fraction_distribution_context=0.0, # TODO: Try less? 
), reward_kwargs=dict( reward_type='sparse', epsilon=1.0, ), observation_key='latent_observation', desired_goal_key='latent_desired_goal', save_video=True, save_video_kwargs=dict( save_video_period=1, pad_color=0, ), # encoder_wrapper=PresamplingEncoderWrappedEnv, # Uncomment if using pixelcnn reset_keys_map=dict( image_observation="initial_latent_state" ), path_loader_class=EncoderDictToMDPPathLoader, path_loader_kwargs=dict( recompute_reward=True, ), renderer_kwargs=dict( create_image_format='HWC', output_image_format='CWH', flatten_image=True, width=48, height=48, ), add_env_demos=False, add_env_offpolicy_data=False, load_demos=True, pretrain_policy=True, pretrain_rl=True, evaluation_goal_sampling_mode="presampled_images", # exploration_goal_sampling_mode="presampled_images", exploration_goal_sampling_mode="presampled_latents", # exploration_goal_sampling_mode="presampled_conditional_prior", train_vae_kwargs=dict( imsize=48, beta=1, beta_schedule_kwargs=dict( x_values=(0, 250), y_values=(0, 100), ), num_epochs=1501, #1501 embedding_dim=5, dump_skew_debug_plots=False, decoder_activation='sigmoid', use_linear_dynamics=False, generate_vae_dataset_kwargs=dict( N=1000, n_random_steps=2, test_p=.9, dataset_path={ 'train': 'demos/icra2021/dataset_v1_train.npy', 'test': 'demos/icra2021/dataset_v1_test.npy', }, augment_data=False, use_cached=False, show=False, oracle_dataset=False, oracle_dataset_using_set_to_goal=False, non_presampled_goal_img_is_garbage=False, random_rollout_data=True, random_rollout_data_set_to_goal=True, conditional_vae_dataset=True, save_trajectories=False, enviorment_dataset=False, tag="ccrig_tuning_orig_network", ), vae_trainer_class=VQ_VAETrainer, vae_class=VQ_VAE, vae_kwargs=dict( input_channels=3, imsize=48, ), algo_kwargs=dict( key_to_reconstruct='x_t', start_skew_epoch=5000, is_auto_encoder=False, batch_size=128, lr=1e-3, skew_config=dict( method='vae_prob', power=0, ), weight_decay=0.0, skew_dataset=False, priority_function_kwargs=dict( decoder_distribution='gaussian_identity_variance', sampling_method='importance_sampling', num_latents_to_sample=10, ), use_parallel_dataloading=False, ), save_period=10, ), train_model_func=train_vqvae, presampled_goal_kwargs=dict( eval_goals='/media/ashvin/data2/data/val/v1/close_blackhandle_eval_goals.pkl', # ASHVIN: UPDATE THIS expl_goals='/media/ashvin/data2/data/val/v1/close_blackhandle_expl_goals.npy', # ASHVIN: UPDATE THIS # expl_goals='/media/ashvin/data2/data/val/v1/close_red_drawer1_expl_goals.npy', ), launcher_config=dict( unpack_variant=True, region='us-west-1', ), logger_config=dict( snapshot_mode='gap', snapshot_gap=1, ), pickle_paths=True, pretrained_vae_path="/home/ashvin/data/sasha/awac-exps/real-world/vqvae/run14/itr_3.pt", pretrained_algo_path='/home/ashvin/data/ashvin/icra2021/final/new/close-horizontal-drawer-no-pretrained-goals1/run1/id0/itr_0.pt', ) search_space = { "seed": range(1), 'path_loader_kwargs.demo_paths': [all_demos], 'reward_kwargs.epsilon': [2,], #1.75 is mean 'trainer_kwargs.beta': [0.3], 'num_pybullet_objects':[None], 'policy_kwargs.min_log_std': [-6], 'trainer_kwargs.awr_weight': [1.0], 'trainer_kwargs.awr_use_mle_for_vf': [True], 'trainer_kwargs.awr_sample_actions': [False], 'trainer_kwargs.clip_score': [2], 'trainer_kwargs.awr_min_q': [True], 'trainer_kwargs.reward_transform_kwargs': [None, ], 'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0)], 'qf_kwargs.output_activation': [Clamp(max=0)], } sweeper = hyp.DeterministicHyperparameterSweeper( search_space, default_parameters=variant, 
) variants = [] for variant in sweeper.iterate_hyperparameters(): variants.append(variant) run_variants(awac_rig_experiment, variants, run_id=20) #HERE
the-stack_106_17135
import constant
from discord import Message, utils
from discord.ext.commands import Bot, Cog


class Thread(Cog):
    def __init__(self, bot: Bot):
        self.bot = bot

    @Cog.listener()
    async def on_message(self, message: Message):
        author = message.author
        channel = message.channel

        if author.bot:
            return

        # Sort thread channels by most recent activity
        if (
            channel.category.id == constant.CAT_THREAD
            and channel.id != constant.CH_THREAD_MASTER
        ):
            position = self.bot.get_channel(constant.CH_THREAD_MASTER).position + 1
            if channel.position <= position:
                return
            await channel.edit(position=position)

        if channel.id == constant.CH_THREAD_MASTER:
            name = message.content
        elif message.content.startswith("##"):
            name = message.content[2:]
        else:
            return

        guild = message.guild
        ch_thread = utils.get(guild.channels, name=name)
        cat_thread = self.bot.get_channel(constant.CAT_THREAD)
        ch_main = self.bot.get_channel(constant.CH_MAIN)

        # If no channel with the same name exists, create a new thread channel
        if ch_thread is None:
            new_thread = await cat_thread.create_text_channel(name=name)
            await channel.send(f"{author.mention} {new_thread.mention} has been created.")
            if not message.channel == ch_main:
                await ch_main.send(f"{new_thread.mention} has been created.")
            await self.bot.database.insert(
                constant.TABLE_NAME,
                channel_id=new_thread.id,
                author_id=author.id,
                channel_type="thread",
            )
            await cat_thread.edit(name=f"THREAD ─ {len(cat_thread.channels)}")
            return

        # If a channel with the same name is already in the thread category
        if ch_thread.category == cat_thread:
            text = "has already been created."

        # If a channel with the same name is in the archive category
        elif ch_thread.category.id == constant.CAT_THREAD_ARCHIVE:
            text = "has been restored from the archive."
            await ch_thread.edit(category=cat_thread)
            await ch_thread.edit(sync_permissions=True)
            await ch_main.send(f"{ch_thread.mention} has been reopened.")
            await self.bot.database.update(
                constant.TABLE_NAME,
                {"author_id": author.id},
                channel_id=ch_thread.id,
                channel_type="thread",
            )
            await cat_thread.edit(name=f"THREAD ─ {len(channel.category.channels)}")
            cat_thread_archive = self.bot.get_channel(constant.CAT_THREAD_ARCHIVE)
            await cat_thread_archive.edit(
                name=f"📜 THREAD ─ {len(cat_thread_archive.channels)}"
            )

        await channel.send(f"{author.mention} {ch_thread.mention} {text}")


def setup(bot: Bot):
    bot.add_cog(Thread(bot))
the-stack_106_17136
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2018, John McNamara, [email protected] # from ..excel_comparsion_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename('chart_errorbars07.xlsx') self.ignore_elements = {'xl/charts/chart1.xml': ['<c:formatCode']} def test_create_file(self): """Test the creation of an XlsxWriter file with error bars.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({'type': 'stock'}) date_format = workbook.add_format({'num_format': 14}) chart.axis_ids = [45470848, 45472768] data = [ [39083, 39084, 39085, 39086, 39087], [27.2, 25.03, 19.05, 20.34, 18.5], [23.49, 19.55, 15.12, 17.84, 16.34], [25.45, 23.05, 17.32, 20.45, 17.34], ] for row in range(5): worksheet.write(row, 0, data[0][row], date_format) worksheet.write(row, 1, data[1][row]) worksheet.write(row, 2, data[2][row]) worksheet.write(row, 3, data[3][row]) worksheet.set_column('A:D', 11) chart.add_series({ 'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$B$1:$B$5', 'y_error_bars': {'type': 'standard_error'}, }) chart.add_series({ 'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$C$1:$C$5', 'y_error_bars': {'type': 'standard_error'}, }) chart.add_series({ 'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$D$1:$D$5', 'y_error_bars': {'type': 'standard_error'}, }) worksheet.insert_chart('E9', chart) workbook.close() self.assertExcelEqual()
the-stack_106_17137
"""Anagram finding functions.""" from nagaram.scrabble import blank_tiles, word_list, word_score def _letter_map(word): """Creates a map of letter use in a word. Args: word: a string to create a letter map from Returns: a dictionary of {letter: integer count of letter in word} """ lmap = {} for letter in word: try: lmap[letter] += 1 except KeyError: lmap[letter] = 1 return lmap def anagrams_in_word(word, sowpods=False, start="", end=""): """Finds anagrams in word. Args: word: the string to base our search off of sowpods: boolean to declare TWL or SOWPODS words file start: a string of starting characters to find anagrams based on end: a string of ending characters to find anagrams based on Yields: a tuple of (word, score) that can be made with the input_word """ input_letters, blanks, questions = blank_tiles(word) for tile in start + end: input_letters.append(tile) for word in word_list(sowpods, start, end): lmap = _letter_map(input_letters) used_blanks = 0 for letter in word: if letter in lmap: lmap[letter] -= 1 if lmap[letter] < 0: used_blanks += 1 if used_blanks > (blanks + questions): break else: used_blanks += 1 if used_blanks > (blanks + questions): break else: yield (word, word_score(word, input_letters, questions))
the-stack_106_17138
# Copyright 2018 Iguazio # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from datetime import datetime import google.protobuf.pyext._message as message import numpy as np import pandas as pd import pytz from google.protobuf.message import Message from pandas.api.types import is_datetime64_any_dtype as is_datetime from pandas.core.dtypes.dtypes import CategoricalDtype from . import frames_pb2 as fpb from .dtypes import dtype_of from .errors import MessageError, WriteError pb_list_types = ( message.RepeatedCompositeContainer, message.RepeatedScalarContainer, ) def pb_value(v): """Convert Python type to frames_pb2.Value""" if v is None: return None dtype = dtype_of(v) kw = {dtype.val_key: v} return fpb.Value(**kw) def pb_map(d): """Convert map values to frames_pb2.Value""" return None if d is None else {k: pb_value(v) for k, v in d.items()} def SchemaField(name=None, doc=None, default=None, type=None, properties=None): """A schema field""" # We return a frames_pb2.SchemaField from Python types return fpb.SchemaField( name=name, doc=doc, default=pb_value(default), type=type, properties=pb_map(properties), ) def pb2py(obj): """Convert protobuf object to Python object""" if isinstance(obj, fpb.Value): return getattr(obj, obj.WhichOneof('value')) if isinstance(obj, Message): return { field.name: pb2py(value) for field, value in obj.ListFields() } if isinstance(obj, pb_list_types): return [pb2py(v) for v in obj] if isinstance(obj, message.MessageMapContainer): return { key: pb2py(value) for key, value in obj.items() } return obj def msg2df(frame, frame_factory, columns=None, do_reorder=True): indices = [col2series(idx, None) for idx in frame.indices] if len(indices) == 1: new_index = indices[0] elif len(indices) > 1: new_index = pd.MultiIndex.from_arrays(indices) else: new_index = None data = {col.name: col2series(col, new_index) for col in frame.columns} df = frame_factory(data, new_index) # IG-14809 - return an empty dataframe (without columns metadata) in case of empty dataset if df.empty: return frame_factory({}, new_index) with warnings.catch_warnings(): warnings.simplefilter('ignore') df.labels = pb2py(frame.labels) if do_reorder: if columns: df = df.reindex(columns=columns) else: is_range = True indices = [False] * len(df.columns) for name in df.columns: try: if name.startswith('column_'): col_index = int(name[len('column_'):]) if col_index < len(indices): indices[col_index] = True continue except ValueError: pass is_range = False break if is_range and all(elem for elem in indices): renameDict = {} for i in range(len(df.columns)): renameDict['column_' + str(i)] = i df.rename(columns=renameDict, inplace=True) new_index = pd.RangeIndex(start=0, step=1, stop=len(df.columns)) df = df.reindex(columns=new_index) else: df = df.reindex(columns=sorted(df.columns)) df = insert_nulls_based_on_null_values_map(df, frame.null_values) return df def col2series(col, index): current_dtype = "" if col.dtype == fpb.BOOLEAN: data = col.bools current_dtype = "bool" elif col.dtype == fpb.FLOAT: data = col.floats 
current_dtype = "float" elif col.dtype == fpb.INTEGER: data = col.ints current_dtype = "int" elif col.dtype == fpb.STRING: data = col.strings current_dtype = "object" elif col.dtype == fpb.TIME: data = [pd.Timestamp(t, unit='ns') for t in col.times] current_dtype = "datetime64[ns, UTC]" else: raise MessageError('unknown dtype - {}'.format(col.dtype)) if col.kind == col.LABEL: data = [data[0]] * col.size if col.dtype == fpb.STRING: data = pd.Series(data, dtype='category', index=index, name=col.name) else: data = pd.Series(data, index=index, name=col.name, dtype=current_dtype) return data def idx2series(idx): return pd.Series(idx.values, name=idx.name) def df2msg(df, labels=None, index_cols=None): indices = None if index_cols is not None: # if there is already an index set, we want to preserve it. if not (type(df.index) == pd.RangeIndex and df.index.name is None): df.reset_index(inplace=True) indices = [series2col(df[name], name) for name in index_cols] cols = [col for col in df.columns if col not in index_cols] df = df[cols] elif should_encode_index(df): if hasattr(df.index, 'levels'): by_name = df.index.get_level_values names = df.index.names serieses = (idx2series(by_name(name)) for name in names) else: serieses = [idx2series(df.index)] indices = [series2col(s, s.name) for s in serieses] schema = get_actual_types(df) df, null_values = normalize_df(df.copy(), schema) is_range = isinstance(df.columns, pd.RangeIndex) columns = [] for name in df.columns: if not is_range and not isinstance(name, str): raise Exception('Column names must be strings') series = df[name] if isinstance(name, int): name = 'column_' + str(name) columns.append(series2col_with_dtype(series, name, schema[name])) return fpb.Frame( columns=columns, indices=indices, labels=pb_map(labels), null_values=null_values, ) def series2col_with_dtype(s, name, dtype): kw = { 'name': name, 'kind': fpb.Column.SLICE, } if dtype == fpb.INTEGER: kw['dtype'] = fpb.INTEGER kw['ints'] = s elif dtype == fpb.FLOAT: kw['dtype'] = fpb.FLOAT kw['floats'] = s elif dtype == fpb.STRING: # Pandas dtype for str is object kw['strings'] = s kw['dtype'] = fpb.STRING elif dtype == fpb.BOOLEAN: kw['bools'] = s kw['dtype'] = fpb.BOOLEAN elif dtype == fpb.TIME: if s.dt.tz: try: s = s.dt.tz_localize(pytz.UTC) except TypeError: s = s.dt.tz_convert('UTC') kw['times'] = s.astype(np.int64) kw['dtype'] = fpb.TIME elif dtype == fpb.NULL: kw['dtype'] = fpb.NULL else: raise WriteError('{} - unsupported type - {}'.format(s.name, s.dtype)) return fpb.Column(**kw) def series2col(s, name): kw = { 'name': name, 'kind': fpb.Column.SLICE, } if is_int_dtype(s.dtype): kw['dtype'] = fpb.INTEGER kw['ints'] = s elif is_float_dtype(s.dtype): kw['dtype'] = fpb.FLOAT kw['floats'] = s elif s.dtype == np.object: # Pandas dtype for str is object kw['strings'] = s kw['dtype'] = fpb.STRING elif s.dtype == np.bool: kw['bools'] = s kw['dtype'] = fpb.BOOLEAN elif is_datetime(s.dtype): if s.dt.tz: try: s = s.dt.tz_localize(pytz.UTC) except TypeError: s = s.dt.tz_convert('UTC') kw['times'] = s.astype(np.int64) kw['dtype'] = fpb.TIME elif is_categorical_dtype(s.dtype): # We assume catgorical data is strings kw['strings'] = s.astype(str) kw['dtype'] = fpb.STRING else: raise WriteError('{} - unsupported type - {}'.format(s.name, s.dtype)) return fpb.Column(**kw) def insert_nulls_based_on_null_values_map(df, null_values): # if there are no Null values at all, skip if len(null_values) == 0: return df i = 0 casted_columns = {} for key in df.index: for col_name in null_values[i].nullColumns: # 
boolean columns should be converted to `object` to be able to # represent None. if df[col_name].dtype == np.bool and \ col_name not in casted_columns: casted_columns[col_name] = True df[col_name] = df[col_name].astype(object) df.at[key, col_name] = None i = i + 1 return df def normalize_df(df, schema): """ This function converts all 'Null' values to the according default values based on the column type, and creates an indication list to specify where are the null values :param schema: dictionary specifying the real type of every column :param df: :return: """ null_values = [] nulls_exist = False for col_pos, col_name in enumerate(df.columns): col = df[col_name] row_index = 0 for index, value in col.items(): if col_pos == 0: null_values.append(fpb.NullValuesMap(nullColumns={})) if pd.isnull(value): nulls_exist = True df.at[index, col_name] = get_empty_value_by_type(schema[col_name]) null_values[row_index].nullColumns[col_name] = True row_index = row_index + 1 if not nulls_exist: null_values = [] return df, null_values def get_empty_value_by_type(dtype): if dtype == fpb.INTEGER: return 0 elif dtype == fpb.FLOAT: return 0.0 elif dtype == fpb.STRING: return '' elif dtype == fpb.TIME: return datetime.fromtimestamp(0) elif dtype == fpb.BOOLEAN: return False elif dtype == fpb.NULL: return False raise Exception('unsupported type {}'.format(dtype)) def get_actual_types(df): column_types = {} for col_name in df.columns: col = df[col_name] if is_int_dtype(col.dtype): column_types[col.name] = fpb.INTEGER elif is_float_dtype(col.dtype): column_types[col.name] = fpb.FLOAT elif col.dtype == np.object: # Pandas dtype for str or "mixed" type is object # Go over the column values until we reach a real value # and determine whether it's bool or string has_data = False for x in col: if pd.isnull(x): continue if isinstance(x, str): column_types[col.name] = fpb.STRING has_data = True break if isinstance(x, bool): column_types[col.name] = fpb.BOOLEAN has_data = True break raise WriteError('{} - contains an unsupported value type - {}' .format(col_name, type(x))) # If all items in the column are None # it does not matter what type the column will be, set the column as INTEGER if not has_data: column_types[col.name] = fpb.NULL elif col.dtype == np.bool: column_types[col.name] = fpb.BOOLEAN elif is_datetime(col.dtype): column_types[col.name] = fpb.TIME elif is_categorical_dtype(col.dtype): # We assume catgorical data is strings column_types[col.name] = fpb.STRING else: raise WriteError('{} - unsupported type - {}' .format(col_name, col.dtype)) return column_types def should_encode_index(df): if df.index.name: return True return not isinstance(df.index, pd.RangeIndex) # We can't use a set since hash(np.int64) != hash(pd.Series([1]).dtype) def is_int_dtype(dtype): return \ dtype == np.int64 or \ dtype == np.int32 or \ dtype == np.int16 or \ dtype == np.int8 or \ dtype == np.int def is_float_dtype(dtype): return \ dtype == np.float64 or \ dtype == np.float32 or \ dtype == np.float16 or \ dtype == np.float def is_categorical_dtype(dtype): return isinstance(dtype, CategoricalDtype) def _fix_pd(): # cudf works with older versions of pandas import pandas.api.types if not hasattr(pandas.api.types, 'is_categorical_dtype'): pandas.api.types.is_categorical_dtype = is_categorical_dtype import pandas.core.common if not hasattr(pandas.core.common, 'is_categorical_dtype'): pandas.core.common.is_categorical_dtype = is_categorical_dtype _fix_pd() del _fix_pd
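

# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of the null handling performed before
# serialization: get_actual_types infers a frames_pb2 dtype per column, then
# normalize_df replaces nulls with type-appropriate defaults while recording
# their positions so msg2df can restore them later. `_demo_normalize_nulls`
# is an added name; every function it calls is defined above.
def _demo_normalize_nulls():
    df = pd.DataFrame({'v': [1.5, None, 3.0], 's': ['a', 'b', None]})
    schema = get_actual_types(df)  # e.g. {'v': fpb.FLOAT, 's': fpb.STRING}
    clean_df, null_values = normalize_df(df.copy(), schema)
    # clean_df now holds 0.0 / '' in place of the nulls, and null_values marks
    # which cells were originally null, row by row.
    return clean_df, null_values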
the-stack_106_17139
import re import random import discord from redbot.core import commands, data_manager, Config, checks, bot from .eris_event_lib import ErisEventMixin BaseCog = getattr(commands, "Cog", object) RETYPE = type(re.compile("a")) class JustMetHer(BaseCog, ErisEventMixin): def __init__(self, bot_instance: bot): super().__init__() self.bot = bot_instance # https://regex101.com/r/STgPIR/1 self.searchpattern: RETYPE = re.compile( r"\b(\w{2,}[^aeiouy\s0-9])[aeiouy]re?\b", flags=re.IGNORECASE ) self.bot.add_listener(self.met_her, "on_message") async def met_her(self, message: discord.Message): clean_message: str = message.clean_content.lower() matched = self.searchpattern.search(clean_message) activated: bool = matched and (random.random() < (1 / 10)) if not activated: return ctx = await self.bot.get_context(message) async with self.lock_config.channel(message.channel).get_lock(): allowed: bool = await self.allowed(ctx, message) if not allowed: return phrase = ["I hardly knew 'er!", "I just met 'er!"] await ctx.send( f"{matched.group(1)}'r? {random.choice(phrase)}", reference=message ) await self.log_last_message(ctx, message)
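

# --- Regex sketch (not part of the original cog) ---
# A small, hypothetical illustration of what the search pattern above matches:
# a word whose stem ends in a non-vowel followed by a vowel and "r"/"re", e.g.
# "toaster". group(1) captures the stem ("toast") that the listener uses to
# build the "...'r? I hardly knew 'er!" reply (subject to the random 1-in-10
# gate and the allowed() check). `_demo_searchpattern` is an added name.
def _demo_searchpattern():
    pattern = re.compile(r"\b(\w{2,}[^aeiouy\s0-9])[aeiouy]re?\b", flags=re.IGNORECASE)
    match = pattern.search("i just bought a new toaster")
    # match.group(1) == "toast"
    return match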
the-stack_106_17142
# Copyright (c) 2012-2013, Mark Peek <[email protected]> # All rights reserved. # # See LICENSE file for full license. from . import AWSHelperFn, AWSObject, AWSProperty, FindInMap, Ref from .validators import ( boolean, integer, integer_range, network_port, positive_integer ) try: from awacs.aws import Policy policytypes = (dict, Policy) except ImportError: policytypes = dict, class Tag(AWSHelperFn): def __init__(self, key, value): self.data = {'Key': key, 'Value': value} def JSONrepr(self): return self.data class CustomerGateway(AWSObject): resource_type = "AWS::EC2::CustomerGateway" props = { 'BgpAsn': (integer, True), 'IpAddress': (basestring, True), 'Tags': (list, False), 'Type': (basestring, True), } class DHCPOptions(AWSObject): resource_type = "AWS::EC2::DHCPOptions" props = { 'DomainName': (basestring, False), 'DomainNameServers': (list, False), 'NetbiosNameServers': (list, False), 'NetbiosNodeType': (integer, False), 'NtpServers': (list, False), 'Tags': (list, False), } class EIP(AWSObject): resource_type = "AWS::EC2::EIP" props = { 'InstanceId': (basestring, False), 'Domain': (basestring, False), } class EIPAssociation(AWSObject): resource_type = "AWS::EC2::EIPAssociation" props = { 'AllocationId': (basestring, False), 'EIP': (basestring, False), 'InstanceId': (basestring, False), 'NetworkInterfaceId': (basestring, False), 'PrivateIpAddress': (basestring, False), } class NatGateway(AWSObject): resource_type = "AWS::EC2::NatGateway" props = { 'AllocationId': (basestring, True), 'SubnetId': (basestring, True), } class EBSBlockDevice(AWSProperty): props = { 'DeleteOnTermination': (boolean, False), 'Encrypted': (boolean, False), 'Iops': (integer, False), # Conditional 'SnapshotId': (basestring, False), # Conditional 'VolumeSize': (integer, False), # Conditional 'VolumeType': (basestring, False), } class BlockDeviceMapping(AWSProperty): props = { 'DeviceName': (basestring, True), 'Ebs': (EBSBlockDevice, False), # Conditional 'NoDevice': (dict, False), 'VirtualName': (basestring, False), # Conditional } class MountPoint(AWSProperty): props = { 'Device': (basestring, True), 'VolumeId': (basestring, True), } class PrivateIpAddressSpecification(AWSProperty): props = { 'Primary': (boolean, True), 'PrivateIpAddress': (basestring, True), } class NetworkInterfaceProperty(AWSProperty): props = { 'AssociatePublicIpAddress': (boolean, False), 'DeleteOnTermination': (boolean, False), 'Description': (basestring, False), 'DeviceIndex': (integer, True), 'GroupSet': ([basestring, FindInMap, Ref], False), 'NetworkInterfaceId': (basestring, False), 'PrivateIpAddress': (basestring, False), 'PrivateIpAddresses': ([PrivateIpAddressSpecification], False), 'SecondaryPrivateIpAddressCount': (integer, False), 'SubnetId': (basestring, False), } class AssociationParameters(AWSProperty): props = { 'Key': (basestring, True), 'Value': (basestring, True), } class SsmAssociations(AWSProperty): props = { 'AssociationParameters': ([AssociationParameters], False), 'DocumentName': (basestring, True), } class Instance(AWSObject): resource_type = "AWS::EC2::Instance" props = { 'AvailabilityZone': (basestring, False), 'BlockDeviceMappings': (list, False), 'DisableApiTermination': (boolean, False), 'EbsOptimized': (boolean, False), 'IamInstanceProfile': (basestring, False), 'ImageId': (basestring, True), 'InstanceInitiatedShutdownBehavior': (basestring, False), 'InstanceType': (basestring, False), 'KernelId': (basestring, False), 'KeyName': (basestring, False), 'Monitoring': (boolean, False), 'NetworkInterfaces': 
([NetworkInterfaceProperty], False), 'PlacementGroupName': (basestring, False), 'PrivateIpAddress': (basestring, False), 'RamdiskId': (basestring, False), 'SecurityGroupIds': (list, False), 'SecurityGroups': (list, False), 'SsmAssociations': ([SsmAssociations], False), 'SourceDestCheck': (boolean, False), 'SubnetId': (basestring, False), 'Tags': (list, False), 'Tenancy': (basestring, False), 'UserData': (basestring, False), 'Volumes': (list, False), } class InternetGateway(AWSObject): resource_type = "AWS::EC2::InternetGateway" props = { 'Tags': (list, False), } class NetworkAcl(AWSObject): resource_type = "AWS::EC2::NetworkAcl" props = { 'Tags': (list, False), 'VpcId': (basestring, True), } class ICMP(AWSProperty): props = { 'Code': (integer, False), 'Type': (integer, False), } class PortRange(AWSProperty): props = { 'From': (network_port, False), 'To': (network_port, False), } class NetworkAclEntry(AWSObject): resource_type = "AWS::EC2::NetworkAclEntry" props = { 'CidrBlock': (basestring, True), 'Egress': (boolean, True), 'Icmp': (ICMP, False), # Conditional 'NetworkAclId': (basestring, True), 'PortRange': (PortRange, False), # Conditional 'Protocol': (network_port, True), 'RuleAction': (basestring, True), 'RuleNumber': (integer_range(1, 32766), True), } class NetworkInterface(AWSObject): resource_type = "AWS::EC2::NetworkInterface" props = { 'Description': (basestring, False), 'GroupSet': (list, False), 'PrivateIpAddress': (basestring, False), 'PrivateIpAddresses': ([PrivateIpAddressSpecification], False), 'SecondaryPrivateIpAddressCount': (integer, False), 'SourceDestCheck': (boolean, False), 'SubnetId': (basestring, True), 'Tags': (list, False), } class NetworkInterfaceAttachment(AWSObject): resource_type = "AWS::EC2::NetworkInterfaceAttachment" props = { 'DeleteOnTermination': (boolean, False), 'DeviceIndex': (integer, True), 'InstanceId': (basestring, True), 'NetworkInterfaceId': (basestring, True), } class Route(AWSObject): resource_type = "AWS::EC2::Route" props = { 'DestinationCidrBlock': (basestring, True), 'GatewayId': (basestring, False), 'InstanceId': (basestring, False), 'NatGatewayId': (basestring, False), 'NetworkInterfaceId': (basestring, False), 'RouteTableId': (basestring, True), 'VpcPeeringConnectionId': (basestring, False), } class RouteTable(AWSObject): resource_type = "AWS::EC2::RouteTable" props = { 'Tags': (list, False), 'VpcId': (basestring, True), } class SecurityGroupEgress(AWSObject): resource_type = "AWS::EC2::SecurityGroupEgress" props = { 'CidrIp': (basestring, False), 'DestinationSecurityGroupId': (basestring, False), 'FromPort': (network_port, True), 'GroupId': (basestring, True), 'IpProtocol': (basestring, True), 'ToPort': (network_port, True), # # Workaround for a bug in CloudFormation and EC2 where the # DestinationSecurityGroupId property is ignored causing # egress rules targeting a security group to be ignored. # Using SourceSecurityGroupId instead works fine even in # egress rules. AWS have known about this bug for a while. 
# 'SourceSecurityGroupId': (basestring, False), } class SecurityGroupIngress(AWSObject): resource_type = "AWS::EC2::SecurityGroupIngress" props = { 'CidrIp': (basestring, False), 'FromPort': (network_port, False), 'GroupName': (basestring, False), 'GroupId': (basestring, False), 'IpProtocol': (basestring, True), 'SourceSecurityGroupName': (basestring, False), 'SourceSecurityGroupId': (basestring, False), 'SourceSecurityGroupOwnerId': (basestring, False), 'ToPort': (network_port, False), } class SecurityGroupRule(AWSProperty): props = { 'CidrIp': (basestring, False), 'FromPort': (network_port, True), 'IpProtocol': (basestring, True), 'SourceSecurityGroupId': (basestring, False), 'SourceSecurityGroupName': (basestring, False), 'SourceSecurityGroupOwnerId': (basestring, False), 'ToPort': (network_port, True), 'DestinationSecurityGroupId': (basestring, False), } class SecurityGroup(AWSObject): resource_type = "AWS::EC2::SecurityGroup" props = { 'GroupDescription': (basestring, True), 'SecurityGroupEgress': (list, False), 'SecurityGroupIngress': (list, False), 'VpcId': (basestring, False), 'Tags': (list, False), } class Subnet(AWSObject): resource_type = "AWS::EC2::Subnet" props = { 'AvailabilityZone': (basestring, False), 'CidrBlock': (basestring, True), 'MapPublicIpOnLaunch': (boolean, False), 'Tags': (list, False), 'VpcId': (basestring, True), } class SubnetNetworkAclAssociation(AWSObject): resource_type = "AWS::EC2::SubnetNetworkAclAssociation" props = { 'SubnetId': (basestring, True), 'NetworkAclId': (basestring, True), } class SubnetRouteTableAssociation(AWSObject): resource_type = "AWS::EC2::SubnetRouteTableAssociation" props = { 'RouteTableId': (basestring, True), 'SubnetId': (basestring, True), } class Volume(AWSObject): resource_type = "AWS::EC2::Volume" props = { 'AutoEnableIO': (boolean, False), 'AvailabilityZone': (basestring, True), 'Encrypted': (boolean, False), 'Iops': (integer, False), 'KmsKeyId': (basestring, False), 'Size': (basestring, False), 'SnapshotId': (basestring, False), 'Tags': (list, False), 'VolumeType': (basestring, False), } class VolumeAttachment(AWSObject): resource_type = "AWS::EC2::VolumeAttachment" props = { 'Device': (basestring, True), 'InstanceId': (basestring, True), 'VolumeId': (basestring, True), } class VPC(AWSObject): resource_type = "AWS::EC2::VPC" props = { 'CidrBlock': (basestring, True), 'EnableDnsSupport': (boolean, False), 'EnableDnsHostnames': (boolean, False), 'InstanceTenancy': (basestring, False), 'Tags': (list, False), } class VPCDHCPOptionsAssociation(AWSObject): resource_type = "AWS::EC2::VPCDHCPOptionsAssociation" props = { 'DhcpOptionsId': (basestring, True), 'VpcId': (basestring, True), } class VPCEndpoint(AWSObject): resource_type = "AWS::EC2::VPCEndpoint" props = { 'PolicyDocument': (policytypes, False), 'RouteTableIds': ([basestring, Ref], False), 'ServiceName': (basestring, True), 'VpcId': (basestring, True), } class VPCGatewayAttachment(AWSObject): resource_type = "AWS::EC2::VPCGatewayAttachment" props = { 'InternetGatewayId': (basestring, False), 'VpcId': (basestring, True), 'VpnGatewayId': (basestring, False), } class VPNConnection(AWSObject): resource_type = "AWS::EC2::VPNConnection" props = { 'Type': (basestring, True), 'CustomerGatewayId': (basestring, True), 'StaticRoutesOnly': (boolean, False), 'Tags': (list, False), 'VpnGatewayId': (basestring, True), } class VPNConnectionRoute(AWSObject): resource_type = "AWS::EC2::VPNConnectionRoute" props = { 'DestinationCidrBlock': (basestring, True), 'VpnConnectionId': (basestring, 
True), } class VPNGateway(AWSObject): resource_type = "AWS::EC2::VPNGateway" props = { 'Type': (basestring, True), 'Tags': (list, False), } class VPNGatewayRoutePropagation(AWSObject): resource_type = "AWS::EC2::VPNGatewayRoutePropagation" props = { 'RouteTableIds': ([basestring, Ref], True), 'VpnGatewayId': (basestring, True), } class VPCPeeringConnection(AWSObject): resource_type = "AWS::EC2::VPCPeeringConnection" props = { 'PeerVpcId': (basestring, True), 'VpcId': (basestring, True), 'Tags': (list, False), } class Monitoring(AWSProperty): props = { 'Enabled': (boolean, False), } class NetworkInterfaces(AWSProperty): props = { 'AssociatePublicIpAddress': (boolean, False), 'DeleteOnTermination': (boolean, False), 'Description': (basestring, False), 'DeviceIndex': (integer, True), 'Groups': ([basestring], False), 'NetworkInterfaceId': (basestring, False), 'PrivateIpAddresses': ([PrivateIpAddressSpecification], False), 'SecondaryPrivateIpAddressCount': (integer, False), 'SubnetId': (basestring, False), } class SecurityGroups(AWSProperty): props = { 'GroupId': (basestring, False), } class IamInstanceProfile(AWSProperty): props = { 'Arn': (basestring, False), } class LaunchSpecifications(AWSProperty): props = { 'BlockDeviceMappings': ([BlockDeviceMapping], False), 'EbsOptimized': (boolean, False), 'IamInstanceProfile': (IamInstanceProfile, False), 'ImageId': (basestring, True), 'InstanceType': (basestring, True), 'KernelId': (basestring, False), 'KeyName': (basestring, False), 'Monitoring': (Monitoring, False), 'NetworkInterfaces': ([NetworkInterfaces], False), 'Placement': (basestring, False), 'RamdiskId': (basestring, False), 'SecurityGroups': ([SecurityGroups], False), 'SubnetId': (basestring, False), 'UserData': (basestring, False), 'WeightedCapacity': (positive_integer, False), } class SpotFleetRequestConfigData(AWSProperty): props = { 'AllocationStrategy': (basestring, False), 'ExcessCapacityTerminationPolicy': (basestring, False), 'IamFleetRole': (basestring, True), 'LaunchSpecifications': ([LaunchSpecifications], True), 'SpotPrice': (basestring, True), 'TargetCapacity': (positive_integer, True), 'TerminateInstancesWithExpiration': (boolean, False), 'ValidFrom': (basestring, False), 'ValidUntil': (basestring, False), } class SpotFleet(AWSObject): resource_type = "AWS::EC2::SpotFleet" props = { 'SpotFleetRequestConfigData': (SpotFleetRequestConfigData, True), } class PlacementGroup(AWSObject): resource_type = "AWS::EC2::PlacementGroup" props = { 'Strategy': (basestring, True), }
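

# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of combining the resource classes above.
# The logical names, AMI id and CIDR are placeholders; in practice these
# objects would normally be added to a troposphere Template before rendering
# the CloudFormation JSON.
def _example_resources():
    group = SecurityGroup(
        'DemoSecurityGroup',
        GroupDescription='Allow SSH from a single address',
        SecurityGroupIngress=[
            SecurityGroupRule(IpProtocol='tcp', FromPort=22, ToPort=22,
                              CidrIp='203.0.113.10/32'),
        ],
    )
    instance = Instance(
        'DemoInstance',
        ImageId='ami-00000000',
        InstanceType='t2.micro',
        SecurityGroups=[Ref(group)],
    )
    return group, instance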
the-stack_106_17145
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the prioritisetransaction mining RPC.""" from test_framework.test_framework import BitcoinLamboTestFramework from test_framework.util import * from test_framework.mininode import COIN, MAX_BLOCK_BASE_SIZE class PrioritiseTransactionTest(BitcoinLamboTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [["-printpriority=1"], ["-printpriority=1"]] def run_test(self): self.txouts = gen_return_txouts() self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] utxo_count = 90 utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count) base_fee = self.relayfee*100 # our transactions are smaller than 100kb txids = [] # Create 3 batches of transactions at 3 different fee rate levels range_size = utxo_count // 3 for i in range(3): txids.append([]) start_range = i * range_size end_range = start_range + range_size txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee) # Make sure that the size of each group of transactions exceeds # MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create # more transactions. mempool = self.nodes[0].getrawmempool(True) sizes = [0, 0, 0] for i in range(3): for j in txids[i]: assert(j in mempool) sizes[i] += mempool[j]['size'] assert(sizes[i] > MAX_BLOCK_BASE_SIZE) # Fail => raise utxo_count # add a fee delta to something in the cheapest bucket and make sure it gets mined # also check that a different entry in the cheapest bucket is NOT mined self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN)) self.nodes[0].generate(1) mempool = self.nodes[0].getrawmempool() self.log.info("Assert that prioritised transaction was mined") assert(txids[0][0] not in mempool) assert(txids[0][1] in mempool) high_fee_tx = None for x in txids[2]: if x not in mempool: high_fee_tx = x # Something high-fee should have been mined! assert(high_fee_tx != None) # Add a prioritisation before a tx is in the mempool (de-prioritising a # high-fee transaction so that it's now low fee). self.nodes[0].prioritisetransaction(txid=high_fee_tx, fee_delta=-int(2*base_fee*COIN)) # Add everything back to mempool self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Check to make sure our high fee rate tx is back in the mempool mempool = self.nodes[0].getrawmempool() assert(high_fee_tx in mempool) # Now verify the modified-high feerate transaction isn't mined before # the other high fee transactions. Keep mining until our mempool has # decreased by all the high fee size that we calculated above. while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]): self.nodes[0].generate(1) # High fee transaction should not have been mined, but other high fee rate # transactions should have been. mempool = self.nodes[0].getrawmempool() self.log.info("Assert that de-prioritised transaction is still in mempool") assert(high_fee_tx in mempool) for x in txids[2]: if (x != high_fee_tx): assert(x not in mempool) # Create a free transaction. Should be rejected. 
utxo_list = self.nodes[0].listunspent() assert(len(utxo_list) > 0) utxo = utxo_list[0] inputs = [] outputs = {} inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]}) outputs[self.nodes[0].getnewaddress()] = utxo["amount"] raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"] tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"] # This will raise an exception due to min relay fee not being met assert_raises_jsonrpc(-26, "66: min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex) assert(tx_id not in self.nodes[0].getrawmempool()) # This is a less than 1000-byte transaction, so just set the fee # to be the minimum for a 1000 byte transaction and check that it is # accepted. self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN)) self.log.info("Assert that prioritised free transaction is accepted to mempool") assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id) assert(tx_id in self.nodes[0].getrawmempool()) # Test that calling prioritisetransaction is sufficient to trigger # getblocktemplate to (eventually) return a new block. mock_time = int(time.time()) self.nodes[0].setmocktime(mock_time) template = self.nodes[0].getblocktemplate() self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN)) self.nodes[0].setmocktime(mock_time+10) new_template = self.nodes[0].getblocktemplate() assert(template != new_template) if __name__ == '__main__': PrioritiseTransactionTest().main()
the-stack_106_17146
from jira_config import jira_config from jira_data import jira_data from jira_graph import jira_graph import sys jira_lookup = jira_config() def extract_csv_data_and_plot(filename, teams): plotter = jira_graph(jira_lookup) plotter.create_ticket_graphs_by_team(filename, teams) def get_filter_data_and_plot(filter_id, teams): jira_query = jira_data(jira_lookup) filename = jira_query.save_filter_data(filter_id) extract_csv_data_and_plot(filename, teams) def get_filter_id(filter_param): # Try to lookup as filter name, otherwise assume it's an id filter_id = jira_lookup.find_filter_id(filter_param) return filter_id if filter_id else filter_param def parse_teams(configured_teams, teams): teams_to_show = [] for team_name in teams: stripped_team_name = team_name.strip() if stripped_team_name in configured_teams: teams_to_show.append(stripped_team_name) else: print("Unknown team: \"{0}\". Options are {1}".format(team_name, configured_teams)) if len(teams_to_show) == 0: print("No teams recognised: {0}, using configured teams".format(teams)) teams_to_show = configured_teams return teams_to_show def get_default_filter_id(): filter_id = jira_lookup.first_filter_id if filter_id: return filter_id else: print("Error: no filters are configured") def show_usage(): print("Usage:\r\n======") print(" report.py") print(" report.py \"<filter>\"") print(" report.py \"<filter>\" \"<teams>\"") print(" report.py -t \"<teams>\"") print(" report.py -f \"<csv_filename>\"") print(" report.py -f \"<csv_filename>\" \"<teams>\"") def main(): configured_teams = jira_lookup.teams args = sys.argv[1:] if len(args) == 0: # Try using the first filter configured get_filter_data_and_plot(get_default_filter_id(), configured_teams) elif len(args) == 1: if args[0] == "-h" or args[0] == "-help": show_usage() else: # Assume filter id passed get_filter_data_and_plot(get_filter_id(args[0]), configured_teams) elif len(args) == 2: if args[0] == "-t": get_filter_data_and_plot(get_default_filter_id(), parse_teams(configured_teams, args[1].split(","))) elif args[0] == "-f": extract_csv_data_and_plot(args[1], configured_teams) else: # Assume filter id and teams passed get_filter_data_and_plot(get_filter_id(args[0]), parse_teams(configured_teams, args[1].split(","))) elif len(args) == 3: if args[0] == "-f": extract_csv_data_and_plot(args[1], parse_teams(configured_teams, args[2].split(","))) else: print("Unknown args: " + str(args)) show_usage() else: show_usage() if __name__ == "__main__": main()
the-stack_106_17147
# !/usr/bin/python3 # coding: utf-8 # Copyright 2015-2018 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from PIL import Image from send2trash import send2trash THIS_FOLDER = os.getcwd() INPUT_FOLDER = os.path.join(THIS_FOLDER, "img") TMP_FOLDER = os.path.join(THIS_FOLDER, "tmp") OUTPUT_FOLDER = os.path.join(THIS_FOLDER, "txt") def prepare_folders(): """ :return: void Creates necessary folders """ for folder in [ INPUT_FOLDER, TMP_FOLDER, OUTPUT_FOLDER ]: if not os.path.exists(folder): os.makedirs(folder) def find_images(folder): """ :param folder: str Path to folder to search :return: generator of str List of images in folder """ for file in os.listdir(folder): full_path = os.path.join(folder, file) if os.path.isfile(full_path): try: _ = Image.open(full_path) # if constructor succeeds yield file except: pass def rotate_image(input_file, output_file, angle=90): """ :param input_file: str Path to image to rotate :param output_file: str Path to output image :param angle: float Angle to rotate :return: void Rotates image and saves result """ cmd = "convert -rotate " + "' " + str(angle) + "' " cmd += "'" + input_file + "' '" + output_file + "'" print("Running", cmd) os.system(cmd) # sharpen def sharpen_image(input_file, output_file): """ :param input_file: str Path to image to prettify :param output_file: str Path to output image :return: void Prettifies image and saves result """ rotate_image(input_file, output_file) # rotate cmd = "convert -auto-level -sharpen 0x4.0 -contrast " cmd += "'" + output_file + "' '" + output_file + "'" print("Running", cmd) os.system(cmd) # sharpen def run_tesseract(input_file, output_file): """ :param input_file: str Path to image to OCR :param output_file: str Path to output file :return: void Runs tesseract on image and saves result """ cmd = "tesseract -l pol " cmd += "'" + input_file + "' '" + output_file + "'" print("Running", cmd) os.system(cmd) def main(): prepare_folders() images = list(find_images(INPUT_FOLDER)) print("Found the following images in", INPUT_FOLDER) print(images) for image in images: input_path = os.path.join( INPUT_FOLDER, image ) tmp_path = os.path.join( TMP_FOLDER, image ) out_path = os.path.join( OUTPUT_FOLDER, image + ".out.txt" ) sharpen_image(input_path, tmp_path) run_tesseract(tmp_path, out_path) print("Removing tmp folder") send2trash(TMP_FOLDER) if __name__ == '__main__': main()
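

# --- Usage sketch (not part of the original script) ---
# A hypothetical trace of the shell commands the helpers above assemble
# (ImageMagick `convert` for rotate/sharpen, then `tesseract` with the Polish
# language pack). The file names are placeholders:
#
#   rotate_image('img/scan.png', 'tmp/scan.png', angle=90)
#       runs: convert -rotate ' 90' 'img/scan.png' 'tmp/scan.png'
#   sharpen_image('img/scan.png', 'tmp/scan.png')
#       rotates first, then runs:
#       convert -auto-level -sharpen 0x4.0 -contrast 'tmp/scan.png' 'tmp/scan.png'
#   run_tesseract('tmp/scan.png', 'txt/scan.png.out.txt')
#       runs: tesseract -l pol 'tmp/scan.png' 'txt/scan.png.out.txt'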
the-stack_106_17148
import logging from pathlib import Path import pytest from math import isclose from farm.data_handler.processor import SquadProcessor from farm.modeling.adaptive_model import AdaptiveModel from farm.infer import QAInferencer from farm.data_handler.inputs import QAInput, Question @pytest.mark.parametrize("distilbert_squad", [True, False], indirect=True) def test_training(distilbert_squad, caplog=None): if caplog: caplog.set_level(logging.CRITICAL) model, processor = distilbert_squad assert type(model) == AdaptiveModel assert type(processor) == SquadProcessor @pytest.mark.parametrize("distilbert_squad", [True, False], indirect=True) def test_save_load(distilbert_squad, caplog=None): if caplog: caplog.set_level(logging.CRITICAL) model, processor = distilbert_squad save_dir = Path("testsave/qa_squad") model.save(save_dir) processor.save(save_dir) inferencer = QAInferencer.load(save_dir, batch_size=2, gpu=False, num_processes=0, task_type="question_answering") assert inferencer is not None @pytest.mark.parametrize("bert_base_squad2", [True, False], indirect=True) def test_inference_dicts(bert_base_squad2): qa_format_1 = [ { "questions": ["Who counted the game among the best ever made?"], "text": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created." }] qa_format_2 = [{"qas":["Who counted the game among the best ever made?"], "context": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.", }] result1 = bert_base_squad2.inference_from_dicts(dicts=qa_format_1) result2 = bert_base_squad2.inference_from_dicts(dicts=qa_format_2) assert result1 == result2 @pytest.fixture() @pytest.mark.parametrize("bert_base_squad2", [True, False], indirect=True) def span_inference_result(bert_base_squad2, caplog=None): if caplog: caplog.set_level(logging.CRITICAL) obj_input = [QAInput(doc_text="Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. 
GameTrailers in their review called it one of the greatest games ever created.", questions=Question("Who counted the game among the best ever made?", uid="best_id_ever"))] result = bert_base_squad2.inference_from_objects(obj_input, return_json=False)[0] return result @pytest.fixture() @pytest.mark.parametrize("bert_base_squad2", [True, False], indirect=True) def no_answer_inference_result(bert_base_squad2, caplog=None): if caplog: caplog.set_level(logging.CRITICAL) obj_input = [QAInput(doc_text="The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain \"Amazonas\" in their names. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.", questions=Question("The Amazon represents less than half of the planets remaining what?", uid="best_id_ever"))] result = bert_base_squad2.inference_from_objects(obj_input, return_json=False)[0] return result def test_inference_objs(span_inference_result, caplog=None): if caplog: caplog.set_level(logging.CRITICAL) assert span_inference_result def test_span_performance(span_inference_result, caplog=None): if caplog: caplog.set_level(logging.CRITICAL) best_pred = span_inference_result.prediction[0] assert best_pred.answer == "GameTrailers" best_score_gold = 11.7282 best_score = best_pred.score assert isclose(best_score, best_score_gold, rel_tol=0.0001) no_answer_gap_gold = 12.6491 no_answer_gap = span_inference_result.no_answer_gap assert isclose(no_answer_gap, no_answer_gap_gold, rel_tol=0.0001) def test_no_answer_performance(no_answer_inference_result, caplog=None): if caplog: caplog.set_level(logging.CRITICAL) best_pred = no_answer_inference_result.prediction[0] assert best_pred.answer == "no_answer" best_score_gold = 15.8022 best_score = best_pred.score assert isclose(best_score, best_score_gold, rel_tol=0.0001) no_answer_gap_gold = -15.0159 no_answer_gap = no_answer_inference_result.no_answer_gap assert isclose(no_answer_gap, no_answer_gap_gold, rel_tol=0.0001) def test_qa_pred_attributes(span_inference_result, caplog=None): if caplog: caplog.set_level(logging.CRITICAL) qa_pred = span_inference_result attributes_gold = ['aggregation_level', 'answer_types', 'context', 'context_window_size', 'ground_truth_answer', 'id', 'n_passages', 'no_answer_gap', 'prediction', 'question', 'to_json', 'to_squad_eval', 'token_offsets'] for ag in attributes_gold: assert ag in dir(qa_pred) def test_qa_candidate_attributes(span_inference_result, caplog=None): if caplog: caplog.set_level(logging.CRITICAL) qa_candidate = span_inference_result.prediction[0] attributes_gold = ['add_cls', 'aggregation_level', 'answer', 'answer_support', 'answer_type', 'context_window', 'n_passages_in_doc', 'offset_answer_end', 'offset_answer_start', 'offset_answer_support_end', 'offset_answer_support_start', 'offset_context_window_end', 'offset_context_window_start', 'offset_unit', 'passage_id', 'probability', 'score', 'set_answer_string', 'set_context_window', 'to_doc_level', 'to_list'] for ag in attributes_gold: assert ag in dir(qa_candidate) def test_id(span_inference_result, no_answer_inference_result): assert span_inference_result.id == "best_id_ever" assert no_answer_inference_result.id == "best_id_ever" 
if __name__ == "__main__":
    # These tests rely on pytest fixtures (distilbert_squad, bert_base_squad2,
    # span_inference_result, ...), so they cannot be called as plain functions;
    # run the module through pytest instead.
    pytest.main([__file__])
the-stack_106_17149
"""Tools for reading a directory of torrent files """ import os import hashlib from BitTornado.Meta.bencode import bencode from BitTornado.Meta.Info import check_info, MetaInfo def _errfunc(msg): print(":: ", msg) def parsedir(directory, parsed, files, blocked, exts=('.torrent',), return_metainfo=False, errfunc=_errfunc): """Parse bencoded files in a directory structure. Parameters str - path of directory {str: {str: *}} - dictionary, keyed by sha hash of encoded info dict, of torrent file metadata {str: [(float, int), str]} - dictionary, keyed by file path, of (mtime, length) pairs and a hash value (corresponds to keys of parsed) {str} - set of previously blocked file paths (str,) - tuple of valid file extensions bool - parsed metadata to include full torrent data f(str) - function to process error messages Returns {str: {str: *}} - dictionary, keyed by sha hash of encoded info dict, of torrent file metadata {str: [(float, int), str]} - dictionary, keyed by file path, of (mtime, length) pairs and parsed hash value (0 if unable to parse) {str} - set of file paths of unparseable or duplicate torrents {str: {str: *}} - dictionary, keyed by sha hash of encoded info dict, of metadata of torrent files added during directory parse {str: {str: *}} - dictionary, keyed by sha hash of encoded info dict, of metadata of torrent files removed during directory parse """ new_files, torrent_type = get_files(directory, exts) # removed_files = (files \ new_files) U changed_files removed_files = {path: files[path] for path in files if path not in new_files or files[path][0] != new_files[path][0]} # Missing files are removed removed = {filehash: parsed[filehash] for _, filehash in removed_files.values()} # unchanged_files = files \ removed_files unchanged_files = {path: files[path] for path in files if path not in removed_files} # Parse new files and those whose mtime or length has change # Block those that are unmodified but unparsed (indicates error) new_blocked = set() to_parse = [] for path in new_files: if path not in unchanged_files: to_parse.append(path) elif unchanged_files[path][1] == 0: new_blocked.add(path) new_files.update(unchanged_files) # Keep old parsed files new_parsed = {infohash: parsed[infohash] for _, infohash in unchanged_files.values()} # Attempt to parse new files added = {} for path in sorted(to_parse): try: torrentinfo, infohash = parse_torrent(path, return_metainfo) torrentinfo['type'] = torrent_type[path] if infohash not in new_parsed: new_parsed[infohash] = torrentinfo added[infohash] = torrentinfo new_files[path][1] = infohash else: # Don't warn if we've blocked before if path not in blocked: errfunc('**warning** {} is a duplicate torrent for {}' ''.format(path, new_parsed[infohash]['path'])) new_blocked.add(path) except (IOError, ValueError): errfunc('**warning** {} has errors'.format(path)) new_blocked.add(path) return (new_parsed, new_files, new_blocked, added, removed) def get_files(directory, exts=('.torrent',)): """Get the shallowest set of files with valid extensions in a directory structure. If no valid files are found in a directory, search all subdirectories. If a valid file is found in a directory, no subdirectories will be searched. Parameters str - path of directory (str,) - tuple of valid file extensions Returns {str: [(float, int), 0]} - dictionary, keyed by file path, of (mtime, length) pairs and an uninitialized hash value {str: str} - dictionary, keyed by file path, of file extension """ files = {} file_type = {} # Ignore '.' 
files and directories subs = (candidate for candidate in os.listdir(directory) if candidate[0] != '.') # Find valid files subdirs = [] for sub in subs: loc = os.path.join(directory, sub) if os.path.isdir(loc): subdirs.append(loc) continue extmatches = [ext[1:] for ext in exts if sub.endswith(ext)] if extmatches: files[loc] = [(int(os.path.getmtime(loc)), os.path.getsize(loc)), 0] file_type[loc] = extmatches[0] # Recurse if no valid files found if not files and subdirs: for subdir in subdirs: subfiles, subfile_type = get_files(subdir, exts) files.update(subfiles) file_type.update(subfile_type) return files, file_type def parse_torrent(path, return_metainfo=False): """Load and derive metadata from torrent file Parameters str - path of file to parse bool - parsed metadata to include full torrent data Returns {str: *} - torrent file metadata str - sha hash of encoded info dict """ fname = os.path.basename(path) data = MetaInfo.read(path) # Validate and hash info dict info = data['info'] check_info(info) infohash = hashlib.sha1(bencode(info)).digest() single = 'length' in info torrentinfo = { 'path': path, 'file': fname, 'name': info.get('name', fname), 'numfiles': 1 if single else len(info['files']), 'length': info['length'] if single else sum( li['length'] for li in info['files'] if 'length' in li) } for key in ('failure reason', 'warning message', 'announce-list'): if key in data: torrentinfo[key] = data[key] if return_metainfo: torrentinfo['metainfo'] = data return torrentinfo, infohash
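

# ---------------------------------------------------------------------------
# Minimal usage sketch for parsedir(). It shows how a caller might drive a
# scan while keeping its caches between calls: the first scan starts from
# empty state, and later scans pass the previous return values back in so
# only changed files are re-parsed. The watch directory below is an assumed
# placeholder, not anything defined by this module.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    watch_dir = '.'   # assumption: any directory containing .torrent files
    parsed, files, blocked = {}, {}, set()

    # parsedir() returns (parsed, files, blocked, added, removed); feeding
    # parsed/files/blocked back in on the next call makes scans incremental.
    parsed, files, blocked, added, removed = parsedir(
        watch_dir, parsed, files, blocked)

    print('{} torrent(s) known, {} added, {} removed, {} blocked'.format(
        len(parsed), len(added), len(removed), len(blocked)))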
the-stack_106_17152
from shapely import geometry from shapely.geometry import shape, Polygon, LineString, Point, MultiLineString, MultiPolygon import matplotlib.pyplot as plt import requests import rasterio from pandas import DataFrame from geopandas import GeoDataFrame import geopandas as gpd import pandas as pd from math import acos, sqrt, cos, sin, degrees, radians, fabs, atan2, fmod, isnan from map2loop import m2l_utils from map2loop import m2l_interpolation import numpy as np import os import random #################################################### # Export orientation data in csv format with heights and strat code added # # save_orientations(structure_code,output_path,c_l,orientation_decimate,dtm) # Args: # structure_code geopandas point layer # output_path directory of outputs from m2lc # c_l dictionary of codes and labels specific to input geo information layers # decimate saves every nth orientation point (without reference to spatial density or structural complexity) # dtm rasterio format georeferenced dtm grid # # Save dip,dip direction of bedding extracted from geology layer with additional height information from dtm and joined with # polygon information from geology polygon layer. Stored as csv file. # Orientation data needs calculated height as file does not provide it, taken from SRTM data already downloaded. # To calculate polarity (WHICH WE DON'T DO YET) we can calculate the dot product of the dip direction of a bedding plane # and the vector to that points nearest basal contact node, if abs(acos(dot product))>90 then right way up. #################################################### def save_orientations(structures,path_out,c_l,orientation_decimate,dtm,dtb,dtb_null,cover_map): i=0 f=open(path_out+'/orientations.csv',"w") f.write("X,Y,Z,azimuth,dip,polarity,formation\n") for indx,apoint in structures.iterrows(): if(not str(apoint[c_l['r1']])=='None'): if(not c_l['intrusive'] in apoint[c_l['r1']]): if(apoint[c_l['d']]!=0 and m2l_utils.mod_safe(i,orientation_decimate)==0): locations=[(apoint['geometry'].x, apoint['geometry'].y)] if(apoint['geometry'].x > dtm.bounds[0] and apoint['geometry'].x < dtm.bounds[2] and apoint['geometry'].y > dtm.bounds[1] and apoint['geometry'].y < dtm.bounds[3]): height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) if(c_l['otype']=='strike'): dipdir=apoint[c_l['dd']]+90 else: dipdir=apoint[c_l['dd']] ostr="{},{},{},{},{},{},{}\n"\ .format(apoint['geometry'].x,apoint['geometry'].y,height,dipdir,apoint[c_l['d']], 1,apoint[c_l['c']].replace(" ","_").replace("-","_")) #ostr=str(apoint['geometry'].x)+","+str(apoint['geometry'].y)+","+height+","+str(dipdir)+","+str(apoint[c_l['d']])+",1,"+str(apoint[c_l['c']].replace(" ","_").replace("-","_"))+"\n" f.write(ostr) i=i+1 f.close() print(i,'orientations saved to',path_out+'orientations.csv') #################################################### # Find those series that don't have any orientation or contact point data and add some random data # create_orientations(tmp_path, output_path, dtm,geol_clip,structure_clip,c_l) # Args: # tmp_path directory of temporary outputs # output_path directory of outputs # dtm rasterio format elevation grid # geology geopandas layer of geology polygons # structures geopandas layer of orientation points c_l dictionary of codes and labels specific to input geo information layers # c_l dictionary of codes and labels specific to input geo information layers # # Save additional arbitrary dip/dip direction data for series/groups that don’t have structural information available. 
# Ignores intrusive polygons. Somewhat superceded by interpolation codes. Could use dip direction normal to basal contact # (if there is one) but don't do this yet. #################################################### def create_orientations( path_in, path_out,dtm,dtb,dtb_null,cover_map,geology,structures,c_l): """Create orientations if there is a series that does not have one.""" #f=open(path_in+'/groups.csv',"r") #contents =f.readlines() #f.close #ngroups=contents[0].split(" ") #ngroups=int(ngroups[1]) contents=np.genfromtxt(path_in+'groups.csv',delimiter=',',dtype='U100') ngroups=len(contents[0])-1 #print(len(contents[0])) groups=[] for i in range (1,int(ngroups)+1): #print(contents[0][i].replace("\n","")) groups.append((contents[0][i].replace("\n",""),0)) #print(ngroups,groups) for i in range (1,ngroups): for indx,apoint in structures.iterrows(): if(str(apoint[c_l['g']])=='None'): agroup=apoint[c_l['c']].replace(" ","_").replace("-","_") else: agroup=apoint[c_l['g']].replace(" ","_").replace("-","_") #print(agroup) if(groups[i][0]==agroup): lgroups=list(groups[i]) lgroups[1]=1 lgroups=tuple(lgroups) groups[i]=lgroups #print("Orientations----------\n",ngroups,groups) for i in range (0,ngroups): for indx,apoly in geology.iterrows(): agroup=apoint[c_l['g']].replace(" ","_").replace("-","_") #print(agroup) if(groups[i][0]==agroup): lgroups=list(groups[i]) lgroups[1]=1 lgroups=tuple(lgroups) groups[i]=lgroups all_codes=[] for ind,ageol in geology.iterrows(): # central polygon all_codes.append(ageol[c_l['c']]) #print("Contacts----------\n",len(set(all_codes)),set(all_codes)) f=open(path_out+'/empty_series_orientations.csv',"w") f.write("X,Y,Z,azimuth,dip,polarity,formation\n") #f.write("X,Y,Z,DipDirection,dip,dippolarity,formation\n") for i in range (0,ngroups): if(groups[i][1]==0): for indx,ageol in geology.iterrows(): if(ageol[c_l['c']].replace("-","_")==groups[i][0] and groups[i][1]==0 and not c_l['intrusive'] in ageol[c_l['r1']] ): apoly=Polygon(ageol['geometry']) apoint=apoly.representative_point() #print(apoint.x,apoint.y) locations=[(apoint.x,apoint.y)] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) if(height==-999): print("point off map",locations) height=0 # needs a better solution! ostr="{},{},{},{},{},{},{}\n"\ .format(apoint.x,apoint.y,height,0,45,1,ageol[c_l['c']].replace(" ","_").replace("-","_")) #ostr=str(apoint.x)+","+str(apoint.y)+","+height+",0,45,1"+","+str(ageol[c_l['c']].replace(" ","_").replace("-","_"))+"\n" f.write(ostr) #plt.title(str(ageol[c_l['c']].replace(" ","_").replace("-","_"))) #plt.scatter(apoint.x,apoint.y,color="red") #plt.plot(*apoly.exterior.xy) #plt.show() break f.close() print('extra orientations saved as',path_out+'/empty_series_orientations.csv') #################################################### # Convert polygons with holes into distinct poygons #modified from https://stackoverflow.com/questions/21824157/how-to-extract-interior-polygon-coordinates-using-shapely # # extract_poly_coords(part,i) # Args: # part shapely format polygon or multipolygon with or without interior holes # i counter for distict interior/exterior polylines # Returns: # exterior_coords exterior coordinates of ploygon interior_coords array of interior hole's interior coordinates # # Shapely multgipolygons can contain interior holes which need to be extracted as distinct contact polylines # for use in map2loop. This code achieves that. 
#################################################### def extract_poly_coords(geom,i): if geom.type == 'Polygon': exterior_coords = geom.exterior.coords[:] interior_coords = [] for interior in geom.interiors: interior_coords += (i,interior.coords[:]) i=i+1 elif geom.type == 'MultiPolygon': exterior_coords = [] interior_coords = [] for part in geom: epc = extract_poly_coords(part,i) # Recursive call exterior_coords += epc['exterior_coords'] interior_coords += epc['interior_coords'] i=i+1 else: raise ValueError('Unhandled geometry type: ' + repr(geom.type)) return {'exterior_coords': exterior_coords, 'interior_coords': interior_coords} #################################################### # extract stratigraphically lower contacts from geology polygons and save as points # # save_basal_contacts(tmp_path,dtm,geol_clip,contact_decimate,c_l,intrusion_mode) # Args: # tmp_path directory of temporary outputs # dtm rasterio format elevation grid # geol_clip geopandas layer of clipped geology polygons # contact_decimate decimation factor for saving every nth input point on contact polylines # c_l dictionary of codes and labels specific to input geo information layers # intrusion_mode Boolean for saving intrusive contacts or not # Returns: # dictionaries of basal contacts with and without decimation. # # Saves a shapefile of the basal contacts of each stratigraphic unit (but not intrusives). This analysis uses # the relative age of each unit, and includes faulted contacts, that are filtered out by another function. # Orientation data needs calculated height as file does not provide it, taken from SRTM data already downloaded. # Need to reduce number of points whilst retaining useful info (Ranee's job!)' To calculate which are the basal units # contact for a polygon find the polygons which are older than the selected polygon # If there are no older units for a polygon it has no basal contact. We keep every nth node based on the decimate term # (simple count along polyline). gempy seems to need at least two points per surface, so we always take the first two points. 
#################################################### def save_basal_contacts(path_in,dtm,dtb,dtb_null,cover_map,geol_clip,contact_decimate,c_l,intrusion_mode): #print("decimation: 1 /",contact_decimate) plist=[] i=0 all_geom=m2l_utils.explode(geol_clip) for indx,ageol in all_geom.iterrows(): # central polygon all_coords=extract_poly_coords(ageol.geometry,0) plist+=(i,list(all_coords['exterior_coords']),ageol[c_l['c']],ageol[c_l['ds']],ageol[c_l['g']],ageol[c_l['r1']],ageol[c_l['o']]) i=i+1 for j in range(0,len(all_coords['interior_coords']),2): plist+=(i,list(all_coords['interior_coords'][j+1]),ageol[c_l['c']],ageol[c_l['ds']],ageol[c_l['g']],ageol[c_l['r1']],ageol[c_l['o']]) i=i+1 #dtm = rasterio.open(path_in+'/dtm_rp.tif') ag=open(path_in+'/all_sorts.csv',"r") contents =ag.readlines() ag.close #print("surfaces:",len(contents)) #print("polygons:",len(all_geom)) ulist=[] for i in range(1,len(contents)): #print(contents[i].replace("\n","")) cont_list=contents[i].split(",") ulist.append([i, cont_list[4].replace("\n","")]) #print(ulist) allc=open(path_in+'/all_contacts.csv',"w") allc.write('GROUP_,id,x,y,z,code\n') ac=open(path_in+'/contacts.csv',"w") ac.write("X,Y,Z,formation\n") #print(dtm.bounds) j=0 allpts=0 deci_points=0 ls_dict={} ls_dict_decimate={} id=0 #print(len(plist)) for a_poly in range(0,len(plist),7): ntest1=str(plist[a_poly+5]) ntest2=str(plist[a_poly+3]) if(not ntest1 == 'None' and not ntest2 == 'None' ): if( not c_l['intrusive'] in plist[a_poly+5]): a_polygon=Polygon(plist[a_poly+1]) agp=str(plist[a_poly+4]) if(agp=='None'): agp=plist[a_poly+2].replace(" ","_").replace("-","_") neighbours=[] j+=1 out=[item for item in ulist if plist[a_poly+2].replace(" ","_").replace("-","_") in item] if(len(out)>0): central=out[0][0] #relative age of central polygon for b_poly in range(0,len(plist),7): b_polygon=LineString(plist[b_poly+1]) ntest1=str(plist[b_poly+5]) ntest2=str(plist[b_poly+3]) if(not ntest1 == 'None' and not ntest2 == 'None' ): if(plist[a_poly] != plist[b_poly]): #do not compare with self if (a_polygon.intersects(b_polygon)) : # is a neighbour, but not a sill if( (not c_l['sill'] in plist[b_poly+3] or not c_l['intrusive'] in plist[b_poly+5]) and intrusion_mode==0): #intrusion_mode=0 (sills only excluded) neighbours.append((b_poly)) elif((not c_l['intrusive'] in plist[b_poly+5]) and intrusion_mode==1): #intrusion_mode=1 (all intrusions excluded) neighbours.append((b_poly)) if(len(neighbours) >0): for i in range (0,len(neighbours)): b_polygon=LineString(plist[neighbours[i]+1]) out=[item for item in ulist if plist[neighbours[i]+2].replace(" ","_").replace("-","_") in item] if(len(out)>0): #if(out[0][0] > central and out[0][0] < youngest_older): # neighbour is older than central, and younger than previous candidate if(out[0][0] > central ): # neighbour is older than central if(not a_polygon.is_valid ): a_polygon = a_polygon.buffer(0) if(not b_polygon.is_valid): b_polygon = b_polygon.buffer(0) LineStringC = a_polygon.intersection(b_polygon) if(LineStringC.wkt.split(" ")[0]=='GEOMETRYCOLLECTION' ): #ignore weird intersections for now, worry about them later! 
#print("debug:GC") continue elif(LineStringC.wkt.split(" ")[0]=='MULTIPOLYGON' or LineStringC.wkt.split(" ")[0]=='POLYGON'): print("debug:MP,P",ageol[c_l['c']]) elif(LineStringC.wkt.split(" ")[0]=='MULTILINESTRING'): k=0 if(str(plist[a_poly+4])=='None'): ls_dict[id] = {"id": id,c_l['c']:plist[a_poly+2].replace(" ","_").replace("-","_"),c_l['g']:plist[a_poly+2].replace(" ","_").replace("-","_"), "geometry": LineStringC} else: ls_dict[id] = {"id": id,c_l['c']:plist[a_poly+2].replace(" ","_").replace("-","_"),c_l['g']:plist[a_poly+4].replace(" ","_").replace("-","_"), "geometry": LineStringC} id=id+1 for lineC in LineStringC: #process all linestrings if(m2l_utils.mod_safe(k,contact_decimate)==0 or k==int((len(LineStringC)-1)/2) or k==len(LineStringC)-1): #decimate to reduce number of points, but also take second and third point of a series to keep gempy happy locations=[(lineC.coords[0][0],lineC.coords[0][1])] #doesn't like point right on edge? if(lineC.coords[0][0] > dtm.bounds[0] and lineC.coords[0][0] < dtm.bounds[2] and lineC.coords[0][1] > dtm.bounds[1] and lineC.coords[0][1] < dtm.bounds[3]): height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(lineC.coords[0][0],lineC.coords[0][1],height,plist[a_poly+2].replace(" ","_").replace("-","_")) #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+height+","+str(plist[a_poly+2].replace(" ","_").replace("-","_"))+"\n" ac.write(ostr) allc.write(agp+","+str(ageol[c_l['o']])+","+ostr) if(str(plist[a_poly+4])=='None'): ls_dict_decimate[deci_points] = {"id": allpts,c_l['c']:plist[a_poly+2].replace(" ","_").replace("-","_"),c_l['g']:plist[a_poly+2].replace(" ","_").replace("-","_"), "geometry": Point(lineC.coords[0][0],lineC.coords[0][1])} else: ls_dict_decimate[deci_points] = {"id": allpts,c_l['c']:plist[a_poly+2].replace(" ","_").replace("-","_"),c_l['g']:plist[a_poly+4].replace(" ","_").replace("-","_"), "geometry": Point(lineC.coords[0][0],lineC.coords[0][1])} allpts+=1 deci_points=deci_points+1 else: continue #print("debug:edge points") else: locations=[(lineC.coords[0][0]+0.0000001,lineC.coords[0][1])] #doesn't like point right on edge? 
if(lineC.coords[0][0] > dtm.bounds[0] and lineC.coords[0][0] < dtm.bounds[2] and lineC.coords[0][1] > dtm.bounds[1] and lineC.coords[0][1] < dtm.bounds[3]): height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(lineC.coords[0][0],lineC.coords[0][1],height,plist[a_poly+2].replace(" ","_").replace("-","_")) #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+height+","+str(plist[a_poly+2].replace(" ","_").replace("-","_"))+"\n" allc.write(agp+","+str(ageol[c_l['o']])+","+ostr) allpts+=1 k+=1 elif(LineStringC.wkt.split(" ")[0]=='LINESTRING'): # apparently this is not needed k=0 for pt in LineStringC.coords: #process one linestring k+=1 elif(LineStringC.wkt.split(" ")[0]=='POINT'): # apparently this is not needed #print("debug:POINT") k=0 k+=1 else: k=0 k+=1 ac.close() allc.close() print("basal contacts saved allpts=",allpts,"deci_pts=",deci_points) print("saved as",path_in+'all_contacts.csv',"and",path_in+'contacts.csv') return(ls_dict,ls_dict_decimate) ######################################### # Remove all basal contacts that are defined by faults and save to shapefile (no decimation) # # save_basal_no_faults(path_out,path_fault,ls_dict,dist_buffer,c_l,dst_crs) # Args: # path_out directory of output csv file # path_fault path to clipped fault layer # ls_dict dictionary of basal contact points # dist_buffer distance in projection units of buffer around faults to clip # c_l dictionary of codes and labels specific to input geo information layers # dst_crs Coordinate Reference System of destination geotif (any length-based projection) # # Saves out a csv file of decimated basal contacts with height and formation information. ######################################### def save_basal_no_faults(path_out,path_fault,ls_dict,dist_buffer,c_l,dst_crs): faults_clip = gpd.read_file(path_fault) df = DataFrame.from_dict(ls_dict, "index") contacts = GeoDataFrame(df,crs=dst_crs, geometry='geometry') fault_zone = faults_clip.buffer(dist_buffer) #defines buffer around faults where strat nodes will be removed all_fz = fault_zone.unary_union contacts_nofaults = contacts.difference(all_fz) #deletes contact nodes within buffer ls_nf={} cnf_copy=contacts_nofaults.copy() #print(contacts_nofaults.shape) for i in range(0,len(contacts_nofaults)): j=len(contacts_nofaults)-i-1 #print(j) if(cnf_copy.iloc[j].geom_type=="GeometryCollection"):#remove rows with geometry collections (== empty?) 
cnf_copy.drop([j,j],inplace=True) else: # save to dataframe ls_nf[j]= {"id": j,c_l['c']:df.iloc[j][c_l['c']].replace(" ","_").replace("-","_"),c_l['g']:df.iloc[j][c_l['g']].replace(" ","_").replace("-","_"), "geometry": cnf_copy.iloc[j]} df_nf = DataFrame.from_dict(ls_nf, "index") contacts_nf = GeoDataFrame(df_nf,crs=dst_crs, geometry='geometry') contacts_nf.to_file(driver = 'ESRI Shapefile', filename= path_out) #contacts_nofaults = gpd.read_file('./data/faults_clip.shp') print("basal contacts without faults saved as",path_out) ######################################### # Save basal contacts from shapefile with decimation # # Args: # contacts geopandas object containing basal contact polylines # output_path directory of output csv file # dtm rasterio format elevation grid # contact_decimate decimation factor for saving every nth input point on contact polylines # ######################################### def save_basal_contacts_csv(contacts,output_path,dtm,dtb,dtb_null,cover_map,contact_decimate,c_l): f=open(output_path+'contacts4.csv','w') f.write('X,Y,Z,formation\n') for index,contact in contacts.iterrows(): i=0 lastx,lasty=-1e7,-1e7 first=True if(not str(contact.geometry)=='None'): if contact.geometry.type == 'MultiLineString': for line in contact.geometry: if(line.coords[0][0]==lastx and line.coords[0][1]==lasty): #continuation of line if(m2l_utils.mod_safe(i,contact_decimate)==0 or i==int((len(contact.geometry)-1)/2) or i==len(contact.geometry)-1): locations=[(line.coords[0][0],line.coords[0][1])] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(line.coords[0][0],line.coords[0][1],height,contact[c_l['c']]) #ostr=str(line.coords[0][0])+','+str(line.coords[0][1])+','+str(height)+','+str(contact[c_l['c']])+'\n' f.write(ostr) else: #new line if(not first): locations=[(lastx,lasty)] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(lastx,lasty,height,contact[c_l['c']]) #ostr=str(lastx)+','+str(lasty)+','+str(height)+','+str(contact[c_l['c']])+'\n' f.write(ostr) locations=[(line.coords[0][0],line.coords[0][1])] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(line.coords[0][0],line.coords[0][1],height,contact[c_l['c']]) #ostr=str(line.coords[0][0])+','+str(line.coords[0][1])+','+str(height)+','+str(contact[c_l['c']])+'\n' f.write(ostr) first=False i=i+1 lastx=line.coords[1][0] lasty=line.coords[1][1] elif contact.geometry.type == 'LineString': locations=[(contact.geometry.coords[0][0],contact.geometry.coords[0][1])] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(contact.geometry.coords[0][0],contact.geometry.coords[0][1],height,contact[c_l['c']]) #ostr=str(contact.geometry.coords[0][0])+','+str(contact.geometry.coords[0][1])+','+str(height)+','+str(contact[c_l['c']])+'\n' f.write(ostr) locations=[(contact.geometry.coords[1][0],contact.geometry.coords[1][1])] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(contact.geometry.coords[1][0],contact.geometry.coords[1][1],height,contact[c_l['c']]) #ostr=str(contact.geometry.coords[1][0])+','+str(contact.geometry.coords[1][1])+','+str(height)+','+str(contact[c_l['c']])+'\n' f.write(ostr) f.close() print('decimated contacts saved as',output_path+'contacts4.csv') ######################################### # Remove faults from decimated basal contacts as save as csv file (superceded 
by save_basal_contacts_csv) # # save_contacts_with_faults_removed(path_fault,path_out,dist_buffer,ls_dict,ls_dict_decimate,c_l,dst_crs,dtm) # Args: # path_fault path to clipped fault layer # path_out directory of output csv file # dist_buffer distance in projection units of buffer around faults to clip # ls_dict dictionary of basal contact points # ls_dict dictionary of decimated basal contact points # c_l dictionary of codes and labels specific to input geo information layers # dst_crs Coordinate Reference System of destination geotif (any length-based projection) # dtm rasterio format elevation grid # # Saves out csv file of basal contacts after clipping out buffered fault locations. ######################################### def save_contacts_with_faults_removed(path_fault,path_out,dist_buffer,ls_dict,ls_dict_decimate,c_l,dst_crs,dtm,dtb,dtb_null,cover_map): faults_clip = gpd.read_file(path_fault) df = DataFrame.from_dict(ls_dict, "index") contacts = GeoDataFrame(df,crs=dst_crs, geometry='geometry') fault_zone = faults_clip.buffer(dist_buffer) #defines buffer around faults where strat nodes will be removed all_fz = fault_zone.unary_union #display(all_fz) print("undecimated points:",len(ls_dict_decimate)) df_nf = DataFrame.from_dict(ls_dict_decimate, "index") contacts_nf_deci = GeoDataFrame(df_nf,crs=dst_crs, geometry='geometry') #contacts_decimate_nofaults = contacts_nf_deci.difference(all_fz) #deletes contact nodes within buffer contacts_decimate_nofaults = contacts_nf_deci[~contacts_nf_deci.geometry.within(all_fz)] cnf_de_copy=contacts_decimate_nofaults.copy() ac=open(path_out+'/contacts4.csv',"w") ac.write("X,Y,Z,formation\n") i=0 for indx,cdn in contacts_decimate_nofaults.iterrows(): if(not cdn.geometry.geom_type=="GeometryCollection"): #print(cdn.x,cdn.y) locations=[(cdn.geometry.x,cdn.geometry.y)] #doesn't like point right on edge? height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(cdn.geometry.x,cdn.geometry.y,height,cdn[c_l['c']].replace(" ","_").replace("-","_")) #ostr=str(cdn.geometry.x)+","+str(cdn.geometry.y)+","+height+","+str(cdn[c_l['c']].replace(" ","_").replace("-","_"))+"\n" ac.write(ostr) i=i+1 ac.close() print(i,"decimated contact points saved as",path_out+'/contacts4.csv') ######################################### # Save faults as contact info and make vertical (for the moment) # # save_faults(path_faults,path_fault_orientations,dtm,c_l,fault_decimate) # Args: # path_faults path to clipped fault layer # path_fault_orientations directory for outputs # dtm rasterio format elevation grid # c_l dictionary of codes and labels specific to input geo information layers # fault_decimate decimation factor for saving every nth input point on fault polylines # # Saves out csv file of fault locations after decimation. Also saves out nominal orientation data at mid point # of each fault trace with strike parallel to start end point line and arbitrary vertical dip. Also saves out csv list # of faults with their start-finish length that could be used for filtering which faults to include in model. 
######################################### def save_faults(path_faults,output_path,dtm,dtb,dtb_null,cover_map,c_l,fault_decimate,fault_min_len,fault_dip): faults_clip=gpd.read_file(path_faults) f=open(output_path+'/faults.csv',"w") f.write("X,Y,Z,formation\n") fo=open(output_path+'/fault_orientations.csv',"w") fo.write("X,Y,Z,DipDirection,dip,DipPolarity,formation\n") #fo.write("X,Y,Z,azimuth,dip,polarity,formation\n") fd=open(output_path+'/fault_dimensions.csv',"w") fd.write("Fault,HorizontalRadius,VerticalRadius,InfluenceDistance\n") #fd.write("Fault_ID,strike,dip_direction,down_dip\n") split=c_l['fdipest_vals'].split(",") #convert text dips to equally spaced angles fault_dip_choices=np.linspace(0,90, len(split)+1) dip_dirs={'north':(0.0,1.0),'northeast':(.707,.707),'east':(1.0,0.0),'southeast':(.707,-.707), 'south':(0.0,-1.0),'southwest':(-.707,-.707),'west':(-1.0,0.0),'northwest':(-.707,.707)} for indx,flt in faults_clip.iterrows(): if(c_l['fault'] in flt[c_l['f']]): fault_name='Fault_'+str(flt[c_l['o']]) #display(flt.geometry.type) if(flt.geometry.type=='LineString'): flt_ls=LineString(flt.geometry) dlsx=flt_ls.coords[0][0]-flt_ls.coords[len(flt_ls.coords)-1][0] dlsy=flt_ls.coords[0][1]-flt_ls.coords[len(flt_ls.coords)-1][1] strike=sqrt((dlsx*dlsx)+(dlsy*dlsy)) if(strike>fault_min_len): i=0 saved=0 for afs in flt_ls.coords: if(m2l_utils.mod_safe(i,fault_decimate)==0 or i==int((len(flt_ls.coords)-1)/2) or i==len(flt_ls.coords)-1): #decimate to reduce number of points, but also take mid and end points of a series to keep some shape if(saved==0): p1x=afs[0] p1y=afs[1] elif(saved==1): p2x=afs[0] p2y=afs[1] elif(saved==2): p3x=afs[0] p3y=afs[1] # avoids narrow angles in fault traces which geomodeller refuses to solve # should really split fault in two at apex, but life is too short if(m2l_utils.tri_angle(p2x,p2y,p1x,p1y,p3x,p3y)<45.0): break elif(saved>2): p1x=p2x p1y=p2y p2x=p3x p2y=p3y p3x=afs[0] p3y=afs[1] # avoids narrow angles in fault traces which geomodeller refuses to solve # should really split fault in two at apex, but life is too short if(m2l_utils.tri_angle(p2x,p2y,p1x,p1y,p3x,p3y)<45.0): break saved=saved+1 locations=[(afs[0],afs[1])] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) # slightly randomise first and last points to avoid awkward quadruple junctions etc. 
#if(i==0 or i==len(flt_ls.coords)-1): # ostr=str(afs[0]+np.random.ranf())+","+str(afs[1]+np.random.ranf())+","+str(height)+","+fault_name+"\n" #else: ostr="{},{},{},{}\n"\ .format(afs[0],afs[1],height,fault_name) #ostr=str(afs[0])+","+str(afs[1])+","+str(height)+","+fault_name+"\n" f.write(ostr) i=i+1 if(dlsx==0.0 or dlsy == 0.0): continue lsx=dlsx/sqrt((dlsx*dlsx)+(dlsy*dlsy)) lsy=dlsy/sqrt((dlsx*dlsx)+(dlsy*dlsy)) azimuth=degrees(atan2(lsy,-lsx)) % 180 #normal to line segment locations=[(flt_ls.coords[int((len(afs)-1)/2)][0],flt_ls.coords[int((len(afs)-1)/2)][1])] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) if(flt[c_l['o']]=='-1'): print(flt[c_l['o']],c_l['fdip'],flt[c_l['fdip']],c_l['fdipnull'],c_l['fdipest'], flt[c_l['fdipest']],c_l['fdipest_vals']) if(flt[c_l['fdip']]==c_l['fdipnull']): # null specifc dip defined if(not str(flt[c_l['fdipest']])=='None'): # dip estimate defined i=0 for choice in split: if(flt[c_l['o']]=='-1'): print(choice) if(choice == flt[c_l['fdipest']]): fault_dip=int(fault_dip_choices[i+1]) if(flt[c_l['o']]=='-1'): print('found_dip',fault_dip) i=i+1 else: if(fault_dip == -999): # random flag fault_dip=random.randint(60,90) else: fault_dip=int(flt[c_l['fdip']]) # specific dip defined if(c_l['fdipdir_flag']=='num'): # numeric dip direction defined azimuth=flt[c_l['fdipdir']] elif(not str(flt[c_l['fdipdir']])=='None'): # alpha dip direction defined dotprod=degrees(acos((-lsx*dip_dirs[flt[c_l['fdipdir']]][0])+(lsy*dip_dirs[flt[c_l['fdipdir']]][1]))) if(dotprod>45): fault_dip=-fault_dip ostr="{},{},{},{},{},{},{}\n"\ .format(flt_ls.coords[int((len(flt_ls.coords)-1)/2)][0],flt_ls.coords[int((len(flt_ls.coords)-1)/2)][1], height,azimuth,fault_dip,1,fault_name) #ostr=str(flt_ls.coords[int((len(flt_ls.coords)-1)/2)][0])+","+str(flt_ls.coords[int((len(flt_ls.coords)-1)/2)][1])+","+height+","+str(azimuth)+","+str(fault_dip)+",1,"+fault_name+"\n" fo.write(ostr) strike=strike*1.25 ostr="{},{},{},{}\n"\ .format(fault_name,strike/2,strike,strike/4.0) #ostr=fault_name+","+str(strike/2)+","+str(strike)+","+str(strike/4.0)+"\n" fd.write(ostr) elif(flt.geometry.type=='MultiLineString' or flt.geometry.type=='GeometryCollection' ): #shouldn't happen any more sum_strike=0 first=True for pline in flt.geometry: flt_ls=LineString(pline) dlsx=flt_ls.coords[0][0]-flt_ls.coords[len(flt_ls.coords)-1][0] dlsy=flt_ls.coords[0][1]-flt_ls.coords[len(flt_ls.coords)-1][1] sum_strike=sum_strike+sqrt((dlsx*dlsx)+(dlsy*dlsy)) if(first): firstx=flt_ls.coords[0][0] firsty=flt_ls.coords[0][1] lastx=flt_ls.coords[0][0] lasty=flt_ls.coords[0][1] ostr="{},{},{},{}\n"\ .format(fault_name,sum_strike/2,sum_strike,sum_strike/4.0) #ostr=fault_name+","+str(sum_strike/2)+","+str(sum_strike)+","+str(sum_strike/4.0)+"\n" fd.write(ostr) dlsx=firstx-lastx dlsy=firsty-lasty if(dlsx==0.0 or dlsy == 0.0): continue lsx=dlsx/sqrt((dlsx*dlsx)+(dlsy*dlsy)) lsy=dlsy/sqrt((dlsx*dlsx)+(dlsy*dlsy)) azimuth=degrees(atan2(lsy,-lsx)) % 180 #normal to line segment locations=[(flt_ls.coords[int((len(afs)-1)/2)][0],flt_ls.coords[int((len(afs)-1)/2)][1])] # should be mid-fault not mid fault segemnt but probs doesnt matter height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{},{},{},{},\n"\ .format(flt_ls.coords[int((len(flt_ls.coords)-1)/2)][0],flt_ls.coords[int((len(flt_ls.coords)-1)/2)][1],height,azimuth,fault_dip,1,fault_name) 
#ostr=str(flt_ls.coords[int((len(flt_ls.coords)-1)/2)][0])+","+str(flt_ls.coords[int((len(flt_ls.coords)-1)/2)][1])+","+height+","+str(azimuth)+","+str(fault_dip)+",1,"+fault_name+"\n" fo.write(ostr) for pline in flt.geometry: #display(pline) #display(flt) flt_ls=LineString(pline) dlsx=flt_ls.coords[0][0]-flt_ls.coords[len(flt_ls.coords)-1][0] dlsy=flt_ls.coords[0][1]-flt_ls.coords[len(flt_ls.coords)-1][1] if(dlsx==0.0 or dlsy == 0.0): continue if(sum_strike>fault_min_len): i=0 saved=0 for afs in flt_ls.coords: if(m2l_utils.mod_safe(i,fault_decimate)==0 or i==int((len(flt_ls.coords)-1)/2) or i==len(flt_ls.coords)-1): #decimate to reduce number of points, but also take mid and end points of a series to keep some shape if(saved==0): p1x=afs[0] p1y=afs[1] elif(saved==1): p2x=afs[0] p2y=afs[1] elif(saved==2): p3x=afs[0] p3y=afs[1] # avoids narrow angles in fault traces which geomodeller refuses to solve # should really split fault in two at apex, but life is too short if(m2l_utils.tri_angle(p2x,p2y,p1x,p1y,p3x,p3y)<45.0): break elif(saved>2): p1x=p2x p1y=p2y p2x=p3x p2y=p3y p3x=afs[0] p3y=afs[1] # avoids narrow angles in fault traces which geomodeller refuses to solve # should really split fault in two at apex, but life is too short if(m2l_utils.tri_angle(p2x,p2y,p1x,p1y,p3x,p3y)<45.0): break saved=saved+1 locations=[(afs[0],afs[1])] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) # slightly randomise first and last points to avoid awkward quadruple junctions etc. #if(i==0 or i==len(flt_ls.coords)-1): # ostr=str(afs[0]+np.random.ranf())+","+str(afs[1]+np.random.ranf())+","+str(height)+","+fault_name+"\n" #else: ostr="{},{},{},{}\n"\ .format(afs[0],afs[1],height,fault_name) #ostr=str(afs[0])+","+str(afs[1])+","+str(height)+","+fault_name+"\n" f.write(ostr) i=i+1 f.close() fo.close() fd.close() print("fault orientations saved as",output_path+'fault_orientations.csv') print("fault positions saved as",output_path+'faults.csv') print("fault dimensions saved as",output_path+'fault_dimensions.csv') ######################################### # Save faults as contact info and make vertical (for the moment) # old code, to be deleted? 
######################################### def old_save_faults(path_faults,path_fault_orientations,dtm,dtb,dtb_null,cover_map,c_l,fault_decimate,fault_min_len,fault_dip): faults_clip=gpd.read_file(path_faults) f=open(path_fault_orientations+'/faults.csv',"w") f.write("X,Y,Z,formation\n") fo=open(path_fault_orientations+'/fault_orientations.csv',"w") fo.write("X,Y,Z,DipDirection,dip,DipPolarity,formation\n") #fo.write("X,Y,Z,azimuth,dip,polarity,formation\n") fd=open(path_fault_orientations+'/fault_dimensions.csv',"w") fd.write("Fault,HorizontalRadius,VerticalRadius,InfluenceDistance\n") #fd.write("Fault_ID,strike,dip_direction,down_dip\n") for indx,flt in faults_clip.iterrows(): if(c_l['fault'] in flt[c_l['f']]): fault_name='Fault_'+str(flt[c_l['o']]) flt_ls=LineString(flt.geometry) dlsx=flt_ls.coords[0][0]-flt_ls.coords[len(flt_ls.coords)-1][0] dlsy=flt_ls.coords[0][1]-flt_ls.coords[len(flt_ls.coords)-1][1] strike=sqrt((dlsx*dlsx)+(dlsy*dlsy)) if(strike>fault_min_len): i=0 for afs in flt_ls.coords: if(m2l_utils.mod_safe(i,fault_decimate)==0 or i==int((len(flt_ls.coords)-1)/2) or i==len(flt_ls.coords)-1): #decimate to reduce number of points, but also take mid and end points of a series to keep some shape locations=[(afs[0],afs[1])] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(afs[0],afs[1],height,fault_name) #ostr=str(afs[0])+","+str(afs[1])+","+str(height)+","+fault_name+"\n" f.write(ostr) i=i+1 if(dlsx==0.0 or dlsy == 0.0): continue lsx=dlsx/sqrt((dlsx*dlsx)+(dlsy*dlsy)) lsy=dlsy/sqrt((dlsx*dlsx)+(dlsy*dlsy)) azimuth=degrees(atan2(lsy,-lsx)) % 180 #normal to line segment locations=[(flt_ls.coords[int((len(afs)-1)/2)][0],flt_ls.coords[int((len(afs)-1)/2)][1])] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{},{},{},{}\n"\ .format(flt_ls.coords[int((len(flt_ls.coords)-1)/2)][0],flt_ls.coords[int((len(flt_ls.coords)-1)/2)][1],height,azimuth,fault_dip,1,fault_name) #ostr=str(flt_ls.coords[int((len(flt_ls.coords)-1)/2)][0])+","+str(flt_ls.coords[int((len(flt_ls.coords)-1)/2)][1])+","+height+","+str(azimuth)+","+str(fault_dip)+",1,"+fault_name+"\n" fo.write(ostr) ostr="{},{},{},{}\n"\ .format(fault_name,strike/2,strike/2,strike/4.0) #ostr=fault_name+","+str(strike/2)+","+str(strike/2)+","+str(strike/4.0)+"\n" fd.write(ostr) f.close() fo.close() fd.close() print("fault orientations saved as",path_fault_orientations+'fault_orientations.csv') print("fault positions saved as",path_fault_orientations+'faults.csv') print("fault dimensions saved as",path_fault_orientations+'fault_dimensions.csv') ######################################## #Save fold axial traces # # save_fold_axial_traces(path_faults,path_fault_orientations,dtm,c_l,fault_decimate) # Args: # path_folds path to clipped fault layer # path_fold_orientations directory for outputs # dtm rasterio format elevation grid # c_l dictionary of codes and labels specific to input geo information layers # fold_decimate decimation factor for saving every nth input point on fold axial trace polylines # # Saves out csv file of fold axial trace locations after decimation. 
######################################### def save_fold_axial_traces(path_folds,path_fold_orientations,dtm,dtb,dtb_null,cover_map,c_l,fold_decimate): folds_clip=gpd.read_file(path_folds) fo=open(path_fold_orientations+'/fold_axial_traces.csv',"w") fo.write("X,Y,Z,code,type\n") for indx,fold in folds_clip.iterrows(): fold_name=str(fold[c_l['o']]) if(fold.geometry.type=='MultiLineString'): for mls in fold.geometry: fold_ls=LineString(mls) i=0 for afs in fold_ls.coords: if(c_l['fold'] in fold[c_l['f']]): if(m2l_utils.mod_safe(i,fold_decimate)==0 or i==int((len(fold_ls.coords)-1)/2) or i==len(fold_ls.coords)-1): #decimate to reduce number of points, but also take mid and end points of a series to keep some shape locations=[(afs[0],afs[1])] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},FA_{},{}\n"\ .format(afs[0],afs[1],height,fold_name,fold[c_l['t']].replace(',','')) #ostr=str(afs[0])+','+str(afs[1])+','+str(height)+','+'FA_'+fold_name+','+fold[c_l['t']].replace(',','')+'\n' fo.write(ostr) i=i+1 else: fold_ls=LineString(fold.geometry) i=0 for afs in fold_ls.coords: if(c_l['fold'] in fold[c_l['f']]): if(m2l_utils.mod_safe(i,fold_decimate)==0 or i==int((len(fold_ls.coords)-1)/2) or i==len(fold_ls.coords)-1): #decimate to reduce number of points, but also take mid and end points of a series to keep some shape locations=[(afs[0],afs[1])] height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},FA_{},{}\n"\ .format(afs[0],afs[1],height,fold_name,fold[c_l['t']].replace(',','')) #ostr=str(afs[0])+','+str(afs[1])+','+str(height)+','+'FA_'+fold_name+','+fold[c_l['t']].replace(',','')+'\n' fo.write(ostr) i=i+1 fo.close() print("fold axial traces saved as",path_fold_orientations+'fold_axial_traces.csv') ######################################### # Create basal contact points with orientation from orientations and basal points # # Args: # contacts geopandas object containing basal contacts # structures geopandas object containing bedding orientations # output_path directory for outputs # dtm rasterio format elevation grid # dist_buffer # c_l dictionary of codes and labels specific to input geo information layers ######################################### def create_basal_contact_orientations(contacts,structures,output_path,dtm,dtb,dtb_null,cover_map,dist_buffer,c_l): f=open(output_path+'projected_dip_contacts2.csv',"w") f.write('X,Y,Z,azimuth,dip,polarity,formation\n') #print("len=",len(contacts)) i=0 for indx,acontact in contacts.iterrows(): #loop through distinct linestrings #display(acontact[1].geometry) thegroup=acontact[c_l['g']].replace("_"," ") #print("thegroup=",thegroup) is_gp=structures[c_l['g']] == thegroup # subset orientations to just those with this group all_structures = structures[is_gp] for ind,astr in all_structures.iterrows(): # loop through valid orientations orig = Point(astr['geometry']) np = acontact.geometry.interpolate(acontact.geometry.project(orig)) if(np.distance(orig)<dist_buffer): for line in acontact.geometry: # loop through line segments for pair in m2l_utils.pairs(list(line.coords)): # loop through line segments segpair=LineString((pair[0],pair[1])) if segpair.distance(np)< 0.0001: # line segment closest to close point ddx=sin(radians(astr[c_l['d']])) ddy=cos(radians(astr[c_l['d']])) dlsx=pair[0][0]-pair[1][0] dlsy=pair[0][1]-pair[1][1] lsx=dlsx/sqrt((dlsx*dlsx)+(dlsy*dlsy)) lsy=dlsy/sqrt((dlsx*dlsx)+(dlsy*dlsy)) angle=degrees(acos((ddx*lsx)+(ddy*lsy))) if(fabs(angle-90)<30.0): # dip_dir normal and 
contact are close enough to parallel locations=[(np.x,np.y)] height= m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ls_ddir=degrees(atan2(lsy,-lsx)) #normal to line segment if (ddx*lsy)+(-ddy*lsx)<0: #dot product tests right quadrant ls_ddir=(ls_ddir-180)%360 ostr="{},{},{},{},{},{},{}\n"\ .format(np.x,np.y,height,ls_ddir,astr[c_l['d']], 1,acontact[c_l['c']].replace(" ","_").replace("-","_")) #ostr=str(np.x)+","+str(np.y)+","+height+","+str(ls_ddir)+","+str(astr[c_l['d']])+",1,"+acontact[c_l['c']].replace(" ","_").replace("-","_")+"\n" f.write(ostr) i=i+1 f.close() print("basal contact orientations saved as",output_path+'projected_dip_contacts2.csv') ######################################### # For each pluton polygon, create dip info based on ideal form with azimuth parallel to local contact # # process_plutons(tmp_path,output_path,geol_clip,local_paths,dtm,pluton_form,pluton_dip,contact_decimate,c_l) # Args: # tmp_path directory of temporary outputs from m2l # output_path directory of outputs from m2lc geol_clip path ot clipped geology layer local_paths Boolean to control if # local on web data is used dtm rasterio format elevation grid # pluton_form fundamental pluton geometry (one of domes, saucers, pendant, batholith) pluton_dip fix dip for all pluton # contacts contact_decimate decimation factor for saving every nth input point on contact polylines # c_l dictionary of codes and labels specific to input geo information layers # # Saves out csv of locations of intrusive contacts and csv of contact orientations. Orientations can take one of four modes # (inward/ outward dipping normal/reverse polarity) and have dip direction normal to local contact and fixed arbitrary dip # For each instruve but not sill polygon, find older neighbours and store decimated contact points. 
Also store dipping contact # orientations (user defined, just because) with four possible sub-surface configurations: # saucers: +++/ batholiths: +++/_ __ _+++ domes: /‾+++‾ pendants: +++\ _/+++ # # Saves out orientations and contact points ######################################### def process_plutons(tmp_path,output_path,geol_clip,local_paths,dtm,dtb,dtb_null,cover_map,pluton_form,pluton_dip,contact_decimate,c_l): groups=np.genfromtxt(tmp_path+'groups.csv',delimiter=',',dtype='U100') if(len(groups.shape)==1): ngroups=len(groups)-1 orig_ngroups=ngroups gp_ages=np.zeros((1000,3)) gp_names=np.zeros((1000),dtype='U100') for i in range (0,ngroups): gp_ages[i,0]=-1e6 # group max_age gp_ages[i,1]=1e6 # group min_age gp_ages[i,2]=i # group index gp_names[i]=groups[i+1].replace("\n","") else: ngroups=len(groups[0])-1 orig_ngroups=ngroups gp_ages=np.zeros((1000,3)) gp_names=np.zeros((1000),dtype='U100') for i in range (0,ngroups): gp_ages[i,0]=-1e6 # group max_age gp_ages[i,1]=1e6 # group min_age gp_ages[i,2]=i # group index gp_names[i]=groups[0][i+1].replace("\n","") #print(i,gp_names[i]) #print(local_paths) allc=open(output_path+'all_ign_contacts.csv',"w") allc.write('GROUP_,id,x,y,z,code\n') ac=open(output_path+'ign_contacts.csv',"w") ac.write("X,Y,Z,formation\n") ao=open(output_path+'ign_orientations_'+pluton_form+'.csv',"w") ao.write("X,Y,Z,azimuth,dip,polarity,formation\n") #print(output_path+'ign_orientations_'+pluton_form+'.csv') j=0 allpts=0 ls_dict={} ls_dict_decimate={} id=0 for indx,ageol in geol_clip.iterrows(): ades=str(ageol[c_l['ds']]) arck=str(ageol[c_l['r1']]) if(str(ageol[c_l['g']])=='None'): agroup=str(ageol[c_l['c']]) else: agroup=str(ageol[c_l['g']]) for i in range(0,ngroups): if (gp_names[i]==agroup): if(int(ageol[c_l['max']]) > gp_ages[i][0] ): gp_ages[i][0] = ageol[c_l['max']] if(int(ageol[c_l['min']]) < gp_ages[i][1] ): gp_ages[i][1] = ageol[c_l['min']] if(c_l['intrusive'] in arck and c_l['sill'] not in ades): newgp=str(ageol[c_l['c']]) #print(newgp) if(str(ageol[c_l['g']])=='None'): agp=str(ageol[c_l['c']]) else: agp=str(ageol[c_l['g']]) if(not newgp in gp_names): gp_names[ngroups]=newgp gp_ages[ngroups][0]=ageol[c_l['max']] gp_ages[ngroups][1]=ageol[c_l['min']] gp_ages[ngroups][2]=ngroups ngroups=ngroups+1 neighbours=[] j+=1 central_age=ageol[c_l['min']] #absolute age of central polygon central_poly=ageol.geometry for ind,bgeol in geol_clip.iterrows(): #potential neighbouring polygons if(ageol.geometry!=bgeol.geometry): #do not compare with self if (ageol.geometry.intersects(bgeol.geometry)): # is a neighbour neighbours.append([(bgeol[c_l['c']],bgeol[c_l['min']],bgeol[c_l['r1']],bgeol[c_l['ds']],bgeol.geometry)]) #display(neighbours) if(len(neighbours) >0): for i in range (0,len(neighbours)): if((c_l['intrusive'] in neighbours[i][0][2] and c_l['sill'] not in ades) #or ('intrusive' not in neighbours[i][0][2]) and neighbours[i][0][1] > central_age ): # neighbour is older than central or (c_l['intrusive'] not in neighbours[i][0][2]) and neighbours[i][0][1] ): # neighbour is older than central older_polygon=neighbours[i][0][4] if(not central_poly.is_valid ): central_poly = central_poly.buffer(0) if(not older_polygon.is_valid): older_polygon = older_polygon.buffer(0) LineStringC = central_poly.intersection(older_polygon) if(LineStringC.wkt.split(" ")[0]=='MULTIPOLYGON' or LineStringC.wkt.split(" ")[0]=='POLYGON'): #ignore polygon intersections for now, worry about them later! 
print(ageol[c_l['o']],"debug:",LineStringC.wkt.split(" ")[0]) continue elif(LineStringC.wkt.split(" ")[0]=='MULTILINESTRING' or LineStringC.wkt.split(" ")[0]=='GEOMETRYCOLLECTION'): k=0 ls_dict[id] = {"id": id,c_l['c']:newgp,c_l['g']:newgp, "geometry": LineStringC} id=id+1 for lineC in LineStringC: #process all linestrings if(lineC.wkt.split(" ")[0]=='LINESTRING'): if(m2l_utils.mod_safe(k,contact_decimate)==0 or k==int((len(LineStringC)-1)/2) or k==len(LineStringC)-1): #decimate to reduce number of points, but also take second and third point of a series to keep gempy happy locations=[(lineC.coords[0][0],lineC.coords[0][1])] #doesn't like point right on edge? if(lineC.coords[0][0] > dtm.bounds[0] and lineC.coords[0][0] < dtm.bounds[2] and lineC.coords[0][1] > dtm.bounds[1] and lineC.coords[0][1] < dtm.bounds[3]): height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(lineC.coords[0][0],lineC.coords[0][1],height,newgp.replace(" ","_").replace("-","_")) #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+height+","+newgp.replace(" ","_").replace("-","_")+"\n" ac.write(ostr) allc.write(agp+","+str(ageol[c_l['o']])+","+ostr) ls_dict_decimate[allpts] = {"id": allpts,c_l['c']:newgp,c_l['g']:newgp, "geometry": Point(lineC.coords[0][0],lineC.coords[0][1])} allpts+=1 else: continue else: if(lineC.coords[0][0] > dtm.bounds[0] and lineC.coords[0][0] < dtm.bounds[2] and lineC.coords[0][1] > dtm.bounds[1] and lineC.coords[0][1] < dtm.bounds[3]): height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(lineC.coords[0][0],lineC.coords[0][1],height,newgp.replace(" ","_").replace("-","_")) #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+height+","+newgp.replace(" ","_").replace("-","_")+"\n" #ls_dict_decimate[allpts] = {"id": id,"CODE":ageol['CODE'],"GROUP_":ageol['GROUP_'], "geometry": Point(lineC.coords[0][0],lineC.coords[0][1])} allc.write(agp+","+str(ageol[c_l['o']])+","+ostr) allpts+=1 if(m2l_utils.mod_safe(k,contact_decimate)==0 or k==int((len(LineStringC)-1)/2) or k==len(LineStringC)-1): #decimate to reduce number of points, but also take second and third point of a series to keep gempy happy dlsx=lineC.coords[0][0]-lineC.coords[1][0] dlsy=lineC.coords[0][1]-lineC.coords[1][1] lsx=dlsx/sqrt((dlsx*dlsx)+(dlsy*dlsy)) lsy=dlsy/sqrt((dlsx*dlsx)+(dlsy*dlsy)) locations=[(lineC.coords[0][0],lineC.coords[0][1])] height= m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) azimuth=(180+degrees(atan2(lsy,-lsx)))%360 #normal to line segment testpx=lineC.coords[0][0]-lsy # pt just a bit in/out from line testpy=lineC.coords[0][0]+lsx if(ageol.geometry.type=='Polygon'): if Polygon(ageol.geometry).contains(Point(testpx, testpy)): azimuth=(azimuth-180)%360 else: if MultiPolygon(ageol.geometry).contains(Point(testpx, testpy)): azimuth=(azimuth-180)%360 if(pluton_form=='saucers'): polarity=1 #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+str(height)+","+str(azimuth)+","+str(pluton_dip)+",1,"+newgp.replace(" ","_").replace("-","_")+"\n" elif(pluton_form=='domes'): polarity=0 azimuth=(azimuth-180)%360 #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+str(height)+","+str(azimuth)+","+str(pluton_dip)+",0,"+newgp.replace(" ","_").replace("-","_")+"\n" elif(pluton_form=='pendant'): polarity=0 #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+str(height)+","+str(azimuth)+","+str(pluton_dip)+",0,"+newgp.replace(" ","_").replace("-","_")+"\n" else: 
#pluton_form == batholith polarity=1 azimuth=(azimuth-180)%360 #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+str(height)+","+str(azimuth)+","+str(pluton_dip)+",1,"+newgp.replace(" ","_").replace("-","_")+"\n" ostr="{},{},{},{},{},{},{}\n"\ .format(lineC.coords[0][0],lineC.coords[0][1],height,azimuth,pluton_dip,polarity,newgp.replace(" ","_").replace("-","_")) ao.write(ostr) k+=1 elif(LineStringC.wkt.split(" ")[0]=='LINESTRING'): # apparently this is not needed k=0 lineC=LineString(LineStringC) if(m2l_utils.mod_safe(k,contact_decimate)==0 or k==int((len(LineStringC)-1)/2) or k==len(LineStringC)-1): #decimate to reduce number of points, but also take second and third point of a series to keep gempy happy locations=[(lineC.coords[0][0],lineC.coords[0][1])] #doesn't like point right on edge? if(lineC.coords[0][0] > dtm.bounds[0] and lineC.coords[0][0] < dtm.bounds[2] and lineC.coords[0][1] > dtm.bounds[1] and lineC.coords[0][1] < dtm.bounds[3]): height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(lineC.coords[0][0],lineC.coords[0][1],height,newgp.replace(" ","_").replace("-","_")) #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+height+","+newgp.replace(" ","_").replace("-","_")+"\n" ac.write(ostr) allc.write(agp+","+str(ageol[c_l['o']])+","+ostr) ls_dict_decimate[allpts] = {"id": allpts,c_l['c']:newgp,c_l['g']:newgp, "geometry": Point(lineC.coords[0][0],lineC.coords[0][1])} allpts+=1 else: continue else: if(lineC.coords[0][0] > dtm.bounds[0] and lineC.coords[0][0] < dtm.bounds[2] and lineC.coords[0][1] > dtm.bounds[1] and lineC.coords[0][1] < dtm.bounds[3]): height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) ostr="{},{},{},{}\n"\ .format(lineC.coords[0][0],lineC.coords[0][1],height,newgp.replace(" ","_").replace("-","_")) #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+height+","+newgp.replace(" ","_").replace("-","_")+"\n" #ls_dict_decimate[allpts] = {"id": id,"CODE":ageol['CODE'],"GROUP_":ageol['GROUP_'], "geometry": Point(lineC.coords[0][0],lineC.coords[0][1])} allc.write(agp+","+str(ageol[c_l['o']])+","+ostr) allpts+=1 if(m2l_utils.mod_safe(k,contact_decimate)==0 or k==int((len(LineStringC)-1)/2) or k==len(LineStringC)-1): #decimate to reduce number of points, but also take second and third point of a series to keep gempy happy dlsx=lineC.coords[0][0]-lineC.coords[1][0] dlsy=lineC.coords[0][1]-lineC.coords[1][1] lsx=dlsx/sqrt((dlsx*dlsx)+(dlsy*dlsy)) lsy=dlsy/sqrt((dlsx*dlsx)+(dlsy*dlsy)) locations=[(lineC.coords[0][0],lineC.coords[0][1])] height= m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations) azimuth=(180+degrees(atan2(lsy,-lsx)))%360 #normal to line segment testpx=lineC.coords[0][0]-lsy # pt just a bit in/out from line testpy=lineC.coords[0][0]+lsx if(ageol.geometry.type=='Polygon'): if Polygon(ageol.geometry).contains(Point(testpx, testpy)): azimuth=(azimuth-180)%360 else: if MultiPolygon(ageol.geometry).contains(Point(testpx, testpy)): azimuth=(azimuth-180)%360 if(pluton_form=='saucers'): polarity=1 #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+str(height)+","+str(azimuth)+","+str(pluton_dip)+",1,"+newgp.replace(" ","_").replace("-","_")+"\n" elif(pluton_form=='domes'): polarity=0 azimuth=(azimuth-180)%360 #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+str(height)+","+str(azimuth)+","+str(pluton_dip)+",0,"+newgp.replace(" ","_").replace("-","_")+"\n" elif(pluton_form=='pendant'): polarity=0 
#ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+str(height)+","+str(azimuth)+","+str(pluton_dip)+",0,"+newgp.replace(" ","_").replace("-","_")+"\n" else: #pluton_form == batholith polarity=1 azimuth=(azimuth-180)%360 #ostr=str(lineC.coords[0][0])+","+str(lineC.coords[0][1])+","+str(height)+","+str(azimuth)+","+str(pluton_dip)+",1,"+newgp.replace(" ","_").replace("-","_")+"\n" ostr="{},{},{},{},{},{},{}\n"\ .format(lineC.coords[0][0],lineC.coords[0][1],height,azimuth,pluton_dip,polarity,newgp.replace(" ","_").replace("-","_")) ao.write(ostr) k+=1 elif(LineStringC.wkt.split(" ")[0]=='POINT'): # apparently this is not needed k=0 #print("debug:POINT") k+=1 else: k=0 #print(LineStringC.wkt.split(" ")[0]) # apparently this is not needed k+=1 ac.close() ao.close() allc.close() an=open(tmp_path+'groups2.csv',"w") for i in range (0,orig_ngroups): print(i,gp_names[i].replace(" ","_").replace("-","_")) an.write(gp_names[i].replace(" ","_").replace("-","_")+'\n') an.close() all_sorts=pd.read_csv(tmp_path+'all_sorts.csv',",") as_2=open(tmp_path+'all_sorts.csv',"r") contents =as_2.readlines() as_2.close all_sorts_file=open(tmp_path+'all_sorts2.csv',"w") all_sorts_file.write('index,group number,index in group,number in group,code,group\n') j=1 if(cover_map): all_sorts_file.write('-1,0,1,1,cover,cover\n') for i in range(1,len(all_sorts)+1): all_sorts_file.write(contents[i]) #don't write out if already there in new groups list# all_sorts_file.close() print('pluton contacts and orientations saved as:') print(output_path+'ign_contacts.csv') print(output_path+'ign_orientations_'+pluton_form+'.csv') ################################### # Remove orientations that don't belong to actual formations in model # # tidy_data(output_path,tmp_path,use_gcode,use_interpolations,pluton_form) # Args: # output_path directory of outputs from m2lc # tmp_path directory of temporary outputs from m2l use_gcode list of groups that will be retained if possible # use_interpolations include extra data from dip/contact interpolation pluton_form fundamental # pluton geometry (one of domes, saucers, pendant, batholith) # # Removes formations that don’t belong to a group, groups with no formations, orientations # without formations, contacts without formations etc so gempy and other packages don’t have a fit. 
################################### def tidy_data(output_path,tmp_path,use_group,use_interpolations,use_fat,pluton_form,inputs,workflow): contacts=pd.read_csv(output_path+'contacts4.csv',",") all_orientations=pd.read_csv(output_path+'orientations.csv',",") intrusive_contacts=pd.read_csv(output_path+'ign_contacts.csv',",") all_sorts=pd.read_csv(tmp_path+'all_sorts2.csv',",") if('invented_orientations' in inputs and os.path.exists(output_path+'empty_series_orientations.csv')): invented_orientations=pd.read_csv(output_path+'empty_series_orientations.csv',",") all_orientations=pd.concat([all_orientations,invented_orientations],sort=False) elif('invented_orientations' in inputs and not os.path.exists(output_path+'empty_series_orientations.csv')): print('No invented orientations available for merging.') if('interpolated_orientations' in inputs and os.path.exists(tmp_path+'combo_full.csv')): interpolated_orientations=pd.read_csv(tmp_path+'combo_full.csv',",") all_orientations=pd.concat([all_orientations,interpolated_orientations.iloc[::2, :]],sort=False) elif('interpolated_orientations' in inputs and not os.path.exists(tmp_path+'combo_full.csv')): print('No interpolated orientations available for merging.') if('intrusive_orientations' in inputs and os.path.exists(output_path+'ign_orientations_'+pluton_form+'.csv')): intrusive_orientations=pd.read_csv(output_path+'ign_orientations_'+pluton_form+'.csv',",") all_orientations=pd.concat([all_orientations,intrusive_orientations],sort=False) elif('intrusive_orientations' in inputs and not os.path.exists(output_path+'ign_orientations_'+pluton_form+'.csv')): print('No intrusive orientations available for merging.') if('fat_orientations' in inputs and os.path.exists(output_path+'fold_axial_trace_orientations2.csv')): fat_orientations=pd.read_csv(output_path+'fold_axial_trace_orientations2.csv',",") all_orientations=pd.concat([all_orientations,fat_orientations],sort=False) elif('fat_orientations' in inputs and not os.path.exists(output_path+'fold_axial_trace_orientations2.csv')): print('No fat orientations available for merging.') if('near_fault_orientations' in inputs and os.path.exists(tmp_path+'ex_f_combo_full.csv')): near_fault_orientations=pd.read_csv(tmp_path+'ex_f_combo_full.csv',",") all_orientations=pd.concat([all_orientations,near_fault_orientations],sort=False) elif('near_fault_orientations' in inputs and not os.path.exists(tmp_path+'ex_f_combo_full.csv')): print('No near fault orientations available for merging.') if('cover_orientations' in inputs and os.path.exists(output_path+'cover_orientations.csv')): cover_orientations=pd.read_csv(output_path+'cover_orientations.csv',",") all_orientations=pd.concat([all_orientations,cover_orientations],sort=False) elif('cover_orientations' in inputs and not os.path.exists(output_path+'cover_orientations.csv')): print('No cover orientations available for merging.') if('contact_orientations' in inputs and os.path.exists(output_path+'contact_orientations.csv')): contact_orientations=pd.read_csv(output_path+'contact_orientations.csv',",") all_orientations=pd.concat([all_orientations,contact_orientations],sort=False) elif('contact_orientations' in inputs and not os.path.exists(output_path+'contact_orientations.csv')): print('No contact orientations available for merging.') #display(cover_orientations) #display(all_orientations) all_orientations.reset_index(inplace=True) unique_allsorts_contacts=set(all_sorts['code']) all_sorts.set_index('code', inplace = True) 
    all_contacts=pd.concat([intrusive_contacts,contacts],sort=False)

    if('cover_contacts' in inputs and os.path.exists(output_path+'cover_grid.csv')):
        cover_contacts=pd.read_csv(output_path+'cover_grid.csv',",")
        all_contacts=pd.concat([all_contacts,cover_contacts],sort=False)
    elif('cover_contacts' in inputs and not os.path.exists(output_path+'cover_grid.csv')):
        print('No cover grid contacts available for merging.')

    if('fault_tip_contacts' in inputs and os.path.exists(output_path+'fault_tip_contacts.csv')):
        fault_tip_contacts=pd.read_csv(output_path+'fault_tip_contacts.csv',",")
        all_contacts=pd.concat([all_contacts,fault_tip_contacts],sort=False)
    elif('fault_tip_contacts' in inputs and not os.path.exists(output_path+'fault_tip_contacts.csv')):
        print('No fault tip contacts available for merging.')

    all_contacts.reset_index(inplace=True)
    all_contacts.to_csv(output_path+'contacts_clean.csv', index = None, header=True)
    output_path+'contacts_clean.csv'

    all_groups=set(all_sorts['group'])
    unique_contacts=set(all_contacts['formation'])

    # Remove groups that don't have any contact info
    no_contacts=[]
    groups=[]
    for agroup in all_groups:
        found=False
        for acontact in all_contacts.iterrows():
            if(acontact[1]['formation'] in unique_allsorts_contacts):
                if(all_sorts.loc[acontact[1]['formation']]['group'] in agroup ):
                    found=True
                    break
        if(not found):
            no_contacts.append(agroup)
            #print('no contacts for the group:',agroup)
        else:
            groups.append(agroup)

    # Update list of all groups that have formations info
    f=open(tmp_path+'groups2.csv',"r")
    contents =f.readlines()
    f.close()
    #ngroups=contents[0].split(" ")
    #ngroups=int(ngroups[1])
    ngroups=len(contents)
    no_contacts=[]
    groups=[]
    for i in range(0,ngroups):
        #print(i,ngroups,contents[i])
        found=False
        #print('GROUP',agroup)
        for acontact in all_contacts.iterrows():
            if(acontact[1]['formation'] in unique_allsorts_contacts):
                if(all_sorts.loc[acontact[1]['formation']]['group'] in contents[i] and all_sorts.loc[acontact[1]['formation']]['group'] in use_group):
                    found=True
                    break
        if(not found):
            no_contacts.append(contents[i].replace("\n",""))
            #print('no contacts for the group:',contents[i].replace("\n",""))
        else:
            groups.append(contents[i].replace("\n",""))

    # Make new list of groups
    #print('groups contents',len(groups),len(contents))
    fgp=open(tmp_path+'groups_clean.csv',"w")
    for i in range(0,len(groups)):
        fgp.write(groups[i].replace("\n","")+'\n')
    fgp.close()

    #print(all_groups,use_group)
    # Remove orientations with no equivalent formations info
    for agroup in all_groups:
        found=False
        for ano in all_orientations.iterrows():
            #print(ano[1]['formation'])
            #print(all_sorts.loc[ano[1]['formation']]['group'])
            if(ano[1]['formation'] in unique_allsorts_contacts ):
                if(all_sorts.loc[ano[1]['formation']]['group'] in agroup and all_sorts.loc[ano[1]['formation']]['group'] in use_group):
                    found=True
                    break
        if(not found):
            no_contacts.append(agroup)
            print('no orientations for the group:',agroup)

    #print(no_contacts)
    # Update master list of groups and formations info
    fas=open(tmp_path+'all_sorts_clean.csv',"w")
    fas.write('index,group number,index in group,number in group,code,group,uctype\n')
    for a_sort in all_sorts.iterrows():
        if(a_sort[1]['group'] not in no_contacts):
            ostr="{},{},{},{},{},{},{}\n"\
                .format(a_sort[1]['index'],a_sort[1]['group number'],a_sort[1]['index in group'],a_sort[1]['number in group'],a_sort[0],a_sort[1]['group'],'erode')
            #ostr=str(a_sort[1]['index'])+","+str(a_sort[1]['group number'])+","+str(a_sort[1]['index in group'])+","+str(a_sort[1]['number in group'])+","+a_sort[0]+","+a_sort[1]['group']+",erode\n"
            fas.write(ostr)
    fas.close()

    # Update orientation info
    fao=open(output_path+'orientations_clean.csv',"w")
    fao.write('X,Y,Z,azimuth,dip,polarity,formation\n')
    all_sort_codes=set(all_sorts.index)
    #display(no_contacts,unique_contacts,all_sorts,all_sort_contacts)
    for ind,ano in all_orientations.iterrows():
        if(ano['formation'] in all_sort_codes):
            if(all_sorts.loc[ano['formation']]['group'] in no_contacts or not ano['formation'] in unique_contacts or not all_sorts.loc[ano['formation']]['group'] in use_group): #fix here################################
                continue
                #print('dud orientation:',ano[1]['formation'])
            else:
                ostr="{},{},{},{},{},{},{}\n"\
                    .format(ano['X'],ano['Y'],ano['Z'],ano['azimuth'],ano['dip'],ano['polarity'],ano['formation'])
                #ostr=str(ano['X'])+","+str(ano['Y'])+","+str(ano['Z'])+","+\
                #    str(ano['azimuth'])+","+str(ano['dip'])+","+str(ano['polarity'])+","+ano['formation']+"\n"
                fao.write(ostr)
    fao.close()

    # Update formation info
    age_sorted=pd.read_csv(tmp_path+'age_sorted_groups.csv',",")
    newdx=1
    gpdx=1
    fas=open(tmp_path+'all_sorts_clean.csv',"w")
    fas.write('index,group number,index in group,number in group,code,group,uctype\n')
    if(workflow['cover_map']):
        fas.write('-1,0,1,1,cover,cover,erode\n')
    for a_sort in age_sorted.iterrows():
        if(a_sort[1]['group_'] not in no_contacts):
            for old_sort in all_sorts.iterrows():
                if(a_sort[1]['group_']== old_sort[1]['group']):
                    ostr="{},{},{},{},{},{},{}\n"\
                        .format(newdx,gpdx,old_sort[1]['index in group'],old_sort[1]['number in group'],old_sort[0],old_sort[1]['group'],'erode')
                    #ostr=str(newdx)+","+str(gpdx)+","+str(old_sort[1]['index in group'])+","+str(old_sort[1]['number in group'])+","+old_sort[0]+","+old_sort[1]['group']+",erode\n"
                    fas.write(ostr)
                    newdx=newdx+1
            gpdx=gpdx+1
    fas.close()

    """
    fac=open(output_path+'contacts_clean.csv',"w")
    fac.write('X,Y,Z,formation\n')
    for acontact in all_contacts.iterrows():
        if(all_sorts.loc[acontact[1]['formation']]['group'] in no_contacts or not all_sorts.loc[acontact[1]['formation']]['group'] in use_group):
            continue
            #print('dud contact:',acontact[1]['formation'])
        else:
            ostr=str(acontact[1]['X'])+","+str(acontact[1]['Y'])+","+str(acontact[1]['Z'])+","+acontact[1]['formation']+"\n"
            fac.write(ostr)
    fac.close()
    """
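
####################################################
# Illustrative sketch only (not part of the original module): a quick
# consistency check of the files written by tidy_data() above. The helper
# name is an assumption for illustration and is never called in this file.
####################################################
def _example_check_clean_outputs(output_path,tmp_path):
    orientations=pd.read_csv(output_path+'orientations_clean.csv')
    contacts=pd.read_csv(output_path+'contacts_clean.csv')
    strat=pd.read_csv(tmp_path+'all_sorts_clean.csv')
    codes=set(strat['code'])
    # count rows that refer to a unit not kept in the cleaned stratigraphy
    bad_orientations=orientations[~orientations['formation'].isin(codes)]
    bad_contacts=contacts[~contacts['formation'].isin(codes)]
    return len(bad_orientations),len(bad_contacts)
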
####################################################
# calculate distance between two points (duplicate from m2l_utils??)
####################################################
def xxxpt_dist(x1,y1,x2,y2):
    dist=sqrt(pow(x1-x2,2)+pow(y1-y2,2))
    return(dist)

####################################################
# determine if two bounding boxes overlap (not used currently)
####################################################
def bboxes_intersect(bbox1,bbox2):
    if(bbox1[0]<=bbox2[2] and bbox1[0]>=bbox2[0] and bbox1[1]<=bbox2[3] and bbox1[1]<=bbox2[1]):
        return(True)
    elif(bbox1[0]<=bbox2[2] and bbox1[0]>=bbox2[0] and bbox1[3]<=bbox2[3] and bbox1[3]<=bbox2[1]):
        return(True)
    elif(bbox1[2]<=bbox2[2] and bbox1[2]>=bbox2[0] and bbox1[1]<=bbox2[3] and bbox1[1]<=bbox2[1]):
        return(True)
    elif(bbox1[2]<=bbox2[2] and bbox1[2]>=bbox2[0] and bbox1[3]<=bbox2[3] and bbox1[3]<=bbox2[1]):
        return(True)
    elif(bbox2[0]<=bbox1[2] and bbox2[0]>=bbox1[0] and bbox2[3]<=bbox1[3] and bbox2[3]<=bbox1[1]):
        return(True)
    else:
        return(False)

####################################
# Calculate local formation thickness estimates
#
# calc_thickness(tmp_path,output_path,buffer,max_thickness_allowed,c_l)
# Args:
# tmp_path path to temporary file storage directory
# output_path path to m2l outputs directory
# buffer distance within which interpolated bedding orientations will be used for averaging
# max_thickness_allowed maximum valid thickness (should be replaced by infinite search where no faults or fold axial traces are crossed)
# c_l dictionary of codes and labels specific to input geo information layers
#
# Calculate local formation thickness estimates by finding intersection of normals to basal contacts
# with next upper formation in stratigraphy, and using interpolated orientation estimates to calculate true thickness
####################################
def calc_thickness(tmp_path,output_path,buffer,max_thickness_allowed,c_l):
    contact_points_file=tmp_path+'raw_contacts.csv'
    interpolated_combo_file=tmp_path+'combo_full.csv'
    contact_lines = gpd.read_file(tmp_path+'/basal_contacts.shp') #load basal contacts as geopandas dataframe
    all_sorts=pd.read_csv(tmp_path+'all_sorts.csv')
    contacts=pd.read_csv(contact_points_file)
    orientations=pd.read_csv(interpolated_combo_file)
    olength=len(orientations)
    clength=len(contacts)
    cx=contacts['X'].to_numpy()
    cy=contacts['Y'].to_numpy()
    cl=contacts['lsx'].to_numpy(dtype=float)
    cm=contacts['lsy'].to_numpy(dtype=float)
    ctextcode=contacts['formation'].to_numpy()
    ox=orientations['X'].to_numpy()
    oy=orientations['Y'].to_numpy()
    dip=orientations['dip'].to_numpy().reshape(olength,1)
    azimuth=orientations['azimuth'].to_numpy().reshape(olength,1)

    l = np.zeros(len(ox))
    m = np.zeros(len(ox))
    n = np.zeros(len(ox))
    file=open(output_path+'formation_thicknesses.csv','w')
    file.write('X,Y,formation,appar_th,thickness,cl,cm,meanl,meanm,meann,p1x,p1y,p2x,p2y,dip\n')
    dist=m2l_interpolation.distance_matrix(ox,oy,cx,cy)
    #np.savetxt(tmp_path+'dist.csv',dist,delimiter=',')
    #display("ppp",cx.shape,cy.shape,ox.shape,oy.shape,dip.shape,azimuth.shape,dist.shape)
    n_est=0
    for k in range(0,clength): #loop through all contact segments
        a_dist=dist[:,k:k+1]
        is_close=a_dist<buffer
        #display("ic",a_dist.shape,is_close.shape,dip.shape)
        close_dip=dip[is_close]
        #print("cd",close_dip.shape)
        #print(close_dip)
        close_azimuth=azimuth[is_close]
        n_good=0
        for j in range(0,len(close_dip)): #find averaged dips within buffer
            l[n_good],m[n_good],n[n_good]=m2l_utils.ddd2dircos(float(close_dip[j]),float(close_azimuth[j])+90.0)
            #print(k,len(close_dip),n_good,l[n_good],m[n_good],n[n_good])
            n_good=n_good+1
        if(n_good>0): #if we found any candidates
            lm=np.mean(l[:n_good]) #average direction cosine of points within buffer range
            mm=np.mean(m[:n_good])
            nm=np.mean(n[:n_good])
            dip_mean,dipdirection_mean=m2l_utils.dircos2ddd(lm,mm,nm)
            #print(k,type(cm[k]),type(buffer))
            dx1=-cm[k]*buffer
            dy1=cl[k]*buffer
            dx2=-dx1
            dy2=-dy1

            p1=Point((dx1+cx[k],dy1+cy[k]))
            p2=Point((dx2+cx[k],dy2+cy[k]))
            ddline=LineString((p1,p2))
            orig = Point((cx[k],cy[k]))

            crossings=np.zeros((1000,5))

            g=0
            for indx,apair in all_sorts.iterrows(): #loop through all basal contacts
                if(ctextcode[k]==apair['code']):
                    #if(all_sorts.iloc[g]['group']==all_sorts.iloc[g-1]['group']):
                    is_contacta=contact_lines[c_l['c']] == all_sorts.iloc[g-1]['code'] # subset contacts to just those with 'a' code
                    acontacts = contact_lines[is_contacta]
                    i=0
                    for ind,acontact in acontacts.iterrows(): #loop through distinct linestrings for upper contact
                        #if(bboxes_intersect(ddline.bounds,acontact[1].geometry.bounds)):
                        if(not str(acontact.geometry)=='None'):
                            if(ddline.intersects(acontact.geometry)):
                                isects=ddline.intersection(acontact.geometry)
                                if(isects.geom_type=="MultiPoint"):
                                    for pt in isects:
                                        if(pt.distance(orig)<buffer*2):
                                            #print(i,",", pt.x, ",",pt.y,",",apair[1]['code'],",",apair[1]['group'])
                                            crossings[i,0]=i
                                            crossings[i,1]=int(apair['index'])
                                            crossings[i,2]=0
                                            crossings[i,3]=pt.x
                                            crossings[i,4]=pt.y
                                            i=i+1
                                else:
                                    if(isects.distance(orig)<buffer*2):
                                        #print(i,",", isects.x,",", isects.y,",",apair[1]['code'],",",apair[1]['group'])
                                        crossings[i,0]=i
                                        crossings[i,1]=int(apair['index'])
                                        crossings[i,2]=0
                                        crossings[i,3]=isects.x
                                        crossings[i,4]=isects.y
                                        i=i+1
                    if(i>0): #if we found any intersections with base of next higher unit
                        min_dist=1e8
                        min_pt=0
                        for f in range(0,i): #find closest hit
                            this_dist=m2l_utils.ptsdist(crossings[f,3],crossings[f,4],cx[k],cy[k])
                            if(this_dist<min_dist):
                                min_dist=this_dist
                                min_pt=f
                        if(min_dist<max_thickness_allowed): #if not too far, add to output
                            true_thick=sin(radians(dip_mean))*min_dist
                            ostr="{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n"\
                                .format(cx[k],cy[k],ctextcode[k],min_dist,int(true_thick),cl[k],cm[k],lm,mm,nm,p1.x,p1.y,p2.x,p2.y,dip_mean)
                            #ostr=str(cx[k])+','+str(cy[k])+','+ctextcode[k]+','+str(int(true_thick))+\
                            #    ','+str(cl[k])+','+str(cm[k])+','+str(lm)+','+str(mm)+','+str(nm)+','+\
                            #    str(p1.x)+','+str(p1.y)+','+str(p2.x)+','+str(p2.y)+','+str(dip_mean)+'\n'
                            file.write(ostr)
                            n_est=n_est+1
                g=g+1
    print(n_est,'thickness estimates saved as',output_path+'formation_thicknesses.csv')

def calc_thickness_with_grid(tmp_path,output_path,buffer,max_thickness_allowed,c_l,bbox,dip_grid,dip_dir_grid,x,y,spacing):
    contact_points_file=tmp_path+'raw_contacts.csv'

    contact_lines = gpd.read_file(tmp_path+'/basal_contacts.shp') #load basal contacts as geopandas dataframe
    all_sorts=pd.read_csv(tmp_path+'all_sorts.csv')
    contacts=pd.read_csv(contact_points_file)

    clength=len(contacts)
    cx=contacts['X'].to_numpy()
    cy=contacts['Y'].to_numpy()
    cl=contacts['lsx'].to_numpy(dtype=float)
    cm=contacts['lsy'].to_numpy(dtype=float)
    ctextcode=contacts['formation'].to_numpy()

    fth=open(output_path+'formation_thicknesses.csv','w')
    fth.write('X,Y,formation,appar_th,thickness,cl,cm,p1x,p1y,p2x,p2y,dip\n')
    #np.savetxt(tmp_path+'dist.csv',dist,delimiter=',')
    #display("ppp",cx.shape,cy.shape,ox.shape,oy.shape,dip.shape,azimuth.shape,dist.shape)
    n_est=0
    for k in range(0,clength): #loop through all contact segments
        r=int((cy[k]-bbox[1])/spacing)
        c=int((cx[k]-bbox[0])/spacing)
        dip_mean=dip_grid[r,c]
        dx1=-cm[k]*buffer
        dy1=cl[k]*buffer
        dx2=-dx1
        dy2=-dy1

        p1=Point((dx1+cx[k],dy1+cy[k]))
        p2=Point((dx2+cx[k],dy2+cy[k]))
        ddline=LineString((p1,p2))
        orig = Point((cx[k],cy[k]))
        crossings=np.zeros((1000,5))

        g=0
        for indx,apair in all_sorts.iterrows(): #loop through all basal contacts
            if(ctextcode[k]==apair['code']):
                is_contacta=contact_lines[c_l['c']] == all_sorts.iloc[g-1]['code'] # subset contacts to just those with 'a' code
                acontacts = contact_lines[is_contacta]
                i=0
                for ind,acontact in acontacts.iterrows(): #loop through distinct linestrings for upper contact
                    #if(bboxes_intersect(ddline.bounds,acontact[1].geometry.bounds)):
                    if(not str(acontact.geometry)=='None'):
                        if(ddline.intersects(acontact.geometry)):
                            isects=ddline.intersection(acontact.geometry)
                            if(isects.geom_type=="MultiPoint"):
                                for pt in isects:
                                    if(pt.distance(orig)<buffer*2):
                                        #print(i,",", pt.x, ",",pt.y,",",apair[1]['code'],",",apair[1]['group'])
                                        crossings[i,0]=i
                                        crossings[i,1]=int(apair['index'])
                                        crossings[i,2]=0
                                        crossings[i,3]=pt.x
                                        crossings[i,4]=pt.y
                                        i=i+1
                            else:
                                if(isects.distance(orig)<buffer*2):
                                    #print(i,",", isects.x,",", isects.y,",",apair[1]['code'],",",apair[1]['group'])
                                    crossings[i,0]=i
                                    crossings[i,1]=int(apair['index'])
                                    crossings[i,2]=0
                                    crossings[i,3]=isects.x
                                    crossings[i,4]=isects.y
                                    i=i+1
                if(i>0): #if we found any intersections with base of next higher unit
                    min_dist=1e8
                    min_pt=0
                    for f in range(0,i): #find closest hit
                        this_dist=m2l_utils.ptsdist(crossings[f,3],crossings[f,4],cx[k],cy[k])
                        if(this_dist<min_dist):
                            min_dist=this_dist
                            min_pt=f
                    if(min_dist<max_thickness_allowed): #if not too far, add to output
                        true_thick=sin(radians(dip_mean))*min_dist
                        ostr="{},{},{},{},{},{},{},{},{},{},{},{}\n"\
                            .format(cx[k],cy[k],ctextcode[k],min_dist,int(true_thick),cl[k],cm[k],p1.x,p1.y,p2.x,p2.y,dip_mean)
                        #ostr=str(cx[k])+','+str(cy[k])+','+ctextcode[k]+','+str(int(true_thick))+\
                        #    ','+str(cl[k])+','+str(cm[k])+','+str(lm)+','+str(mm)+','+str(nm)+','+\
                        #    str(p1.x)+','+str(p1.y)+','+str(p2.x)+','+str(p2.y)+','+str(dip_mean)+'\n'
                        fth.write(ostr)
                        n_est=n_est+1
            g=g+1
    print(n_est,'thickness estimates saved as',output_path+'formation_thicknesses.csv')

####################################
# Normalise thickness for each estimate to median for that formation
#
# normalise_thickness(output_path)
# Args:
# output_path path to m2l output directory
#
# Normalises previously calculated formation thickness by dividing by median value for that formation
####################################
def normalise_thickness(output_path):
    thickness=pd.read_csv(output_path+'formation_thicknesses.csv', sep=',')
    codes=thickness.formation.unique()
    f=open(output_path+'formation_thicknesses_norm.csv','w')
    f.write('x,y,formation,app_th,thickness,norm_th\n')
    fs=open(output_path+'formation_summary_thicknesses.csv','w')
    fs.write('formation,thickness median,thickness std\n')
    for code in codes:
        is_code=thickness.formation.str.contains(code, regex=False)
        all_thick = thickness[is_code]
        all_thick2=all_thick[all_thick["thickness"]!=0]
        print(code,all_thick2.loc[:,"thickness"].median(),all_thick2.loc[:,"thickness"].std())
        ostr="{},{},{}\n"\
            .format(code,all_thick2.loc[:,"thickness"].median(),all_thick2.loc[:,"thickness"].std())
        #ostr=str(code)+","+str(all_thick2.loc[:,"thickness"].median())+","+str(all_thick2.loc[:,"thickness"].std())+"\n"
        fs.write(ostr)

        med=all_thick2.loc[:,"thickness"].median()
        std=all_thick2.loc[:,"thickness"].std()
        thick=all_thick2.to_numpy()
        for i in range(len(thick)):
            if(med>0):
                ostr="{},{},{},{},{},{}\n"\
                    .format(thick[i,0],thick[i,1],thick[i,2],thick[i,3],thick[i,4],thick[i,4]/med)
                #ostr=str(thick[i,0])+","+str(thick[i,1])+","+str(thick[i,2])+","+str(thick[i,3])+","+str(thick[i,3]/med)+"\n"
                f.write(ostr)
    f.close()
    fs.close()
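
####################################################
# Illustrative sketch only (not part of the original module): the same median
# normalisation expressed with a pandas groupby, as a vectorised view of what
# normalise_thickness() writes out. The helper name is an assumption for
# illustration and is never called in this file.
####################################################
def _example_normalise_thickness_df(output_path):
    thickness=pd.read_csv(output_path+'formation_thicknesses.csv')
    nonzero=thickness[thickness['thickness']!=0].copy()
    # per-formation median, then each estimate divided by its formation's median
    med=nonzero.groupby('formation')['thickness'].transform('median')
    nonzero['norm_th']=nonzero['thickness']/med
    return nonzero
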
####################################################
# Save out near-fold axial trace orientations:
#
# save_fold_axial_traces_orientations(path_folds,output_path,tmp_path,dtm,c_l,dst_crs,fold_decimate,fat_step,close_dip)
# Args:
# path_folds path to clipped fold axial trace layer
# output_path directory for outputs
# dtm rasterio format elevation grid
# c_l dictionary of codes and labels specific to input geo information layers
# fold_decimate decimation factor for saving every nth input point on fold polylines
###################################
def save_fold_axial_traces_orientations(path_folds,output_path,tmp_path,dtm,dtb,dtb_null,cover_map,c_l,dst_crs,fold_decimate,fat_step,close_dip,scheme,bbox,spacing,dip_grid,dip_dir_grid):
    geology = gpd.read_file(tmp_path+'geol_clip.shp')
    #contacts=np.genfromtxt(tmp_path+'interpolation_contacts_'+scheme+'.csv',delimiter=',',dtype='float')
    f=open(output_path+'fold_axial_trace_orientations2.csv','w')
    f.write('X,Y,Z,azimuth,dip,polarity,formation,group\n')
    folds_clip=gpd.read_file(path_folds,)
    fo=open(output_path+'fold_axial_traces.csv',"w")
    fo.write("X,Y,Z,code,type\n")
    dummy=[]
    dummy.append(1)

    for indx,fold in folds_clip.iterrows():
        fold_name=str(fold[c_l['o']])
        if(fold.geometry.type=='MultiLineString'):
            for mls in fold.geometry:
                fold_ls=LineString(mls)
                i=0
                first=True
                for afs in fold_ls.coords:
                    if(c_l['fold'] in fold[c_l['f']]):
                        # save out current geometry of FAT
                        if(m2l_utils.mod_safe(i,fold_decimate)==0 or i==int((len(fold_ls.coords)-1)/2) or i==len(fold_ls.coords)-1): #decimate to reduce number of points, but also take mid and end points of a series to keep some shape
                            locations=[(afs[0],afs[1])]
                            height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
                            ostr="{},{},{},FA_{},{}\n"\
                                .format(afs[0],afs[1],height,fold_name,fold[c_l['t']].replace(',',''))
                            #ostr=str(afs[0])+','+str(afs[1])+','+str(height)+','+'FA_'+fold_name+','+fold[c_l['t']].replace(',','')+'\n'
                            fo.write(ostr)
                            # calculate FAT normal offsets
                            if(not first):
                                l,m=m2l_utils.pts2dircos(lastx,lasty,afs[0],afs[1])
                                midx=lastx+((afs[0]-lastx)/2)
                                midy=lasty+((afs[1]-lasty)/2)
                                midxr=midx+(fat_step*-m)
                                midyr=midy+(fat_step*l)
                                midxl=midx-(fat_step*-m)
                                midyl=midy-(fat_step*l)
                                if(close_dip==-999):
                                    r=int((midy-bbox[1])/spacing)
                                    c=int((midx-bbox[0])/spacing)
                                    dip=dip_grid[r,c]
                                else:
                                    dip=close_dip
                                r=int((midy-bbox[1])/spacing)
                                c=int((midx-bbox[0])/spacing)
                                dip_dir=dip_dir_grid[r,c]
                                dip2,dipdir2=m2l_utils.dircos2ddd(-m,l,cos(radians(dip)))
                                if(c_l['syn'] in fold[c_l['t']]):
                                    dipdir2=dipdir2+180
                                lc=sin(radians(dip_dir-90))
                                mc=cos(radians(dip_dir-90))
                                dotprod=fabs((l*lc)+(m*mc))
                                #print(dotprod,midx,midy,minind,contacts[minind,2],l,m,lc,mc)
                                # if FAT is sub-parallel to local interpolated contacts, save out as orientations
                                if(dotprod>0.85):
                                    geometry = [Point(midxr,midyr)]
                                    gdf = GeoDataFrame(dummy, crs=dst_crs, geometry=geometry)
                                    structure_code = gpd.sjoin(gdf, geology, how="left", op="within")
                                    if(not str(structure_code.iloc[0][c_l['c']])=='nan'):
                                        locations=[(midxr,midyr)]
                                        height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
                                        ostr="{},{},{},{},{},{},{},{}\n"\
                                            .format(midxr,midyr,height,dipdir2,int(dip),1,str(structure_code.iloc[0][c_l['c']]).replace(" ","_").replace("-","_"),structure_code.iloc[0][c_l['g']])
                                        #ostr=str(midxr)+','+str(midyr)+','+str(height)+','+str(dipdir)+','+str(int(dip))+',1,'+str(structure_code.iloc[0][c_l['c']]).replace(" ","_").replace("-","_")+','+str(structure_code.iloc[0][c_l['g']])+'\n'
                                        f.write(ostr)
                                    geometry = [Point(midxl,midyl)]
                                    gdf = GeoDataFrame(dummy, crs=dst_crs, geometry=geometry)
                                    structure_code = gpd.sjoin(gdf, geology, how="left", op="within")
                                    if(not str(structure_code.iloc[0][c_l['c']])=='nan'):
                                        locations=[(midxl,midyl)]
                                        height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
                                        ostr="{},{},{},{},{},{},{},{}\n"\
                                            .format(midxl,midyl,height,dipdir2+180,int(dip),1,str(structure_code.iloc[0][c_l['c']]).replace(" ","_").replace("-","_"),structure_code.iloc[0][c_l['g']])
                                        #ostr=str(midxl)+','+str(midyl)+','+str(height)+','+str(dipdir+180)+','+str(int(dip))+',1,'+str(structure_code.iloc[0][c_l['c']]).replace(" ","_").replace("-","_")+','+str(structure_code.iloc[0][c_l['g']])+'\n'
                                        f.write(ostr)
                            first=False
                            lastx=afs[0]
                            lasty=afs[1]
                    i=i+1
        else:
            fold_ls=LineString(fold.geometry)
            i=0
            first=True
            for afs in fold_ls.coords:
                if(c_l['fold'] in fold[c_l['f']]):
                    # save out current geometry of FAT
                    if(m2l_utils.mod_safe(i,fold_decimate)==0 or i==int((len(fold_ls.coords)-1)/2) or i==len(fold_ls.coords)-1): #decimate to reduce number of points, but also take mid and end points of a series to keep some shape
                        locations=[(afs[0],afs[1])]
                        height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
                        ostr="{},{},{},FA_{},{}\n"\
                            .format(afs[0],afs[1],height,fold_name,fold[c_l['t']].replace(',',''))
                        #ostr=str(afs[0])+','+str(afs[1])+','+str(height)+','+'FA_'+fold_name+','+fold[c_l['t']].replace(',','')+'\n'
                        fo.write(ostr)
                        # calculate FAT normal offsets
                        if(not first):
                            l,m=m2l_utils.pts2dircos(lastx,lasty,afs[0],afs[1])
                            midx=lastx+((afs[0]-lastx)/2)
                            midy=lasty+((afs[1]-lasty)/2)
                            midxr=midx+(fat_step*-m)
                            midyr=midy+(fat_step*l)
                            midxl=midx-(fat_step*-m)
                            midyl=midy-(fat_step*l)
                            if(close_dip==-999):
                                r=int((midy-bbox[1])/spacing)
                                c=int((midx-bbox[0])/spacing)
                                dip=dip_grid[r,c]
                            else:
                                dip=close_dip
                            r=int((midy-bbox[1])/spacing)
                            c=int((midx-bbox[0])/spacing)
                            dipdir=dip_dir_grid[r,c]
                            dip2,dipdir2=m2l_utils.dircos2ddd(-m,l,cos(radians(dip)))
                            if(c_l['syn'] in fold[c_l['t']]):
                                dipdir2=dipdir2+180
                            lc=sin(radians(dipdir-90))
                            mc=cos(radians(dipdir-90))
                            dotprod=fabs((l*lc)+(m*mc))
                            #print(dotprod,midx,midy,minind,contacts[minind,2],l,m,lc,mc)
                            # if FAT is sub-parallel to local interpolated contacts, save out as orientations
                            if(dotprod>0.85):
                                geometry = [Point(midxr,midyr)]
                                gdf = GeoDataFrame(dummy, crs=dst_crs, geometry=geometry)
                                structure_code = gpd.sjoin(gdf, geology, how="left", op="within")
                                if(not str(structure_code.iloc[0][c_l['c']])=='nan'):
                                    locations=[(midxr,midyr)]
                                    height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
                                    ostr="{},{},{},{},{},{},{},{}\n"\
                                        .format(midxr,midyr,height,dipdir2,int(dip),1,str(structure_code.iloc[0][c_l['c']]).replace(" ","_").replace("-","_"),structure_code.iloc[0][c_l['g']])
                                    #ostr=str(midxr)+','+str(midyr)+','+str(height)+','+str(dipdir)+','+str(int(dip))+',1,'+str(structure_code.iloc[0][c_l['c']]).replace(" ","_").replace("-","_")+','+str(structure_code.iloc[0][c_l['g']])+'\n'
                                    f.write(ostr)

                                geometry = [Point(midxl,midyl)]
                                gdf = GeoDataFrame(dummy, crs=dst_crs, geometry=geometry)
                                structure_code = gpd.sjoin(gdf, geology, how="left", op="within")
                                if(not str(structure_code.iloc[0][c_l['c']])=='nan'):
                                    locations=[(midxl,midyl)]
                                    height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
                                    ostr="{},{},{},{},{},{},{},{}\n"\
                                        .format(midxl,midyl,height,dipdir2+180,int(dip),1,str(structure_code.iloc[0][c_l['c']]).replace(" ","_").replace("-","_"),structure_code.iloc[0][c_l['g']])
                                    #ostr=str(midxl)+','+str(midyl)+','+str(height)+','+str(dipdir+180)+','+str(int(dip))+',1,'+str(structure_code.iloc[0][c_l['c']]).replace(" ","_").replace("-","_")+','+str(structure_code.iloc[0][c_l['g']])+'\n'
                                    f.write(ostr)
                        first=False
                        lastx=afs[0]
                        lasty=afs[1]
                i=i+1

    fo.close()
    f.close()
    print("fold axial traces saved as",output_path+'fold_axial_traces.csv')
    print("fold axial trace orientations saved as",output_path+'fold_axial_trace_orientations2.csv')

####################################################
# Convert XZ section information to XY Model coordinates:
#
# section2model(seismic_line,seismic_bbox,sx,sy)
# Args:
# seismic_line geopandas object showing surface trace of seismic line
# seismic_bbox geopandas object defining TL,TR and BR coordinates of seismic interp
# sx,sy XZ coordinates of a position in the section
# returns XY coordinates in model space
###################################
def section2model(seismic_line,seismic_bbox,sx,sy):
    sx1=(sx-seismic_bbox.loc['TL'].geometry.x)/(seismic_bbox.loc['TR'].geometry.x-seismic_bbox.loc['TL'].geometry.x)
    sy1=(sy-seismic_bbox.loc['TR'].geometry.y)
    for indx,lines in seismic_line.iterrows():
        s_ls=LineString(lines.geometry)
        full_dist=s_ls.length
        break

    for indx,lines in seismic_line.iterrows():
        s_ls=LineString(lines.geometry)
        first=True
        cdist=0
        for seg in s_ls.coords:
            if(not first):
                dist=m2l_utils.ptsdist(seg[0],seg[1],lsegx,lsegy)
                cdist=cdist+dist
                norm_dist=cdist/full_dist
                if(sx1>last_norm_dist and sx1<norm_dist):
                    local_norm=((sx1-last_norm_dist)/(norm_dist-last_norm_dist))
                    mx=lsegx+((seg[0]-lsegx)*local_norm)
                    my=lsegy+((seg[1]-lsegy)*local_norm)
                    return(mx,my)
                lsegx=seg[0]
                lsegy=seg[1]
                last_norm_dist=norm_dist
            else:
                first=False
                lsegx=seg[0]
                lsegy=seg[1]
                last_norm_dist=0
    return(-999,-999)

####################################################
# Extract fault and group stratigraphy information from section:
#
# extract_section(tmp_path,output_path,seismic_line,seismic_bbox,seismic_interp,dtm,surface_cut)
# Args:
# tmp_path path to tmp directory
# output_path path to output directory
# seismic_line geopandas object showing surface trace of seismic line
# seismic_bbox geopandas object defining TL,TR and BR coordinates of seismic interp
# seismic_interp geopandas object containing interpreted faults and strat surfaces as polylines
# dtm projected dtm grid as rasterio object
# surface_cut shallowest level to extract from section (in section metre coordinates)
###################################
def extract_section(tmp_path,output_path,seismic_line,seismic_bbox,seismic_interp,dtm,dtb,dtb_null,cover_map,surface_cut):
    fault_clip_file=tmp_path+'faults_clip.shp'
    faults = gpd.read_file(fault_clip_file) #import faults
    all_sorts=pd.read_csv(tmp_path+'all_sorts2.csv',",")
    sf=open(output_path+'seismic_faults.csv',"w")
    sf.write('X,Y,Z,formation\n')
    sb=open(output_path+'seismic_base.csv',"w")
    sb.write('X,Y,Z,formation\n')
    for indx,interps in seismic_interp.iterrows():
        i_ls=LineString(interps.geometry)
        for seg in i_ls.coords:
            mx,my=section2model(seismic_line,seismic_bbox,seg[0],seg[1])
            if( mx != -999 and my != -999):
                mz=seismic_bbox.loc['BR']['DEPTH']*(seismic_bbox.loc['TR'].geometry.y-seg[1])/(seismic_bbox.loc['TR'].geometry.y-seismic_bbox.loc['BR'].geometry.y)
                locations=[(mx,my)]
                height= m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
                if(not height==-999 and mz>surface_cut):
                    mz2=-mz+float(height)
                    #print(mx,my,mz,height,mz2)
                    if(str(interps['IDENT'])=='None'):
                        ident='None'
                    else:
                        ident=str(interps['IDENT'])
                    if('Base' in interps['FEATURE']):
                        maxfm=0
                        maxname=''
                        for indx,formation in all_sorts.iterrows():
                            if(formation['group'] in interps['IDENT'] and formation['index in group']>maxfm):
                                maxfm=formation['index in group']
                                maxname=formation['code']
                        ostr="{},{},{},{}\n"\
                            .format(mx,my,mz2,maxname)
                        #ostr=str(mx)+','+str(my)+','+str(mz2)+','+maxname+'\n'
                        sb.write(ostr)
                    else:
                        for indx,aflt in faults.iterrows():
                            if(not str(aflt['NAME'])=='None' and not ident == 'None'):
                                fname=aflt['NAME'].replace(" ","_")
                                if(fname in interps['IDENT'] ):
                                    fault_id='Fault_'+str(aflt['OBJECTID'])
                                    ostr="{},{},{},{}\n"\
                                        .format(mx,my,mz2,fault_id)
                                    #ostr=str(mx)+','+str(my)+','+str(mz2)+','+fault_id+'\n'
                                    sf.write(ostr)
                                    break
    sf.close()
    sb.close()

####################################################
# Calculate polarity of bedding:
#
# save_orientations_with_polarity(orientations_path,path_out,c_l,basal_path,all_sorts_path)
# Args:
# orientations_path path to orientations file
# path_out path to output directory
# c_l dictionary of codes and labels specific to input geo information layers
# basal_path path to basal contacts file
# all_sorts_path path to all_sorted_formations file
#
# -999 means couldn't calculate polarity
# 0 means inverted
# 1 means normal polarity
###################################
def save_orientations_with_polarity(orientations_path,path_out,c_l,basal_path,all_sorts_path):
    buffer=10000
    contact_lines = gpd.read_file(basal_path)
    all_sorts=pd.read_csv(all_sorts_path,",")
    orientations=pd.read_csv(orientations_path,",")
    codes=all_sorts['code'].unique()
    all_sorts.set_index('code', inplace = True)

    f=open(path_out+'orientations_polarity.csv','w')
    f.write("X,Y,Z,azimuth,dip,polarity,formation\n")

    for indx,anori in orientations.iterrows(): # loop through orientations
        l,m,n=m2l_utils.ddd2dircos(float(anori["dip"]),float(anori["azimuth"])+90.0)
        l2=l/sqrt((l*l)+(m*m))
        m2=m/sqrt((l*l)+(m*m))

        dx1=0
        dy1=0
        dx2=m2*buffer
        dy2=-l2*buffer
        p1=Point((dx1+float(anori["X"]),dy1+float(anori["Y"])))
        p2=Point((dx2+float(anori["X"]),dy2+float(anori["Y"])))
        ddline=LineString((p1,p2))
        orig = Point((float(anori["X"]),float(anori["Y"])))

        close_dist=1e9
        close_fm=''
        close_x=0
        close_y=0

        for indx2,acontact in contact_lines.iterrows(): #loop through distinct linestrings dipdir +180
            if(acontact[c_l['c']] in codes):
                if(not str(acontact.geometry)=='None'):
                    isects=ddline.intersection(acontact.geometry)
                    if(isects.geom_type=="MultiPoint"):
                        for pt in isects:
                            if(pt.distance(orig)<buffer*2):
                                dist=m2l_utils.ptsdist(float(anori["X"]),float(anori["Y"]),pt.x,pt.y)
                                if(dist<close_dist):
                                    close_dist=dist
                                    close_fm=acontact[c_l['c']]
                                    close_x=pt.x
                                    close_y=pt.y
                                    sign=1
                    elif(isects.geom_type=="Point"):
                        if(isects.distance(orig)<buffer*2):
                            dist=m2l_utils.ptsdist(float(anori["X"]),float(anori["Y"]),isects.x,isects.y)
                            if(dist<close_dist):
                                close_dist=dist
                                close_fm=acontact[c_l['c']]
                                close_x=isects.x
                                close_y=isects.y
                                sign=1

        dx2=-m2*buffer
        dy2=l2*buffer
        p1=Point((dx1+float(anori["X"]),dy1+float(anori["Y"])))
        p2=Point((dx2+float(anori["X"]),dy2+float(anori["Y"])))
        ddline=LineString((p1,p2))

        for indx2,acontact in contact_lines.iterrows(): #loop through distinct linestrings dipdir
            if(acontact[c_l['c']] in codes):
                if(not str(acontact.geometry)=='None'):
                    isects=ddline.intersection(acontact.geometry)
                    if(isects.geom_type=="MultiPoint"):
                        for pt in isects:
                            if(pt.distance(orig)<buffer*2):
                                dist=m2l_utils.ptsdist(float(anori["X"]),float(anori["Y"]),pt.x,pt.y)
                                if(dist<close_dist):
                                    close_dist=dist
                                    close_fm=acontact[c_l['c']]
                                    close_x=pt.x
                                    close_y=pt.y
                                    sign=0
                    elif(isects.geom_type=="Point"):
                        if(isects.distance(orig)<buffer*2):
                            dist=m2l_utils.ptsdist(float(anori["X"]),float(anori["Y"]),isects.x,isects.y)
                            if(dist<close_dist):
                                close_dist=dist
                                close_fm=acontact[c_l['c']]
                                close_x=isects.x
                                close_y=isects.y
                                sign=0

        if(not close_fm ==''):
            #print(sign,anori["formation"],close_fm,int(all_sorts.loc[anori["formation"]]["index"]),int(all_sorts.loc[close_fm]["index"]))
            if(sign==1):
                if(int(all_sorts.loc[anori["formation"]]["index"])<=int(all_sorts.loc[close_fm]["index"]) and close_dist < buffer*2):
                    polarity=1
                else:
                    polarity=0
            else:
                if(int(all_sorts.loc[anori["formation"]]["index"])<int(all_sorts.loc[close_fm]["index"]) and close_dist < buffer*2):
                    polarity=0
                else:
                    polarity=1
        else: #failed to find contact close enough defined by buffer
            polarity=-999
        ostr="{},{},{},{},{},{},{}\n"\
            .format(anori['X'],anori['Y'],anori['Z'],anori['azimuth'],anori['dip'],polarity,anori['formation'])
        #ostr=str(anori['X'])+","+str(anori['Y'])+","+str(anori['Z'])+","+str(anori['azimuth'])+","+str(anori['dip'])+","+str(polarity)+","+str(anori['formation'])+"\n"
        f.write(ostr)
    f.close()
    print('orientations saved to',path_out+'orientations_polarity.csv')

####################################################
# Calculate stratigraphic and absolute minimum stratigraphic offset of faults:
#
# fault_strat_offset(path_out,c_l,dst_crs,fm_thick_file, all_sorts_file,fault_file,geol_file,fault_dim_file)
# Args:
# path_out path to output directory
# c_l dictionary of codes and labels specific to input geo information layers
# dst_crs Coordinate Reference System of vector files
# fm_thick_file path to summary formation thicknesses file
# all_sorts_file path to all_sorted_formations file
# fault_file path to fault shapefile
# geol_file path to geology polygon shapefile
# fault_dim_file path to fault dimensions file
#
# Stratigraphic offset is the difference in stratigraphically sorted indices of formations across a fault
# Absolute minimum stratigraphic offset is based on the calculated formation thicknesses separating two units across a fault
###################################
def fault_strat_offset(path_out,c_l,dst_crs,fm_thick_file, all_sorts_file,fault_file,geol_file,fault_dim_file):
    fm_thick=pd.read_csv(fm_thick_file,",",index_col=False)
    formations=fm_thick['formation'].unique()
    all_sorts=pd.read_csv(all_sorts_file,",")
    codes=all_sorts['code'].unique()
    faults = gpd.read_file(fault_file)
    geology = gpd.read_file(geol_file)

    als_thick=[["index","group number","index in group","number in group","code","group","uctype","thickness median"]]
    index=0
    for ias,als in all_sorts.iterrows():
        found=False
        for ifm,fm in fm_thick.iterrows():
            if (als["code"]==fm["formation"]):
                als_thick+=[[index,als["group number"],als["index in group"],als["number in group"],als["code"],als["group"],"erode",fm["thickness median"]]]
                index=index+1
                found=True
                break
        if(not found):
            als_thick+=[[index,als["group number"],als["index in group"],als["number in group"],als["code"],als["group"],"erode",0]]
            index=index+1

    column_names = als_thick.pop(0)
    new_als = pd.DataFrame(als_thick, columns=column_names)
    fm_no=len(new_als)

    #create and fill array providing minimum displacement for all possible strat combinations
    fm_thick_arr=np.zeros((fm_no,fm_no))
    for i in range(0,fm_no-1):
        thick_diff=0
        fm_thick_arr[i,0]=0
        fm_thick_arr[i,1]=0
        for j in range (i+1,fm_no-1):
            thick_diff=thick_diff+new_als.iloc[j]['thickness median']
            fm_thick_arr[i,j+1]=thick_diff

    np.savetxt(path_out+'fault_strat_offset_array.csv',fm_thick_arr,delimiter=',')
    new_als.set_index('code', inplace = True)

    all_long_faults=np.genfromtxt(fault_dim_file,delimiter=',',dtype='U100')
    fault_names=all_long_faults[1:,:1]

    f=open(path_out+'fault_strat_offset3.csv','w')
    f.write('X,Y,id,left_fm,right_fm,min_offset,strat_offset\n')

    for index,fault in faults.iterrows():
        if('Fault_'+str(fault[c_l['o']]) in fault_names):
            lcoords=[]
            rcoords=[]
            index=[]
            for i in range (0,len(fault.geometry.coords)-1):
                midx=fault.geometry.coords[i][0]+((fault.geometry.coords[i+1][0]-fault.geometry.coords[i][0])/2.0)
                midy=fault.geometry.coords[i][1]+((fault.geometry.coords[i+1][1]-fault.geometry.coords[i][1])/2.0)
                l,m=m2l_utils.pts2dircos(fault.geometry.coords[i][0],fault.geometry.coords[i][1],fault.geometry.coords[i+1][0],fault.geometry.coords[i+1][1])
                lcoords.append([(midx+(10*m),midy-(10*l))])
                rcoords.append([(midx-(10*m),midy+(10*l))])
                index.append([(i)])

            lgeom=[Point(xy) for xy in lcoords]
            rgeom=[Point(xy) for xy in rcoords]
            lgdf = GeoDataFrame(index, crs=dst_crs, geometry=lgeom)
            rgdf = GeoDataFrame(index, crs=dst_crs, geometry=rgeom)
            lcode = gpd.sjoin(lgdf, geology, how="left", op="within")
            rcode = gpd.sjoin(rgdf, geology, how="left", op="within")

            for i in range (0,len(fault.geometry.coords)-1):
                if(not str(lcode.iloc[i][c_l['c']])=='nan' and not str(rcode.iloc[i][c_l['c']])=='nan'):
                    lcode_fm=lcode.iloc[i][c_l['c']].replace(" ","_").replace("-","_").replace("\n","")
                    rcode_fm=rcode.iloc[i][c_l['c']].replace(" ","_").replace("-","_").replace("\n","")

                    if(lcode_fm in codes and rcode_fm in codes and lcode_fm in formations and rcode_fm in formations ):
                        midx=lcode.iloc[i].geometry.x+((rcode.iloc[i].geometry.x-lcode.iloc[i].geometry.x)/2)
                        midy=lcode.iloc[i].geometry.y+((rcode.iloc[i].geometry.y-lcode.iloc[i].geometry.y)/2)
                        fm_l= int(new_als.loc[lcode_fm]["index"])
                        fm_r= int(new_als.loc[rcode_fm]["index"])
                        if(fm_l>fm_r):
                            t=fm_l
                            fm_l=fm_r
                            fm_r=t
                        diff=fm_r-fm_l
                        number_string = str(diff)
                        diff = number_string.zfill(3)
                        ostr="{},{},Fault_{},{},{},{},{}\n"\
                            .format(midx,midy,fault[c_l['o']],lcode_fm,rcode_fm,fm_thick_arr[fm_l,fm_r],diff)
                        #ostr=str(midx)+','+str(midy)+','+str('Fault_'+str(fault[c_l['o']]))+','+str(lcode_fm)+','+str(rcode_fm)+','+str(fm_thick_arr[fm_l,fm_r])+","+str(diff)+'\n'
                    elif(lcode_fm in codes and rcode_fm in codes):
                        midx=lcode.iloc[i].geometry.x+((rcode.iloc[i].geometry.x-lcode.iloc[i].geometry.x)/2)
                        midy=lcode.iloc[i].geometry.y+((rcode.iloc[i].geometry.y-lcode.iloc[i].geometry.y)/2)
                        fm_l= int(new_als.loc[lcode_fm]["index"])
                        fm_r= int(new_als.loc[rcode_fm]["index"])
                        if(fm_l>fm_r):
                            t=fm_l
                            fm_l=fm_r
                            fm_r=t
                        diff=fm_r-fm_l
                        number_string = str(diff)
                        diff = number_string.zfill(3)
                        ostr="{},{},Fault_{},{},{},{},{}\n"\
                            .format(midx,midy,fault[c_l['o']],lcode_fm,rcode_fm,'-1',diff)
                        #ostr=str(midx)+','+str(midy)+','+str('Fault_'+str(fault[c_l['o']]))+','+str(lcode_fm)+','+str(rcode_fm)+','+str('-1')+","+str(diff)+'\n'
                    else:
                        ostr="{},{},Fault_{},{},{},{},{}\n"\
                            .format(midx,midy,fault[c_l['o']],'','','-1','-1')
                        #ostr=str(midx)+','+str(midy)+','+str('Fault_'+str(fault[c_l['o']]))+','+str('')+','+str('')+','+str('-1')+','+str('-1')+'\n'
                    f.write(ostr)
    f.close()
    print('minimum stratigraphic offsets saved as',path_out+'fault_strat_offset3.csv' )
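
####################################################
# Illustrative sketch only (not part of the original module): how the
# cumulative-thickness matrix built in fault_strat_offset() can be queried.
# fm_thick_arr[i,j] holds the summed median thicknesses of the units lying
# between stratigraphic indices i and j, i.e. the absolute minimum offset
# needed to juxtapose them across a fault. The helper name is an assumption
# for illustration and is never called in this file.
####################################################
def _example_min_offset(fm_thick_arr,fm_l,fm_r):
    # order the two indices so the lookup matches how the matrix is filled
    # (smaller stratigraphic index as the row)
    if(fm_l>fm_r):
        fm_l,fm_r=fm_r,fm_l
    return fm_thick_arr[fm_l,fm_r]
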
##########################################################
# Extract cover thickness dip and contact info from depth to basement grid
# and/or vector cover shapefile
##########################################################
def process_cover(output_path,dtm,dtb,dtb_null,cover,cover_map,cover_dip,bbox,dst_crs,spacing,contact_decimate,use_vector,use_grid):
    if(use_grid and use_vector): #assumes a grid of depth to cover, with a defined null value for no cover, and a vector description of cover limits
        nx=int((bbox[2]-bbox[0])/spacing)
        ny=int((bbox[3]-bbox[1])/spacing)
        x = np.linspace(bbox[0],bbox[2], nx)
        y = np.linspace(bbox[1],bbox[3], ny)
        xi, yi = np.meshgrid(x, y)
        xi, yi = xi.flatten(), yi.flatten()
        df = pd.DataFrame({'X': xi,'Y': yi})
        df['coords'] = list(zip(df['X'], df['Y']))
        df['coords'] = df['coords'].apply(Point)
        cover_pts = gpd.GeoDataFrame(df, geometry='coords')
        cover_pts.crs = dst_crs

        actual_cover = gpd.sjoin(cover_pts, cover, how="left", op="within")
        actual_cover["index_right"] = actual_cover["index_right"].fillna(0)

        allpts=open(output_path+'/cover_grid.csv',"w")
        allpts.write('X,Y,Z,formation\n')
        for indx,pt in actual_cover.iterrows():
            if(pt['index_right']>0):
                locations=[(pt['X'],pt['Y'])]
                height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
                ostr="{},{},{},{}\n"\
                    .format(pt['X'],pt['Y'],height,'cover')
                #ostr=str(pt['X'])+','+str(pt['Y'])+','+str(height)+',cover\n'
                allpts.write(ostr)

        for indx,cpoly in cover.iterrows():
            coords=extract_poly_coords(cpoly.geometry,0) ### need to ignore points outside bbox and make poly of bbox
            k=0
            for pt in coords['exterior_coords']:
                if(m2l_utils.mod_safe(k,contact_decimate)==0 or k==int((len(coords['exterior_coords'])-1)/2) or k==len(coords['exterior_coords'])-1): #decimate to reduce number of points, but also take second and third point of a series
                    locations=[(pt[0],pt[1])]
                    if(pt[0] > bbox[0] and pt[0] < bbox[2] and pt[1] > bbox[1] and pt[1] < bbox[3]):
                        height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,False,locations)
                        ostr="{},{},{},{}\n"\
                            .format(pt[0],pt[1],height,'cover')
                        #ostr=str(pt[0])+","+str(pt[1])+","+height+",cover\n"
                        allpts.write(ostr)
                k=k+1

            if(len(coords['interior_coords'])>0):
                for i in range(0,len(coords['interior_coords']),2):
                    for pts in coords['interior_coords'][i+1:i+2]:
                        for pt in pts:
                            if(m2l_utils.mod_safe(k,contact_decimate)==0 or k==int((len(coords['interior_coords'])-1)/2) or k==len(coords['interior_coords'])-1): #decimate to reduce number of points, but also take second and third point of a series
                                locations=[(pt[0],pt[1])]
                                if(pt[0] > bbox[0] and pt[0] < bbox[2] and pt[1] > bbox[1] and pt[1] < bbox[3]):
                                    height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,False,locations)
                                    ostr="{},{},{},{}\n"\
                                        .format(pt[0],pt[1],height,'cover')
                                    #ostr=str(pt[0])+","+str(pt[1])+","+height+",cover\n"
                                    allpts.write(ostr)
                            k=k+1

        allpts.close()
        print("cover grid saved out as",output_path+'cover_grid.csv')

    elif(use_grid and not use_vector): #assumes a grid of depth to cover, with a defined null value for no cover, but no vector description of cover limits
        nx=int((bbox[2]-bbox[0])/spacing)
        ny=int((bbox[3]-bbox[1])/spacing)
        x = np.linspace(bbox[0],bbox[2], nx)
        y = np.linspace(bbox[1],bbox[3], ny)
        xi, yi = np.meshgrid(x, y)
        xi, yi = xi.flatten(), yi.flatten()
        df = pd.DataFrame({'X': xi,'Y': yi})
        df['coords'] = list(zip(df['X'], df['Y']))
        df['coords'] = df['coords'].apply(Point)
        cover_pts = gpd.GeoDataFrame(df, geometry='coords')
        cover_pts.crs = dst_crs

        allpts=open(output_path+'/cover_grid.csv',"w")
        allpts.write('X,Y,Z,formation\n')
        for indx,pt in cover_pts.iterrows():
            if(pt['index_right']>0):
                locations=[(pt['X'],pt['Y'])]
                height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
                ostr="{},{},{},{}\n"\
                    .format(pt['X'],pt['Y'],height,'cover')
                #ostr=str(pt['X'])+','+str(pt['Y'])+','+str(height)+',cover\n'
                allpts.write(ostr)
        allpts.close()
        print("cover grid saved out as",output_path+'cover_grid.csv')

    if(use_vector): # assume vector of limits of cover
        allo=open(output_path+'cover_orientations.csv',"w")
        allo.write('X,Y,Z,azimuth,dip,polarity,formation\n')

        for indx,cpoly in cover.iterrows():
            coords=extract_poly_coords(cpoly.geometry,0) ### need to ignore points outside bbox and make poly of bbox
            k=0
            first=True
            for pt in coords['exterior_coords']:
                if(first):
                    lastx=pt[0]
                    lasty=pt[1]
                    first=False
                if(m2l_utils.mod_safe(k,contact_decimate)==0 or k==int((len(coords['exterior_coords'])-1)/2) or k==len(coords['exterior_coords'])-1): #decimate to reduce number of points, but also take second and third point of a series
                    locations=[(pt[0],pt[1])]
                    if(pt[0] > bbox[0] and pt[0] < bbox[2] and pt[1] > bbox[1] and pt[1] < bbox[3]):
                        dlsx=lastx-pt[0]
                        dlsy=lasty-pt[1]
                        lastx=pt[0]
                        lasty=pt[1]
                        if(not dlsx+dlsy==0.0):
                            lsx=dlsx/sqrt((dlsx*dlsx)+(dlsy*dlsy))
                            lsy=dlsy/sqrt((dlsx*dlsx)+(dlsy*dlsy))
                            locations=[(pt[0],pt[1])]
                            height= m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,False,locations)
                            azimuth=(180+degrees(atan2(lsy,-lsx)))%360 #normal to line segment
                            testpx=pt[0]-lsy # pt just a bit in/out from line
                            testpy=pt[1]+lsx
                            if Polygon(cpoly.geometry).contains(Point(testpx, testpy)):
                                azimuth=(azimuth)%360
                            else:
                                azimuth=(azimuth-180)%360
                            ostr="{},{},{},{},{},{},{}\n"\
                                .format(pt[0],pt[1],height,azimuth,cover_dip,'1','cover')
                            #ostr=str(pt[0])+","+str(pt[1])+","+str(height)+","+str(azimuth)+","+str(cover_dip)+",1,cover\n"
                            allo.write(ostr)
                k=k+1

            first=True
            if(len(coords['interior_coords'])>0):
                for i in range(0,len(coords['interior_coords']),2):
                    for pts in coords['interior_coords'][i+1:i+2]:
                        for pt in pts:
                            if(first):
                                lastx=pt[0]
                                lasty=pt[1]
                                first=False
                            if(m2l_utils.mod_safe(k,contact_decimate)==0 or k==int((len(coords['interior_coords'])-1)/2) or k==len(coords['interior_coords'])-1): #decimate to reduce number of points, but also take second and third point of a series
                                locations=[(pt[0],pt[1])]
                                if(pt[0] > bbox[0] and pt[0] < bbox[2] and pt[1] > bbox[1] and pt[1] < bbox[3]):
                                    dlsx=lastx-pt[0]
                                    dlsy=lasty-pt[1]
                                    lastx=pt[0]
                                    lasty=pt[1]
                                    if(not dlsx+dlsy==0.0):
                                        lsx=dlsx/sqrt((dlsx*dlsx)+(dlsy*dlsy))
                                        lsy=dlsy/sqrt((dlsx*dlsx)+(dlsy*dlsy))
                                        locations=[(pt[0],pt[1])]
                                        height= m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,False,locations)
                                        azimuth=(180+degrees(atan2(lsy,-lsx)))%360 #normal to line segment
                                        testpx=pt[0]-lsy # pt just a bit in/out from line
                                        testpy=pt[1]+lsx
                                        if Polygon(cpoly.geometry).contains(Point(testpx, testpy)):
                                            azimuth=(azimuth)%360
                                        else:
                                            azimuth=(azimuth-180)%360
                                        ostr="{},{},{},{},{},{},{}\n"\
                                            .format(pt[0],pt[1],height,azimuth,cover_dip,'1','cover')
                                        #ostr=str(pt[0])+","+str(pt[1])+","+str(height)+","+str(azimuth)+","+str(cover_dip)+",1,cover\n"
                                        allo.write(ostr)
                            k=k+1

    elif(use_grid and not use_vector): # assumes grid but no vector of limits of cover
        allo=open(output_path+'cover_orientations.csv',"w")
        allo.write('X,Y,Z,azimuth,dip,polarity,formation\n')

        midx=bbox[0]+((bbox[2]-bbox[0])/2)
        midy=bbox[1]+((bbox[3]-bbox[1])/2)
        ostr="{},{},{},{},{},{},{}\n"\
            .format(midx,midy,'0','0','0','1','cover')
        #ostr=str(midx)+","+str(midy)+","+str(0)+","+str(0)+","+str(0)+",1,cover\n"
        allo.write(ostr)

    allo.close()
    print("cover orientations saved out as",output_path+'cover_orientations.csv')
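
####################################################
# Illustrative sketch only (not part of the original module): the
# segment-normal azimuth test used in process_cover() above, pulled out as a
# stand-alone helper. Given two consecutive boundary vertices of a cover
# polygon it returns an azimuth oriented toward the polygon interior,
# mirroring the in/out probe used above. The helper name is an assumption
# for illustration and is never called in this file.
####################################################
def _example_inward_azimuth(poly,lastx,lasty,x,y):
    # these imports mirror what this module already relies on
    from math import sqrt,degrees,atan2
    from shapely.geometry import Point
    dlsx=lastx-x
    dlsy=lasty-y
    norm=sqrt((dlsx*dlsx)+(dlsy*dlsy))
    if(norm==0.0):
        return None
    lsx=dlsx/norm
    lsy=dlsy/norm
    azimuth=(180+degrees(atan2(lsy,-lsx)))%360 # normal to the line segment
    # probe a point just off the segment; flip if the probe falls outside
    if(poly.contains(Point(x-lsy,y+lsx))):
        return azimuth%360
    return (azimuth-180)%360
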
##########################################################
# Save out dip info along basal contacts, dip defined, dip direction normal to local vector
##########################################################
def save_basal_contacts_orientations_csv(contacts,orientations,geol_clip,tmp_path,output_path,dtm,dtb, dtb_null,cover_map,contact_decimate,c_l,contact_dip,dip_grid,spacing,bbox):
    interpolated_combo_file=tmp_path+'combo_full.csv'
    #orientations=pd.read_csv(interpolated_combo_file)
    f=open(output_path+'contact_orientations.csv','w')
    f.write("X,Y,Z,azimuth,dip,polarity,formation\n")
    for index,contact in contacts.iterrows():
        i=0
        #print(contact[c_l['c']])
        first=True
        if(not str(contact.geometry) == 'None'):
            if contact.geometry.type == 'MultiLineString':
                for line in contact.geometry:
                    first_in_line=True
                    if(m2l_utils.mod_safe(i,contact_decimate)==0):
                        if(first):
                            lastx=line.coords[0][0]
                            lasty=line.coords[0][1]
                            first=False
                        else:
                            l,m=m2l_utils.pts2dircos(lastx,lasty,line.coords[0][0],line.coords[0][1])
                            midx=lastx+((line.coords[0][0]-lastx)/2)
                            midy=lasty+((line.coords[0][1]-lasty)/2)
                            lastx=line.coords[0][0]
                            lasty=line.coords[0][1]
                            if(first_in_line):
                                found_code=''
                                for indx,apoly in geol_clip.iterrows():
                                    testpt=Point((midx-m,midy+l))
                                    if(apoly.geometry.contains(testpt)):
                                        found_code=apoly[c_l['c']]
                                        break
                                if(not found_code==''):
                                    polarity=0
                                else:
                                    polarity=1
                                first_in_line=False
                            dip,dipdir=m2l_utils.dircos2ddd(-m,l,0)
                            if(polarity==1):
                                dipdir=fmod(dipdir+180,360)
                            locations=[(midx,midy)]
                            height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
                            if(contact_dip==-999):
                                r=int((midy-bbox[1])/spacing)
                                c=int((midx-bbox[0])/spacing)
                                dip=dip_grid[r,c]
                            else:
                                dip=contact_dip
                            ostr="{},{},{},{},{},{},{}\n"\
                                .format(midx,midy,height,dipdir,str(dip),'1',str(contact[c_l['c']]).replace(" ","_").replace("-","_"))
                            #ostr=str(midx)+','+str(midy)+','+str(height)+','+str(dipdir)+','+str(contact_dip)+',1,'+str(contact[c_l['c']]).replace(" ","_").replace("-","_")+'\n'
                            f.write(ostr)
                    else:
                        lastx=line.coords[0][0]
                        lasty=line.coords[0][1]
                    i=i+1
    f.close()
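
####################################################
# Illustrative sketch only (not part of the original module): the grid lookup
# convention used repeatedly above (calc_thickness_with_grid,
# save_fold_axial_traces_orientations, save_basal_contacts_orientations_csv).
# A map coordinate is converted to a row/column of the interpolated dip grids
# by offsetting from the bounding-box origin and dividing by the grid spacing.
# The helper name is an assumption for illustration and is never called here.
####################################################
def _example_grid_lookup(dip_grid,dip_dir_grid,x,y,bbox,spacing):
    # bbox = (minx,miny,maxx,maxy); rows count up from the southern edge and
    # columns from the western edge, matching the code above
    r=int((y-bbox[1])/spacing)
    c=int((x-bbox[0])/spacing)
    return dip_grid[r,c],dip_dir_grid[r,c]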