#!/usr/bin/env python3
"""A GraphQL schema for TANGO."""

import os

import graphene

from tangogql.schema.query import Query
from tangogql.schema.subscription import Subscription
from tangogql.schema.mutations import DatabaseMutations
from tangogql.schema.attribute import ScalarDeviceAttribute
from tangogql.schema.attribute import ImageDeviceAttribute
from tangogql.schema.attribute import SpectrumDeviceAttribute
from tangogql.schema.log import ExcuteCommandUserAction
from tangogql.schema.log import SetAttributeValueUserAction
from tangogql.schema.log import PutDevicePropertyUserAction
from tangogql.schema.log import DeleteDevicePropertyUserAction

# Disable mutations when the READ_ONLY environment variable is set.
MODE = bool(os.environ.get('READ_ONLY'))
mutation = None if MODE else DatabaseMutations

tangoschema = graphene.Schema(query=Query,
                              mutation=mutation,
                              subscription=Subscription,
                              types=[ScalarDeviceAttribute,
                                     ImageDeviceAttribute,
                                     SpectrumDeviceAttribute,
                                     ExcuteCommandUserAction,
                                     SetAttributeValueUserAction,
                                     PutDevicePropertyUserAction,
                                     DeleteDevicePropertyUserAction])
def requestAsyncWaveform(outputFormat, tb, te, proj, net, sta, loc, cha, label, JWT):
    import requests
    import json
    import configparser
    from halo import Halo

    # config
    config = configparser.ConfigParser()
    CONFIG_FILE = 'fetch.cfg'
    config.read(CONFIG_FILE)
    url = config.get('DEFAULT', 'url')
    headers = {'Content-Type': 'application/json',
               'Authorization': 'JWT ' + JWT}

    asyncWaveformRequestMutation = """mutation {
        asyncWaveformRequestMutation(
            outputFormat: "%s",
            tb: "%s",
            te: "%s",
            proj: "%s",
            net: "%s",
            sta: "%s",
            loc: "%s",
            cha: "%s",
            label: "%s",
        ) {
            success
            text
            orderId
        }
    }""" % (outputFormat, tb, te, proj, net, sta, loc, cha, label)
    # print(asyncWaveformRequestMutation)

    # request
    with Halo(text='Processing', spinner='dots'):
        try:
            r = requests.post(url,
                              json={'query': asyncWaveformRequestMutation},
                              headers=headers,
                              verify=False)
        except requests.exceptions.RequestException as e:
            print('A network error occurred.', e)
        else:
            queryResults = json.loads(r.text)
            result = queryResults["data"]["asyncWaveformRequestMutation"]
            success = result["success"]
            if not success:
                text = result["text"]
                print(f'\n{text}')
                res = {'success': success, 'text': text}
                return res
            else:
                text = result["text"]
                orderId = result["orderId"]
                print(f'\n{text}\norderID: {orderId}')
                res = {'success': success, 'text': text, 'orderId': orderId}
                return res
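A hedged usage sketch for the helper above; every argument value is a placeholder rather than a value from the source, and a fetch.cfg with a DEFAULT url entry is assumed to exist next to the script.

res = requestAsyncWaveform(
    outputFormat="mseed",
    tb="2021-01-01T00:00:00", te="2021-01-02T00:00:00",
    proj="PROJ", net="NET", sta="STA", loc="00", cha="HHZ",
    label="example-order", JWT="<jwt-token>")
if res and res.get("success"):
    print("submitted order:", res["orderId"])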
import json
from pathlib import Path
from collections import defaultdict
from typing import List

from .fieldnames import *


class FastCatAnnotation:
    """Parse a FastCAT annotation file into labelled frame ranges ("flanks")."""

    def __init__(self, path: Path, cfg):
        assert path.exists()
        self.skip_frame_factor = cfg["skip_frame_factor"]
        self.fast_cat_allowed_labels = cfg["fast_cat_allowed_labels"]
        self.path = path
        # Assumption: the FastCAT annotation is stored as a JSON document at `path`.
        self.annotation = json.loads(path.read_text())
        self.flanks = self.read_fastcat_annotation()
        self.labels = self.flanks_to_labels()

    def read_fastcat_annotation(self, skip_frame_factor: int = None,
                                allowed_label_list: List[str] = None):
        # Validate metadata
        if not skip_frame_factor:
            skip_frame_factor = self.skip_frame_factor
        if not allowed_label_list:
            allowed_label_list = self.fast_cat_allowed_labels
        for label in self.annotation["metadata"]["classes"]:
            assert label in allowed_label_list

        label_list = [_.split(".")[0] for _ in allowed_label_list]
        label_list = list(set(label_list))
        flanks = {_: {"start": [], "stop": []} for _ in label_list}

        # Collect the start/stop frame numbers for every label.
        for superframe in self.annotation["superframes"]:
            for frame_annotation in superframe["childFrames"]:
                for label in frame_annotation["classes"]:
                    label, param = label.split(".")
                    flanks[label][param].append(frame_annotation["frameNumber"])

        # Pair each start frame with the matching stop frame.
        for key, value in flanks.items():
            assert len(value["start"]) == len(value["stop"])
            flanks[key]["start"].sort()
            flanks[key]["stop"].sort()
            _matched = []
            for i, _start in enumerate(flanks[key]["start"]):
                _stop = flanks[key]["stop"][i]
                assert _stop >= _start
                _matched.append((_start, _stop))
            flanks[key] = _matched

        # Flanks of the same label must not overlap.
        for key, value in flanks.items():
            for i, _flank in enumerate(value):
                if i == 0:
                    continue
                assert _flank[0] > value[i - 1][1]

        flanks["skip_frame_factor"] = skip_frame_factor
        return flanks

    def flanks_to_labels(self, flanks_dict: dict = None):
        if not flanks_dict:
            flanks_dict = self.flanks
        frame_labels = defaultdict(dict)
        skip_frame_factor = flanks_dict["skip_frame_factor"]
        for label, _flanks in flanks_dict.items():
            if label == "skip_frame_factor":
                continue
            for _flank in _flanks:
                for n_frame in range(_flank[0], _flank[1] + 1):
                    frame_labels[n_frame * skip_frame_factor][label] = True
        return frame_labels
import os
from subprocess import Popen, PIPE, STDOUT
import logging

import mlflow
import mlflow.version
from mlflow.utils.file_utils import TempDir, _copy_project
from mlflow.utils.logging_utils import eprint

_logger = logging.getLogger(__name__)

DISABLE_ENV_CREATION = "MLFLOW_DISABLE_ENV_CREATION"

_DOCKERFILE_TEMPLATE = """
# Build an image that can serve mlflow models.
FROM ubuntu:18.04

RUN apt-get -y update && apt-get install -y --no-install-recommends \
    wget \
    curl \
    nginx \
    ca-certificates \
    bzip2 \
    build-essential \
    cmake \
    openjdk-8-jdk \
    git-core \
    maven \
    && rm -rf /var/lib/apt/lists/*

# Download and setup miniconda
RUN curl https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh >> miniconda.sh
RUN bash ./miniconda.sh -b -p /miniconda; rm ./miniconda.sh;
ENV PATH="/miniconda/bin:$PATH"
ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
ENV GUNICORN_CMD_ARGS="--timeout 60 -k gevent"

# Set up the program in the image
WORKDIR /opt/mlflow

{install_mlflow}

{custom_setup_steps}

{entrypoint}
"""


def _get_mlflow_install_step(dockerfile_context_dir, mlflow_home):
    """
    Get docker build commands for installing MLflow given a Docker context dir and an optional
    source directory.
    """
    if mlflow_home:
        mlflow_dir = _copy_project(
            src_path=mlflow_home, dst_path=dockerfile_context_dir)
        return (
            "COPY {mlflow_dir} /opt/mlflow\n"
            "RUN pip install /opt/mlflow\n"
            "RUN cd /opt/mlflow/mlflow/java/scoring && "
            "mvn --batch-mode package -DskipTests && "
            "mkdir -p /opt/java/jars && "
            "mv /opt/mlflow/mlflow/java/scoring/target/"
            "mlflow-scoring-*-with-dependencies.jar /opt/java/jars\n"
        ).format(mlflow_dir=mlflow_dir)
    else:
        return (
            "RUN pip install mlflow=={version}\n"
            "RUN mvn "
            " --batch-mode dependency:copy"
            " -Dartifact=org.mlflow:mlflow-scoring:{version}:pom"
            " -DoutputDirectory=/opt/java\n"
            "RUN mvn "
            " --batch-mode dependency:copy"
            " -Dartifact=org.mlflow:mlflow-scoring:{version}:jar"
            " -DoutputDirectory=/opt/java/jars\n"
            "RUN cp /opt/java/mlflow-scoring-{version}.pom /opt/java/pom.xml\n"
            "RUN cd /opt/java && mvn "
            "--batch-mode dependency:copy-dependencies -DoutputDirectory=/opt/java/jars\n"
        ).format(version=mlflow.version.VERSION)


def _build_image(image_name, entrypoint, mlflow_home=None, custom_setup_steps_hook=None):
    """
    Build an MLflow Docker image that can be used to serve a model.
    The image is built locally and it requires Docker to run.

    :param image_name: Docker image name.
    :param entrypoint: String containing ENTRYPOINT directive for docker image
    :param mlflow_home: (Optional) Path to a local copy of the MLflow GitHub repository.
                        If specified, the image will install MLflow from this directory.
                        If None, it will install MLflow from pip.
    :param custom_setup_steps_hook: (Optional) Single-argument function that takes the string path
           of a dockerfile context directory and returns a string containing Dockerfile commands to
           run during the image build step.
    """
    mlflow_home = os.path.abspath(mlflow_home) if mlflow_home else None
    with TempDir() as tmp:
        cwd = tmp.path()
        install_mlflow = _get_mlflow_install_step(cwd, mlflow_home)
        custom_setup_steps = custom_setup_steps_hook(cwd) if custom_setup_steps_hook else ""
        with open(os.path.join(cwd, "Dockerfile"), "w") as f:
            f.write(_DOCKERFILE_TEMPLATE.format(
                install_mlflow=install_mlflow,
                custom_setup_steps=custom_setup_steps,
                entrypoint=entrypoint))
        _logger.info("Building docker image with name %s", image_name)
        os.system('find {cwd}/'.format(cwd=cwd))
        proc = Popen(["docker", "build", "-t", image_name, "-f", "Dockerfile", "."],
                     cwd=cwd,
                     stdout=PIPE,
                     stderr=STDOUT,
                     universal_newlines=True)
        for x in iter(proc.stdout.readline, ""):
            eprint(x, end='')
# encoding: utf-8
from setuptools import setup

setup(name='platalea',
      version='0.1',
      description='Understanding visually grounded spoken language via multi-tasking',
      url='https://github.com/gchrupala/platalea',
      author='Grzegorz Chrupała',
      author_email='[email protected]',
      license='Apache',
      zip_safe=False,
      install_requires=[
          'torch>=1.2.0',
          'torchvision>=0.4.0',
          'numpy>=1.17.2',
          'scipy>=1.3.1',
          'configargparse>=1.0'
      ])
import argparse
import json
import sys

import requests
from azureml.core.authentication import (
    InteractiveLoginAuthentication,
    MsiAuthentication,
    ServicePrincipalAuthentication,
)
from environs import Env

print("Triggering pipeline...")


# --- helper class definitions
# parses a dictionary argument
class ParseDict(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        d = {}
        if values:
            for item in values:
                split_items = item.split("=", 1)
                key = split_items[0].strip()
                value = split_items[1]
                d[key] = value
        setattr(namespace, self.dest, d)


# --- initialization
print("Initialization...")
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("--endpoint", type=str, required=True, help="pipeline endpoint")
parser.add_argument(
    "--parameter-assignments",
    metavar="KEY=VALUE",
    nargs="+",
    action=ParseDict,
)
parser.add_argument(
    "--auth-type",
    choices=["interactive", "service_principal", "msi"],
    required=True,
    help="Specifies the method to authenticate against AzureML Services.",
)
parser.add_argument(
    "--tenant-id",
    type=str,
    required=False,
    help="AAD tenant ID, only needed if service principal auth is used",
)
parser.add_argument(
    "--service-principal-id",
    type=str,
    required=False,
    help="ID of the service principal triggering, only needed if service principal auth is used",
)
parser.add_argument(
    "--service-principal-secret",
    type=str,
    required=False,
    help="Service principal secret, only needed if service principal auth is used. In production scenarios, "
    "you should NOT use this option and pass the secret through an environment variable.",
)
args = parser.parse_args()
endpoint = args.endpoint
parameter_assignments = args.parameter_assignments
auth_type = args.auth_type

# --- send a request to trigger the pipeline
print("Sending request...")
if parameter_assignments:
    print(f"Parameter Assignments:\n{json.dumps(parameter_assignments, indent=2)}")

auth = None
if auth_type == "interactive":
    auth = InteractiveLoginAuthentication()
if auth_type == "service_principal":
    env = Env(expand_vars=True)
    env.read_env("foundation.env")
    env.read_env("service-principals.env")
    tenant_id = args.tenant_id if args.tenant_id else env("TRIGGER_AML_PIPELINE_SP_TENANT_ID")
    service_principal_id = (
        args.service_principal_id if args.service_principal_id else env("TRIGGER_AML_PIPELINE_SP_APP_ID")
    )
    service_principal_secret = (
        args.service_principal_secret if args.service_principal_secret else env("TRIGGER_AML_PIPELINE_SP_SECRET")
    )
    auth = ServicePrincipalAuthentication(
        tenant_id=tenant_id,
        service_principal_id=service_principal_id,
        service_principal_password=service_principal_secret,
    )
if auth_type == "msi":
    auth = MsiAuthentication()

aad_token = auth.get_authentication_header()
# Pass the parsed parameter assignments through to the pipeline endpoint.
trigger_pipeline_response = requests.post(
    endpoint,
    headers=aad_token,
    json={"ParameterAssignments": parameter_assignments or {}},
)

print(f"\nResponse Status Code:\n{trigger_pipeline_response.status_code}")
print(f"\nResponse Body:\n{json.dumps(trigger_pipeline_response.json(), indent=2)}")
if trigger_pipeline_response.status_code == 200:
    print("\nPipeline triggered successfully.")

# --- done
print("Done.")

# --- exit with appropriate exit code
is_error = trigger_pipeline_response.status_code != 200
if is_error:
    sys.exit(1)
else:
    sys.exit(0)
import json
from datetime import datetime

from django.http import HttpResponse
from rest_framework import status
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.views import APIView

from ..serializers import ImportSerializer
from ..services import export_schemas, import_schemas

JSON_MIME = 'application/json'
JSON_NAME = 'attachment; filename="schemas-{}.json"'


class ExportView(APIView):

    def get(self, request):
        params = self.request.query_params
        now = datetime.now().strftime('%Y%m%d-%H%M')

        response = HttpResponse(content_type=JSON_MIME)
        response['Content-Disposition'] = JSON_NAME.format(now)

        data = export_schemas()
        json.dump(data, response)
        return response


class ImportView(APIView):

    def post(self, request):
        serializer = ImportSerializer(
            data=request.data,
            context={'request': request}
        )
        serializer.is_valid(raise_exception=True)
        file = serializer.validated_data['file']

        try:
            data = json.load(file)
        except Exception as error:
            raise ValidationError(error)
        else:
            errors = import_schemas(data)
            if errors:
                raise ValidationError(errors)

        return Response(status=status.HTTP_201_CREATED)
''' Author: Guanghan Ning E-mail: [email protected] October 22th, 2018 Load keypoints from existing openSVAI data format and turn these keypoints into Graph structure for GCN N: # of batch_size M: # of instances within a frame (which is # of human candidates) V: # of graph nodes (which is 15) ''' import numpy as np import sys, os sys.path.append(os.path.abspath("../../")) from utils_json import * from utils_io_folder import * def load_data_for_gcn(dataset_str, dataset_split_str): if dataset_str == "posetrack_18": if dataset_split_str == "train": json_folder_path = "/export/guanghan/Data_2018/posetrack_data/gcn_openSVAI/train" elif dataset_split_str == "val": json_folder_path = "/export/guanghan/Data_2018/posetrack_data/gcn_openSVAI/val" elif dataset_split_str == "test": json_folder_path = "/export/guanghan/Data_2018/posetrack_data/gcn_openSVAI/val" json_file_paths = get_immediate_childfile_paths(json_folder_path) graph_triplet_list_all = [] for json_file_path in json_file_paths: graph_triplet_list = load_graph_triplets_from_json(json_file_path) graph_triplet_list_all.extend(graph_triplet_list) return graph_triplet_list_all def load_graph_triplets_from_json(json_file_path): python_data = read_json_from_file(json_file_path) num_imgs = len(python_data) track_id_dict = {} for track_id in range(100): track_id_dict[track_id] = [] img_id_dict = {} for img_id in range(1000): img_id_dict[img_id] = [] max_track_id = -1 for img_id in range(num_imgs): image_id = python_data[img_id]["image"]["id"] candidates = python_data[img_id]["candidates"] num_candidates = len(candidates) for candidate_id in range(num_candidates): candidate = candidates[candidate_id] track_id = candidate["track_id"] keypoints = candidate["pose_keypoints_2d"] bbox = candidate["det_bbox"] if track_id > max_track_id: max_track_id = track_id candidate_dict = {"track_id": track_id, "img_id": image_id, "bbox": bbox, "keypoints":keypoints} track_id_dict[track_id].append(candidate_dict) img_id_dict[img_id].append(candidate_dict) graph_triplet_list_all = get_graph_triplet(track_id_dict, img_id_dict, max_track_id) return graph_triplet_list_all def get_graph_triplet(track_id_dict, img_id_dict, max_track_id): graph_triplet_list = [] for track_id in range(max_track_id): candidate_dict_list = track_id_dict[track_id] candidate_dict_list_sorted = sorted(candidate_dict_list, key=lambda k:k['img_id']) num_dicts = len(candidate_dict_list_sorted) for dict_id in range(num_dicts - 1): candidate_dict_curr = candidate_dict_list_sorted[dict_id] candidate_dict_next = candidate_dict_list_sorted[dict_id + 1] if candidate_dict_next["img_id"] - candidate_dict_curr["img_id"] >= 3: continue #print("current_dict_imgid: {}, next_dict_imgid: {}".format(candidate_dict_curr["img_id"], candidate_dict_next["img_id"])) keypoints_curr = candidate_dict_curr["keypoints"] keypoints_next = candidate_dict_next["keypoints"] bbox_curr = candidate_dict_curr["bbox"] bbox_next = candidate_dict_next["bbox"] if validate_bbox(bbox_curr) is False: continue if validate_bbox(bbox_next) is False: continue graph_anchor, flag_pass_check = keypoints_to_graph(keypoints_curr, bbox_curr) if flag_pass_check is False: continue graph_positive, flag_pass_check = keypoints_to_graph(keypoints_next, bbox_next) if flag_pass_check is False: continue img_id = candidate_dict_curr["img_id"] candidate_dict_curr_neg = img_id_dict[img_id] for candidate in candidate_dict_curr_neg: if candidate["track_id"] != track_id: keypoints_curr_neg = candidate["keypoints"] bbox_curr_neg = candidate["bbox"] if 
validate_bbox(bbox_curr_neg) is False: continue graph_negative, flag_pass_check = keypoints_to_graph(keypoints_curr_neg, bbox_curr_neg) if flag_pass_check is False: continue graph_triplet = (graph_anchor, graph_positive, graph_negative) # use tuple over list graph_triplet_list.append(graph_triplet) return graph_triplet_list def validate_bbox(bbox): x0, y0, w, h = bbox if w <= 100 or h <= 100: return False else: return True def keypoints_to_graph(keypoints, bbox): num_elements = len(keypoints) num_keypoints = num_elements/3 assert(num_keypoints == 15) x0, y0, w, h = bbox flag_pass_check = True graph = 15*[(0, 0)] for id in range(15): ''' normalize to [0, 1] ''' x = (keypoints[3*id] - x0)*1.0 / w y = (keypoints[3*id+1] - y0)*1.0 / h if not (x >= 0 and x <= 1): flag_pass_check = False if not (y >= 0 and y <= 1): flag_pass_check = False return graph, flag_pass_check python_data_example = { "version": "1.0", "image": [ { "folder": "images/bonn_5sec/000342_mpii", "name": "00000001.jpg", "id" : 0, } ], "candidates":[ { "det_category" : 1, "det_bbox" : [300,300,100,100], "det_score" : [0.9], "pose_order" : [1,2,3], "pose_keypoints_2d" : [10,10,0.9, 20,20,0.9, 30,30,0.8], "track_id": [0], "track_score": [0.8], }, { "det_category" : 2, "det_bbox" : [300,300,100,100], "det_score" : [0.1], "pose_order" : [1,2,3], "pose_keypoints_2d" : [10,10,0.9, 20,20,0.9, 30,30,0.8], "track_id": [1], "track_score": [0.6], } ] }
# Record the zodiac animals and determine the zodiac sign from the year
chinese_zodiac = '猴鸡狗猪鼠牛虎兔龙蛇马羊'
# print(chinese_zodiac[0:4])
# print(chinese_zodiac[-1])
year = 2018
print(year % 12)
print(chinese_zodiac[year % 12])

print('狗' in chinese_zodiac)
print(chinese_zodiac + 'abcd')
print(chinese_zodiac * 3)

# Exercise 3: basic list operations
# Define a list containing 5 numbers
# Append the element 100 to the list
# Remove an element with remove() and observe how the list changes
# Use slicing to take the first three elements, then take the last element
list1 = [1, 2, 3, 4, 5]
list1.append(100)
print(list1)
list1.remove(2)
print(list1)
print(list1[0:3])
print(list1[-1])

# Exercise 4: basic tuple operations
# Define an arbitrary tuple and call append() on it to inspect the error message
# Access the second-to-last element of the tuple
# Define a new tuple and concatenate it with the first one into a new tuple
# Count the number of elements in the tuple
tuple1 = ("aa", "bb", "cc")
# tuple1.append("dd")
print(tuple1[-2])
tuple2 = ("dd", "ee", "ff")
tuple3 = tuple1 + tuple2
print(tuple3)
print(len(tuple3))
print(tuple3.__len__())
#============================================================================
# This is a sample script for a diagram
#============================================================================
from sysmpy import *

root_process = Process("Root Process")
p1, p2 = root_process.Condition("한글 컨디션 입니다. 이것은 테스트 ", "P1", "P2")
p_act1 = p1.Action("Action1")
p3, p4 = p1.Condition("Condition 2", "P3", "P4")
p2_act1 = p2.Action("신규액션1")
p_act1 = p3.Action("Action2")
p_act2 = p4.Action("Action3")
root_process.Action("Action4")
root_process.Action("Action5")
import sys
import os.path
import pprint

sys.path.append(os.path.abspath(__file__ + "\..\.."))

import ctypes

import windows
import windows.test
from windows.generated_def.winstructs import *

remote_code = """
import windows
from windows.generated_def.winstructs import *

windows.utils.create_console()

class YOLOHXBP(windows.debug.HXBreakpoint):
    def trigger(self, dbg, exc):
        p = windows.current_process
        arg_pos = 2
        context = dbg.get_exception_context()
        esp = context.Esp
        unicode_string_addr = p.read_ptr(esp + (arg_pos + 1) * 4)
        wstring_addr = p.read_ptr(unicode_string_addr + 4)
        dll_loaded = p.read_wstring(wstring_addr)
        print("I AM LOADING <{0}>".format(dll_loaded))

d = windows.debug.LocalDebugger()
exp = windows.current_process.peb.modules[1].pe.exports
#windows.utils.FixedInteractiveConsole(locals()).interact()
ldr = exp["LdrLoadDll"]
d.add_bp(YOLOHXBP(ldr))
"""

c = windows.test.pop_proc_32(dwCreationFlags=CREATE_SUSPENDED)
c.execute_python(remote_code)
c.threads[0].resume()

import time
time.sleep(2)
c.exit()
# gene sequence example from @yoch, see
# https://github.com/ilanschnell/bitarray/pull/54
from random import choice
from timeit import timeit

from bitarray import bitarray

trans = {
    "A": bitarray("00"),
    "T": bitarray("01"),
    "G": bitarray("10"),
    "C": bitarray("11")
}

N = 10000
seq = [choice("ATGC") for _ in range(N)]

arr = bitarray()
arr.encode(trans, seq)

assert arr.decode(trans) == seq

# decoding
t = timeit(lambda: arr.decode(trans), number=1000)
print(t)
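A small self-contained follow-up sketch (not part of the original example) showing why the 2-bit encoding pays off: ten thousand bases pack into 2,500 bytes of bit payload, while the Python list representation is far larger even before counting the string objects it holds.

import sys
from bitarray import bitarray

trans = {"A": bitarray("00"), "T": bitarray("01"),
         "G": bitarray("10"), "C": bitarray("11")}
seq = list("ATGC" * 2500)            # 10,000 bases
arr = bitarray()
arr.encode(trans, seq)
print(len(arr) // 8)                 # 2500 bytes of packed bits
print(sys.getsizeof(seq))            # the list object alone is much larger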
# * Copyright (c) 2020-2021. Authors: see NOTICE file. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. # * You may obtain a copy of the License at # * # * http://www.apache.org/licenses/LICENSE-2.0 # * # * Unless required by applicable law or agreed to in writing, software # * distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. import csv from copy import deepcopy from enum import Enum from typing import Any, Dict, List, Mapping, Sequence, Tuple, Union from fastapi import params from fastapi.dependencies import utils from pydantic.error_wrappers import ErrorWrapper from pydantic.errors import MissingError from pydantic.fields import ModelField from starlette.datastructures import Headers, QueryParams # Fast API tweaks # Waiting for PR2078 to be merged # https://github.com/tiangolo/fastapi/pull/2078/ # Add support for query parameter serialization styles class QueryStyle(Enum): form = "form" space_delimited = "spaceDelimited" pipe_delimited = "pipeDelimited" # deep_object = "deepObject" # NOT SUPPORTED YET query_style_to_delimiter = { QueryStyle.form: ",", QueryStyle.space_delimited: " ", QueryStyle.pipe_delimited: "|", } # Force our settings in the context of PIMS until PR is merged. query_style = QueryStyle.form query_explode = False def request_params_to_args( required_params: Sequence[ModelField], received_params: Union[Mapping[str, Any], QueryParams, Headers], ) -> Tuple[Dict[str, Any], List[ErrorWrapper]]: values = {} errors = [] for field in required_params: field_info = field.field_info assert isinstance( field_info, params.Param ), "Params must be subclasses of Param" if utils.is_scalar_sequence_field(field) and isinstance( received_params, (QueryParams, Headers) ): if isinstance(field_info, params.Query) and not query_explode: value = received_params.get(field.alias) if value is not None: delimiter = query_style_to_delimiter.get(query_style) value = list(csv.reader([value], delimiter=delimiter))[0] else: value = received_params.getlist(field.alias) or field.default else: value = received_params.get(field.alias) if value is None: if field.required: errors.append( ErrorWrapper( MissingError(), loc=(field_info.in_.value, field.alias) ) ) else: values[field.name] = deepcopy(field.default) continue v_, errors_ = field.validate( value, values, loc=(field_info.in_.value, field.alias) ) if isinstance(errors_, ErrorWrapper): errors.append(errors_) elif isinstance(errors_, list): errors.extend(errors_) else: values[field.name] = v_ return values, errors def apply_fastapi_tweaks(): # Monkey patch Fast API utils.request_params_to_args = request_params_to_args
import bpy from .functions import calc_exposure_value, update_exposure_guide from . import ( camera_presets, ) class PHOTOGRAPHER_PT_Panel(bpy.types.Panel): # bl_idname = "CAMERA_PT_Photographer" bl_label = "Photographer" bl_space_type = 'PROPERTIES' bl_region_type = 'WINDOW' bl_context = "data" @classmethod def poll(cls, context): # Add Panel properties to cameras return context.camera def draw(self, context): layout = self.layout settings = context.camera.photographer scene = bpy.context.scene # UI if camera isn't active if scene.camera != bpy.context.active_object: layout.label(text="This is not the Active Camera") row = layout.row() row.operator("photographer.makecamactive", text="Make Active Camera") row.operator("photographer.selectactivecam", text="Select Active Camera") col = layout.column() # Enable UI if Camera is Active if scene.camera != bpy.context.active_object: col.enabled = False col.operator("photographer.updatesettings", text="Apply all Settings") #### CAMERA SETTINGS PANEL #### class PHOTOGRAPHER_PT_ViewPanel_Camera(bpy.types.Panel): bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = 'Photographer' bl_label = 'Scene Camera and Lens' @classmethod def poll(cls, context): # Add Panel properties to cameras return context.scene.camera def draw_header_preset(self, context): camera_presets.PHOTOGRAPHER_PT_CameraPresets.draw_panel_header(self.layout) def draw(self, context): layout = self.layout layout.use_property_split = True layout.use_property_decorate = False camera = context.scene.camera.data settings = context.scene.camera.data.photographer col = layout.column(align=True) col.prop(settings,'sensor_type') if settings.sensor_type == 'CUSTOM': col.prop(camera,'sensor_width') # col = layout.column(align=True) # col.prop(camera,'clip_start') # col.prop(camera,'clip_end') layout.prop(camera,'lens') layout.prop(camera.dof,'use_dof', text='Enable Depth of Field') # Aperture parameter row = layout.row(align = True) if bpy.context.scene.render.engine == 'LUXCORE': use_dof = context.scene.camera.data.luxcore.use_dof else: use_dof = context.scene.camera.data.dof.use_dof row.enabled = use_dof if not settings.aperture_slider_enable: row.prop(settings, 'aperture_preset', text='Aperture') else: row.prop(settings, 'aperture', slider=True, text='Aperture F-stop') row.prop(settings,'aperture_slider_enable', icon='SETTINGS', text='') col = layout.column(align=True) col.prop(camera.dof,'aperture_ratio', text="Anamorphic Ratio") col.prop(camera.dof,'aperture_blades') col.prop(camera.dof,'aperture_rotation') if camera.dof.use_dof: col.enabled = True else: col.enabled = False #### EXPOSURE PANELS #### def exposure_header_preset(self, context, settings, guide): layout = self.layout layout.enabled = settings.exposure_enabled row = layout.row(align=False) row.alignment = 'RIGHT' ev = calc_exposure_value(self, context, settings) if guide == True: ev_guide = update_exposure_guide(self, context, ev) row.label(text = ev_guide + " - " + "EV: " + str("%.2f" % ev) ) else: row.label(text = "EV: " + str("%.2f" % ev) ) def exposure_header(self, context, settings): self.layout.prop(settings, "exposure_enabled", text="") def exposure_panel(self, context, settings, show_aperture): layout = self.layout scene = bpy.context.scene layout.use_property_split = True layout.use_property_decorate = False layout.enabled = settings.exposure_enabled layout.row().prop(settings, 'exposure_mode',expand=True) # Settings in EV Mode if settings.exposure_mode == 'EV': layout.prop(settings, 'ev', slider=True) 
layout.prop(settings, 'exposure_compensation', text='Exposure Compensation') # Shutter Speed parameter row = layout.row(align = True) row.enabled = settings.motionblur_enabled if settings.shutter_mode == 'SPEED': if not settings.shutter_speed_slider_enable: row.prop(settings, 'shutter_speed_preset', text='Shutter Speed') else: row.prop(settings, 'shutter_speed', slider=True) row.operator("photographer.setshutterangle",icon="DRIVER_ROTATIONAL_DIFFERENCE", text="") row.prop(settings,'shutter_speed_slider_enable', icon='SETTINGS', text='') if settings.shutter_mode == 'ANGLE': if not settings.shutter_speed_slider_enable: row.prop(settings, 'shutter_angle_preset', text='Shutter Angle') else: row.prop(settings, 'shutter_angle', slider=True) row.operator("photographer.setshutterspeed",icon="PREVIEW_RANGE", text="") row.prop(settings,'shutter_speed_slider_enable', icon='SETTINGS', text='') # Aperture parameter row = layout.row(align = True) if bpy.context.scene.render.engine == 'LUXCORE': use_dof = context.scene.camera.data.luxcore.use_dof else: use_dof = context.scene.camera.data.dof.use_dof row.enabled = use_dof if show_aperture: if not settings.aperture_slider_enable: row.prop(settings, 'aperture_preset', text='Aperture') else: row.prop(settings, 'aperture', slider=True, text='Aperture F-stop / DOF only') row.prop(settings,'aperture_slider_enable', icon='SETTINGS', text='') else: # Shutter Speed parameter if settings.shutter_mode == 'SPEED': row = layout.row(align = True) if not settings.shutter_speed_slider_enable: row.prop(settings, 'shutter_speed_preset', text='Shutter Speed') else: row.prop(settings, 'shutter_speed', slider=True) row.operator("photographer.setshutterangle",icon="DRIVER_ROTATIONAL_DIFFERENCE", text="") row.prop(settings,'shutter_speed_slider_enable', icon='SETTINGS', text='') if settings.shutter_mode == 'ANGLE': row = layout.row(align = True) if not settings.shutter_speed_slider_enable: row.prop(settings, 'shutter_angle_preset', text='Shutter Angle') else: row.prop(settings, 'shutter_angle', slider=True) row.operator("photographer.setshutterspeed",icon="PREVIEW_RANGE", text="") row.prop(settings,'shutter_speed_slider_enable', icon='SETTINGS', text='') # Aperture parameter row = layout.row(align = True) if not settings.aperture_slider_enable: row.prop(settings, 'aperture_preset', text='Aperture') else: row.prop(settings, 'aperture', slider=True, text='Aperture F-stop') row.prop(settings,'aperture_slider_enable', icon='SETTINGS', text='') # ISO parameter row = layout.row(align = True) if not settings.iso_slider_enable: row.prop(settings, 'iso_preset', text='ISO') else: row.prop(settings, 'iso', slider=True) row.prop(settings,'iso_slider_enable', icon='SETTINGS', text='') layout.prop(settings, 'exposure_compensation', text='Exposure Compensation') col = layout.column(align=False) col.prop(settings, 'motionblur_enabled', text='Affect Motion Blur') # Check if the Motion Blur is enabled in the Render Settings if settings.motionblur_enabled and not scene.render.use_motion_blur: row = layout.row(align = True) row.label(icon= 'ERROR', text="Motion Blur is disabled") row.operator("photographer.rendermotionblur", text="Enable Motion Blur") # Hide Affect Depth of Field in 3D View Panel if show_aperture: if bpy.context.scene.render.engine == 'LUXCORE': col.prop(scene.camera.data.luxcore, "use_dof", text='Affect Depth of Field') else: col.prop(scene.camera.data.dof, "use_dof", text='Affect Depth of Field') col.prop(settings, 'falsecolor_enabled', text='False Color') row = layout.row() 
row.alignment = 'RIGHT' framerate_guide = "FPS : " + str(round(scene.render.fps/scene.render.fps_base,2)) if settings.shutter_mode == 'ANGLE': shutter_speed_guide = " - " + "Shutter Speed : 1/" + str(int(settings.shutter_speed)) + "s" framerate_guide += shutter_speed_guide if settings.shutter_mode == 'SPEED': shutter_angle_guide = " - " + "Shutter Angle : " + str(round(settings.shutter_angle,1)) framerate_guide += shutter_angle_guide row.label(text = framerate_guide) class PHOTOGRAPHER_PT_Panel_Exposure(bpy.types.Panel): bl_label = 'Exposure' bl_parent_id = 'PHOTOGRAPHER_PT_Panel' bl_space_type = 'PROPERTIES' bl_region_type = 'WINDOW' bl_context = "data" @classmethod def poll(cls, context): return context.camera def draw_header_preset(self, context): settings = context.camera.photographer exposure_header_preset(self,context,settings, True) def draw_header(self, context): settings = context.camera.photographer exposure_header(self,context,settings) def draw(self, context): settings = context.camera.photographer show_aperture = True exposure_panel(self,context,settings,show_aperture) class PHOTOGRAPHER_PT_ViewPanel_Exposure(bpy.types.Panel): bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = 'Photographer' bl_label = 'Exposure' @classmethod def poll(cls, context): return context.scene.camera is not None def draw_header_preset(self, context): settings = context.scene.camera.data.photographer exposure_header_preset(self,context,settings, False) def draw_header(self, context): settings = context.scene.camera.data.photographer exposure_header(self,context,settings) def draw(self, context): settings = context.scene.camera.data.photographer show_aperture = False exposure_panel(self,context,settings,show_aperture) #### WHITE BALANCE PANELS #### def whitebalance_header_preset(self, context,use_scene_camera): layout = self.layout row = layout.row(align=True) row.operator("white_balance.picker",text='', icon='EYEDROPPER', emboss=False).use_scene_camera=use_scene_camera row.operator("white_balance.reset", text='', icon='LOOP_BACK', emboss=False).use_scene_camera=use_scene_camera def whitebalance_header(self, context): self.layout.prop(context.scene.view_settings, "use_curve_mapping", text="") def whitebalance_panel(self, context, settings): layout = self.layout scene = bpy.context.scene layout.use_property_split = True layout.use_property_decorate = False # No animation. 
layout.enabled = context.scene.view_settings.use_curve_mapping row = layout.row(align=True) row.prop(settings, "color_temperature", slider=True) row.prop(settings, "preview_color_temp", text='') row = layout.row(align=True) row.prop(settings, "tint", slider=True) row.prop(settings, "preview_color_tint", text='') class PHOTOGRAPHER_PT_Panel_WhiteBalance(bpy.types.Panel): bl_label = "White Balance" bl_parent_id = "PHOTOGRAPHER_PT_Panel" bl_space_type = 'PROPERTIES' bl_region_type = 'WINDOW' bl_context = "data" @classmethod def poll(cls, context): return context.camera def draw_header_preset(self, context): whitebalance_header_preset(self,context,False) def draw_header(self, context): whitebalance_header(self,context) def draw(self, context): settings = context.camera.photographer whitebalance_panel(self,context,settings) class PHOTOGRAPHER_PT_ViewPanel_WhiteBalance(bpy.types.Panel): bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = 'Photographer' bl_label = 'White Balance' @classmethod def poll(cls, context): return context.scene.camera is not None def draw_header_preset(self, context): whitebalance_header_preset(self,context,True) def draw_header(self, context): whitebalance_header(self,context) def draw(self, context): settings = context.scene.camera.data.photographer whitebalance_panel(self,context,settings) #### RESOLUTION PANELS #### def resolution_header_preset(self, context, settings): layout = self.layout layout.enabled = settings.resolution_enabled row = layout.row(align=True) row.alignment = 'RIGHT' # Resolution resolution_x = str(int(context.scene.render.resolution_x * context.scene.render.resolution_percentage/100)) resolution_y = str(int(context.scene.render.resolution_y * context.scene.render.resolution_percentage/100)) row.label(text = resolution_x + " x " + resolution_y) def resolution_header(self, context, settings): self.layout.prop(settings, "resolution_enabled", text="") def resolution_panel(self, context, settings): layout = self.layout scene = bpy.context.scene layout.use_property_split = True layout.use_property_decorate = False # No animation. 
layout.enabled = settings.resolution_enabled col = layout.column() col.alignment = 'RIGHT' col.prop(settings, 'resolution_mode') sub = col.column(align=True) if settings.resolution_mode == 'CUSTOM_RES': sub.prop(settings, "resolution_x", text='Resolution X') sub.prop(settings, "resolution_y", text='Y') sub.prop(context.scene.render, "resolution_percentage", text='%') col.row().prop(settings, 'resolution_rotation',expand=True) elif settings.resolution_mode == 'CUSTOM_RATIO': sub.prop(settings, "ratio_x", text='Ratio X') sub.prop(settings, "ratio_y", text='Y') sub.separator() sub.prop(settings, "resolution_x", text='Resolution X') sub.prop(context.scene.render, "resolution_percentage", text='%') col.row().prop(settings, 'resolution_rotation',expand=True) else: sub.prop(settings, "longedge") sub.prop(context.scene.render, "resolution_percentage", text='%') if not settings.resolution_mode == '11': col.row().prop(settings, 'resolution_rotation',expand=True) class PHOTOGRAPHER_PT_Panel_Resolution(bpy.types.Panel): bl_label = "Resolution" bl_parent_id = "PHOTOGRAPHER_PT_Panel" bl_space_type = 'PROPERTIES' bl_region_type = 'WINDOW' bl_context = "data" @classmethod def poll(cls, context): return context.camera def draw_header_preset(self, context): settings = context.camera.photographer resolution_header_preset(self,context,settings) def draw_header(self, context): settings = context.camera.photographer resolution_header(self,context,settings) def draw(self, context): settings = context.camera.photographer resolution_panel(self,context,settings) class PHOTOGRAPHER_PT_ViewPanel_Resolution(bpy.types.Panel): bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = 'Photographer' bl_label = 'Resolution' @classmethod def poll(cls, context): return context.scene.camera is not None def draw_header_preset(self, context): settings = context.scene.camera.data.photographer resolution_header_preset(self,context,settings) def draw_header(self, context): settings = context.scene.camera.data.photographer resolution_header(self,context,settings) def draw(self, context): settings = context.scene.camera.data.photographer resolution_panel(self,context,settings) #### MASTER CAMERA PANEL #### class MASTERCAMERA_PT_ViewPanel(bpy.types.Panel): bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = 'Photographer' bl_label = "Camera List" def draw(self, context): layout = self.layout scene = context.scene col = layout.column(align=True) # Master Camera master_row = col.row(align=True) master_cam = 'MasterCamera' cam_list=[cam.name for cam in context.scene.collection.all_objects if cam.type=='CAMERA'] if master_cam in cam_list: if context.scene.camera == bpy.data.objects.get(master_cam): master_row.operator("mastercamera.look_through", text=master_cam, icon='OUTLINER_DATA_CAMERA').camera=master_cam else: master_row.operator("mastercamera.look_through", text=master_cam).camera=master_cam master_row.operator("mastercamera.delete_cam", text="", icon="PANEL_CLOSE").camera=master_cam row = col.row(align=True) row.prop(bpy.data.objects.get(master_cam).data.photographer, "match_speed", slider=True) else: master_row.operator("mastercamera.add_mastercam", text='Add Master Camera', icon='OUTLINER_DATA_CAMERA') col = layout.column(align=True) row = col.row(align=True) row.label(text= "Camera List:") row = col.row(align=True) row.operator("mastercamera.add_cam", text='Add Camera') col = layout.column(align=True) if master_cam in cam_list: cam_list.remove('MasterCamera') for cam in cam_list: row = col.row(align=True) if 
context.scene.camera is not None: if context.scene.camera == bpy.data.objects.get(master_cam): row.operator("view3d.switch_camera", text="", icon="PLAY").camera=cam # row.enabled = not bpy.data.objects.get(master_cam).data.photographer.is_matching if context.scene.camera == bpy.data.objects.get(cam): row.operator("mastercamera.look_through", text="", icon='RESTRICT_RENDER_OFF').camera=cam else: row.operator("mastercamera.look_through", text="", icon='RESTRICT_RENDER_ON').camera=cam row.operator("mastercamera.select_cam", text=cam).camera=cam row.operator("mastercamera.delete_cam", text="", icon="PANEL_CLOSE").camera=cam view = context.space_data render = scene.render if view.lock_camera: icon="LOCKVIEW_ON" else: icon="LOCKVIEW_OFF" row = layout.row() split = row.split(factor=0.7, align=False) split.prop(view, "lock_camera", text="Lock Camera to View", icon=icon ) split.prop(render, "use_border", text="Border") if context.scene.camera: photographer = context.scene.camera.data.photographer layout.operator("photographer.updatesettings", text="Apply Photographer Settings") #### AUTOFOCUS PANELS #### class PHOTOGRAPHER_PT_Panel_Autofocus(bpy.types.Panel): # bl_idname = "CAMERA_PT_Photographer_Autofocus" bl_label = "Continuous Autofocus" bl_parent_id = "PHOTOGRAPHER_PT_Panel" bl_space_type = 'PROPERTIES' bl_region_type = 'WINDOW' bl_context = "data" def draw_header(self, context): settings = context.camera.photographer self.layout.prop(settings, "af_continuous_enabled", text="") def draw(self, context): settings = context.camera.photographer layout = self.layout layout.use_property_split = True layout.use_property_decorate = False # No animation. col = layout.column(align=True) col.prop(settings, "af_continuous_interval", slider=True) class PHOTOGRAPHER_PT_ViewPanel_Autofocus(bpy.types.Panel): bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = 'Photographer' bl_label = "Autofocus" @classmethod def poll(cls, context): return context.scene.camera is not None # def draw_header(self, context): # if not context.scene.camera.data.photographer.af_tracking_enabled: # dof_distance = str(round(context.scene.camera.data.dof.focus_distance*context.scene.unit_settings.scale_length,2)) # if not context.scene.unit_settings.system == 'NONE': # dof_distance = dof_distance + "m" # self.layout.prop(text=dof_distance) def draw(self, context): settings = context.scene.camera.data.photographer layout = self.layout layout.use_property_split = True layout.use_property_decorate = False # No animation. col = layout.column(align=True) if context.scene.camera: if context.scene.camera.type == 'CAMERA': if settings.af_tracking_enabled == False: icon_afs = 'RESTRICT_RENDER_OFF' if settings.af_animate: icon_afs = 'KEYTYPE_KEYFRAME_VEC' col.operator("photographer.focus_single", text="AF-S", icon=icon_afs) if settings.af_tracking_enabled == False: col.operator("photographer.focus_tracking", text="AF-Track", icon='OBJECT_DATA') if settings.af_tracking_enabled: col.operator("photographer.focus_tracking_cancel", text="Cancel AF Tracking", icon='OBJECT_DATA') col.separator() icon_afc = 'HOLDOUT_ON' if settings.af_animate: icon_afc = 'KEYTYPE_KEYFRAME_VEC' col.prop(settings, "af_continuous_enabled", text="Enable AF-C", icon=icon_afc) col_afc_int = col.column(align=True) col_afc_int.enabled = settings.af_continuous_enabled col_afc_int.prop(settings, "af_continuous_interval", slider=True) col.separator() col.prop(settings, "af_animate", text="Animate AF", icon="KEY_HLT" )
# how to run
# python bin2png.py --input_dir="./data/input/validation" --output_dir="./" --data_size 1024
import tensorflow as tf
import numpy as np
import argparse
import os
import glob
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt

SZ = 1024

parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", help="path to folder containing images")
parser.add_argument("--output_dir", required=True, help="where to put output files")
parser.add_argument("--data_size", type=int, default=SZ, help="specify data length 256 or 1024 typically")
a = parser.parse_args()


def save_plots(inputs, outputs, filename):
    plt.clf()
    plt.plot(inputs[0, 0, :, 0], linestyle='solid')
    plt.plot(outputs[0, 0, :, 0], linestyle='dashed')
    plt.savefig(filename)
    plt.clf()
    print(filename, " saved")


def load_examples(input_paths):
    if len(input_paths) == 0:
        raise Exception("input_dir contains no image files")
    path_queue = tf.train.string_input_producer(input_paths, shuffle=False, num_epochs=1)
    reader = tf.WholeFileReader()
    paths, contents = reader.read(path_queue)
    raw_input = tf.decode_raw(contents, tf.float32)
    raw_input = tf.reshape(raw_input, [2, 1, tf.constant(a.data_size), 1])
    a_images = raw_input[0]
    b_images = raw_input[1]
    return tf.train.batch([paths, a_images, b_images], batch_size=1)


input_paths = glob.glob(os.path.join(a.input_dir, "*.bin"))
input_paths.sort()
os.makedirs(a.output_dir, exist_ok=True)

path_batch, a_batch, b_batch = load_examples(input_paths)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            paths, a_images, b_images = sess.run([path_batch, a_batch, b_batch])
            fullpath = paths[0].decode("utf-8")
            dirname, filename = os.path.split(fullpath)
            fn = os.path.splitext(filename)[0] + ".png"
            save_plots(a_images, b_images, os.path.join(a.output_dir, fn))
    except tf.errors.OutOfRangeError:
        print("an epoch finished")
    finally:
        coord.request_stop()
        coord.join(threads)
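A hedged helper sketch (not part of the original script; the output path and waveforms are made up) that writes a .bin file in the layout the reader above expects: two float32 traces of length data_size stored back to back.

import os
import numpy as np

out_dir = "./data/input/validation"       # made-up path matching the usage comment above
os.makedirs(out_dir, exist_ok=True)
data_size = 1024
pair = np.zeros((2, 1, data_size, 1), dtype=np.float32)
pair[0, 0, :, 0] = np.sin(np.linspace(0, 10, data_size))   # "a" trace
pair[1, 0, :, 0] = np.cos(np.linspace(0, 10, data_size))   # "b" trace
pair.tofile(os.path.join(out_dir, "example.bin"))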
import random

number = random.randrange(1, 100, 1)
n_guesses = 10
print("You have", n_guesses, "guesses")
user_num = input("Enter a number between 1 and 100:\n")

if user_num.isnumeric():
    user_num = int(user_num)
    if 1 <= user_num <= 100:
        while n_guesses > 0:
            if user_num > number:
                print("Lower number please!")
                n_guesses -= 1
                print("You have", n_guesses, "guesses left")
                user_num = int(input())
                if 1 <= user_num <= 100:
                    continue
                else:
                    print("Please enter a number between 1 and 100")
                    break
            elif user_num < number:
                print("Higher number please!")
                n_guesses -= 1
                print("You have", n_guesses, "guesses left")
                user_num = int(input())
                if 1 <= user_num <= 100:
                    continue
                else:
                    print("Please enter a number between 1 and 100")
                    break
            else:
                print("Congrats! You have guessed the right number in", 10 - (n_guesses - 1), "guesses")
                break
        if n_guesses == 0:
            print("Game Over!")
    else:
        print("Please enter a number between 1 and 100")
else:
    print("Invalid input!")
# coding=utf-8 # Copyright 2020 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pivot recognition with Bert encoding. Example command line call: $ bazel-bin/cabby/model/landmark_recognition/entity_recognition \ --data_dir ~/data/RVS/Manhattan \ --batch_size 32 \ --epochs 4 \ --max_grad_norm 1.0 \ --region Manhattan --data_dir_touchdown ~/data/Touchdown/ --data_dir_run ~/data/RUN/ --n_samples 5 --pivot_type end_pivot """ from absl import app from absl import flags import enum import os import pandas as pd from torch.utils.data import DataLoader from transformers import BertForTokenClassification from cabby.geo import regions from cabby.model.landmark_recognition import dataset_bert as dataset from cabby.model.landmark_recognition import run from cabby.geo import walk FLAGS = flags.FLAGS landmark_list = walk.LANDMARK_TYPES + [dataset.EXTRACT_ALL_PIVOTS] landmark_msg = "Landmark type: " + ','.join(landmark_list) flags.DEFINE_string("data_dir", None, "The directory from which to load the dataset.") flags.DEFINE_string("model_prefix", None, "The path to save the model.") flags.DEFINE_enum( "pivot_type", None, landmark_list, landmark_msg) flags.DEFINE_integer( 'batch_size', default=32, help=('Batch size.')) flags.DEFINE_integer( 'epochs', default=4, help=('Epochs size.')) flags.DEFINE_float( 'max_grad_norm', default=1.0, help=('Max grad norm.')) flags.DEFINE_enum( "region", None, regions.SUPPORTED_REGION_NAMES, regions.REGION_SUPPORT_MESSAGE) flags.DEFINE_integer( "s2_level", default=18, help=("S2 level of the S2Cells.")) flags.DEFINE_integer( "n_samples", default=5, help=("Number of samples to test on.")) flags.DEFINE_string("data_dir_touchdown", None, "The directory from which to load the Touchdown dataset for testing.") flags.DEFINE_string("data_dir_run", None, "The directory from which to load the RUN dataset for testing.") # Required flags. flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("model_prefix") flags.mark_flag_as_required("pivot_type") def main(argv): del argv # Unused. 
train_config = FLAGS.flag_values_dict() train_config['model_path'] = FLAGS.model_prefix + "_" + FLAGS.pivot_type + ".pt" train_config = type('Config', (object,), train_config) model = BertForTokenClassification.from_pretrained( "bert-base-cased", num_labels=2, output_attentions=False, output_hidden_states=False ) padSequence = dataset.PadSequence() ds_train, ds_val, ds_test = dataset.create_dataset( FLAGS.data_dir, FLAGS.region, FLAGS.s2_level, FLAGS.pivot_type) train_dataloader = DataLoader( ds_train, batch_size=FLAGS.batch_size, collate_fn=padSequence) val_dataloader = DataLoader(ds_val, batch_size=FLAGS.batch_size, collate_fn=padSequence) test_dataloader = DataLoader(ds_test, batch_size=FLAGS.batch_size, collate_fn=padSequence) model_trained = run.train(model, train_dataloader, val_dataloader, train_config) run.test(model_trained, test_dataloader) print ("\n Samples from RVS:") instructions = ds_test.ds.instructions.sample(FLAGS.n_samples).tolist() run.test_samples( instructions=instructions, tokenizer=dataset.tokenizer, model=model_trained ) print ("\n Samples from RUN:") if os.path.exists(FLAGS.data_dir_run): path_run = os.path.join(FLAGS.data_dir_run, 'dataset.json') if os.path.exists(path_run): run_ds = pd.read_json(path_run, lines=True) instructions = run_ds.instruction.sample(FLAGS.n_samples).tolist() run.test_samples( instructions=instructions, tokenizer=dataset.tokenizer, model=model_trained ) print ("\n Samples from Touchdown:") # Test against Touchdown dataset (no labels for landmarks). if os.path.exists(FLAGS.data_dir_touchdown): path_touchdown = os.path.join(FLAGS.data_dir_touchdown, 'test.json') if os.path.exists(path_touchdown): touchdown_ds = pd.read_json(path_touchdown, lines=True) instructions = touchdown_ds.navigation_text.sample(FLAGS.n_samples).tolist() run.test_samples( instructions=instructions, tokenizer=dataset.tokenizer, model=model_trained ) if __name__ == '__main__': app.run(main)
# Copyright (c) Jupyter Development Team.
from jupyter_core.paths import jupyter_data_dir
import subprocess
import os
import errno
import stat

c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.port = 8888
c.NotebookApp.open_browser = False
c.NotebookApp.token = ''

# Set a password if PASSWORD is set
if 'PASSWORD' in os.environ:
    from IPython.lib import passwd
    c.NotebookApp.password = passwd(os.environ['PASSWORD'])
    del os.environ['PASSWORD']
#!/usr/bin/env python # coding: utf-8 # # Alice's Rose # ## Dependencies # # The `import` statement is special... it imports programmer's wisdom! # The common usage is to acquire access to python packages. # In[1]: import sympy import math # ## A Romantic Introduction to Matematical Optimization and to Python # # Note: this story was originally told in the book [Optimization](https://press.princeton.edu/books/hardcover/9780691102870/optimization) and the protagonist there is called Samantha, but we stick to the alphabetical order... # # As the story goes, Alice receives a beautiful rose. She has nothing but a lemonade glass to hold the rose and becomes very distressed when the ensemble falls down. # # Adding a bit of water helps! Not only that helps the rose, but it also helps the stability: glass, with some water, and rose stands! # # Alice thinks: if a bit of water helps, the let us fill the glass! # # However, it tilts and falls, as in the beginning, just much more wet. # # Alice has a problem to solve: what is the _optimal_ level of water for her rose on a lemonade glass? # # She learns from [Archimedes]( https://en.wikipedia.org/wiki/Archimedes) how to compute the _center of gravity_ of the glass with water, which has height # $h = \frac{m_w}{m_w+m_g} h_w + \frac{m_g}{m_w+m_g} h_g$ with: # # * $m_w$ the mass of water # * $m_g$ the mass of glass # * $h_w$ the height of the center of gravity of the water in the glass # * $h_g$ the height of the center of gravity of the glass without water # # Since Alice's glass is $20$ cm tall, $4$ cm wide and weighs $100$ gram, Alice may fill the glass with water up to height $x$ cm, provided that $0 \leq x \leq 20$ since the water must fit in the glass. # # The volume of water is $\pi r^2 x$ with $r$ the radius of the base, i.e. $r=2$. # The volume is therefore $4\pi x$ cubic centimetres. # # Since the density of water can be [taken](https://en.wikipedia.org/wiki/Gram_per_cubic_centimetre) as being $1$ gram per cubic centimeter we have: # # * $m_w = 4\pi x$ # * $m_g = 100$ # * $h_w = \frac{x}{2}$ # * $h_g = \frac{20}{2} = 10$ # # And from here we finally obtain the following formula for the height of the center of gravity of the glass with water: # # $$ # h = \frac{4\pi x}{4\pi x + 100} \frac{x}{2} + \frac{100}{4\pi x + 100} 10 = \frac{4\pi x^2 + 2000}{8\pi x + 200} # $$ # # Alice's problem is therefore: # # $$ # \begin{array}{rl} # \min & \frac{4\pi x^2 + 2000}{8\pi x + 200} \\ # s.t. & x \geq 0 \\ # & x \leq 20 \\ # \end{array} # $$ # ## Analytical solution # # Alice learns from [Fermat]( https://en.wikipedia.org/wiki/Pierre_de_Fermat) that for a function to reach its highest and lowest points inside its domain the derivative must vanish. # # This is a good moment to play with symbolic mathematics in python, we will use [sympy](https://www.sympy.org/en/index.html). # ### With $\pi$ as a number # In[2]: # x is a symbol and pi is a number x = sympy.Symbol('x') pi = math.pi # h is a function of x, and hprime its derivative h = (4*pi*x**2 + 2000)/(8*pi*x+200) hprime = sympy.diff(h, x) # sol is(are) the value(s) of x that solve hprime(x) == 0 sol = sympy.solveset(hprime, x) sol # Above we see that the equation $h^\prime(x) = 0$ has two solutions: one negative and one positive. # Obviously, only the positive may be feasible for Alice. # And, since its value is between $0$ and $20$, it is indeed feasible. 
# # You may recall that the sign of the second derivative tells you whether the root of the first derivative is a *maximum*, a *minimum* or a *saddle point*. # In[3]: opt = max(sol) sympy.diff(hprime, x).subs(x,opt).evalf() # Since $h^{\prime\prime}(\text{opt}) > 0$ it is indeed a (local) **minimum**. # ### With $\pi$ as a symbol # In[4]: # now pi is a symbol, just like x pi = sympy.Symbol('pi') # we redefine h using the same right-hand-side code as before, # but now with x and pi as symbols h = (4*pi*x**2 + 2000)/(8*pi*x + 200) # to have the drivative on the symbol pi we need it from the new version of h hprime = sympy.diff(h, x) solution = sympy.solveset(sympy.diff(h, x), x ) solution # ### From a symbolic $\pi$ to a numeric $\pi$ # In[5]: s = max(solution.subs(pi, math.pi).evalf()) print(s) # ### A picture says more than thousand words # In[6]: import matplotlib.pyplot as plt import numpy as np def plot_alice(h, s, start, stop, width=18, height=8): plt.rcParams["figure.figsize"] = (18,8) x = sympy.Symbol('x') f = sympy.lambdify(x, h.subs( pi, math.pi)) x = np.linspace(start=start,stop=stop,num=100) y = f(x) plt.plot(x,y,label='$'+sympy.latex(h)+'$',linewidth=3) plt.plot(s,f(s), 'ro', label='optimum', markersize=12) plt.legend() plt.show() # In[7]: plot_alice( h, s, 0, 20 ) # ## What if we only care about the numerical solution? # ### Introducing `pyomo` # # This is the moment to meet: # * mathematical models expressed in `python`, using `pyomo`, # * powerful numerical optimization algorithms and how to use them. # # We will see that `pyomo` completely separates modeling from solving, which allows us to switch solver without recoding! # ### Notebook dependencies requiring installation on `colab` # # Note that [this notebook](https://nbviewer.jupyter.org/github/jckantor/ND-Pyomo-Cookbook/blob/master/notebooks/01.02-Running-Pyomo-on-Google-Colab.ipynb) explains how to run `Pyomo` on Google Colab. # For a complete overview please check the [cookbook](https://jckantor.github.io/ND-Pyomo-Cookbook/). # In[8]: import shutil if not shutil.which('pyomo'): get_ipython().system('pip install -q pyomo') assert(shutil.which('pyomo')) # In[9]: import pyomo.environ as pyo alice = pyo.ConcreteModel('Alice') alice.h = pyo.Var(bounds=(0,20)) @alice.Objective(sense=pyo.minimize) def cog(m): return (4*math.pi*alice.h**2 + 2000)/(8*math.pi*alice.h + 200) alice.pprint() # We will use `ipopt`. We refer again to [this notebook](https://nbviewer.jupyter.org/github/jckantor/ND-Pyomo-Cookbook/blob/master/notebooks/01.02-Running-Pyomo-on-Google-Colab.ipynb) explains how to run `Pyomo` **and how to install solvers** on Google Colab. For a complete overview please check the [cookbook](https://jckantor.github.io/ND-Pyomo-Cookbook/). # In[10]: import sys if 'google.colab' in sys.modules: get_ipython().system("wget -N -q 'https://ampl.com/dl/open/ipopt/ipopt-linux64.zip'") get_ipython().system('unzip -o -q ipopt-linux64') # In[11]: results = pyo.SolverFactory('ipopt').solve(alice) print(results.solver.status, results.solver.termination_condition ) alice.display() # ## Conclusion # # This notebook shows how to solve Alice's problem: finding the most stable amount of water in a vase. # # The notebook shows how to solve the problem analytically with `sympy`, how to use `matplotlib` to visualize the function and the optimum. And how to model Alice's problem on `pyomo` and solve it with `ipopt` both at [neos](https://neos-server.org/neos/solvers/index.html) and "locally" at your own Colab session. 
# ## Last remarks
#
# This notebook deferred the installation of the packages it needs to the moment they were first used. This was deliberate, but subsequent notebooks will normally list all their dependencies at the top, in the part we often call the _preamble_. Furthermore, the `colab` dependencies will be streamlined in future notebooks.

# In[ ]:
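# As the conclusion mentions [neos](https://neos-server.org/neos/solvers/index.html), the cell below is a minimal sketch of solving the very same model remotely instead of with a locally installed `ipopt`. It only runs when the `NEOS_EMAIL` environment variable is set (recent `pyomo` versions require a valid email address for NEOS jobs), so treat it as an optional extra rather than part of the main flow.

# In[ ]:


import os

if os.environ.get('NEOS_EMAIL'):
    neos = pyo.SolverManagerFactory('neos')
    results = neos.solve(alice, opt='ipopt')
    print(results.solver.status, results.solver.termination_condition)
    alice.display()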
# -*- coding: utf-8 -*- # @Time : 2019-08-21 15:53 # @Author : Kai Zhang # @Email : [email protected] # @File : ex1-IslandNumber_2.py # @Software: PyCharm """ 广度优先 https://leetcode-cn.com/problems/number-of-islands/solution/dfs-bfs-bing-cha-ji-python-dai-ma-java-dai-ma-by-l/ """ from typing import List from collections import deque class Solution: # x-1,y # x,y-1 x,y x,y+1 # x+1,y # 方向数组,它表示了相对于当前位置的 4 个方向的横、纵坐标的偏移量,这是一个常见的技巧 directions = [(-1, 0), (0, -1), (1, 0), (0, 1)] def numIslands(self, grid: List[List[str]]) -> int: m = len(grid) # 特判 if m == 0: return 0 n = len(grid[0]) marked = [[False for _ in range(n)] for _ in range(m)] count = 0 # 从第 1 行、第 1 格开始,对每一格尝试进行一次 DFS 操作 for i in range(m): for j in range(n): # 只要是陆地,且没有被访问过的,就可以使用 BFS 发现与之相连的陆地,并进行标记 if not marked[i][j] and grid[i][j] == '1': # count 可以理解为连通分量,你可以在广度优先遍历完成以后,再计数, # 即这行代码放在【位置 1】也是可以的 count += 1 queue = deque() queue.append((i, j)) # 注意:这里要标记上已经访问过 marked[i][j] = True while queue: cur_x, cur_y = queue.popleft() # 得到 4 个方向的坐标 for direction in self.directions: new_i = cur_x + direction[0] new_j = cur_y + direction[1] # 如果不越界、没有被访问过、并且还要是陆地,我就继续放入队列,放入队列的同时,要记得标记已经访问过 if 0 <= new_i < m and 0 <= new_j < n and not marked[new_i][new_j] and grid[new_i][new_j] == '1': queue.append((new_i, new_j)) #【特别注意】在放入队列以后,要马上标记成已经访问过,语义也是十分清楚的:反正只要进入了队列,你迟早都会遍历到它 # 而不是在出队列的时候再标记 #【特别注意】如果是出队列的时候再标记,会造成很多重复的结点进入队列,造成重复的操作,这句话如果你没有写对地方,代码会严重超时的 marked[new_i][new_j] = True #【位置 1】 return count if __name__ == '__main__': grid = [['1', '1', '1', '1', '0'], ['1', '1', '0', '1', '0'], ['1', '1', '0', '0', '0'], ['0', '0', '0', '0', '0']] # grid = [["1", "1", "1", "1", "1", "0", "1", "1", "1", "1", "1", "1", "1", "1", "1", "0", "1", "0", "1", "1"], # ["0", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "0", "1", "1", "1", "1", "1", "0"], # ["1", "0", "1", "1", "1", "0", "0", "1", "1", "0", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1"], # ["1", "1", "1", "1", "0", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1"], # ["1", "0", "0", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1"], # ["1", "0", "1", "1", "1", "1", "1", "1", "0", "1", "1", "1", "0", "1", "1", "1", "0", "1", "1", "1"], # ["0", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "0", "1", "1", "0", "1", "1", "1", "1"], # ["1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "0", "1", "1", "1", "1", "0", "1", "1"], # ["1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "0", "1", "1", "1", "1", "1", "1", "1", "1", "1"], # ["1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1"], # ["0", "1", "1", "1", "1", "1", "1", "1", "0", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1"], # ["1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1"], # ["1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1"], # ["1", "1", "1", "1", "1", "0", "1", "1", "1", "1", "1", "1", "1", "0", "1", "1", "1", "1", "1", "1"], # ["1", "0", "1", "1", "1", "1", "1", "0", "1", "1", "1", "0", "1", "1", "1", "1", "0", "1", "1", "1"], # ["1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "0", "1", "1", "1", "1", "1", "1", "0"], # ["1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "0", "1", "1", "1", "1", "0", "0"], # ["1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1"], # ["1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", 
"1", "1", "1", "1", "1", "1", "1", "1"], # ["1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1"]] solution = Solution() result = solution.numIslands(grid) print(result)
#!/usr/bin/env python # -*- coding: utf-8 -*- # File : src/ampel/contrib/hu/examples/t0/ExampleFilter.py # License : BSD-3-Clause # Author : vb <[email protected]> # Date : 14.12.2017 # Last Modified Date: 22.09.2018 # Last Modified By : vb <[email protected]> from ampel.base.abstract.AbsAlertFilter import AbsAlertFilter from ampel.pipeline.logging.AmpelLogger import AmpelLogger class ExampleFilter(AbsAlertFilter): """ REQUIREMENTS: ------------- A T0 filter class must (otherwise exception will be throwed): * inherit the abstract parent class 'AbsAlertFilter' * implement the following two functions: -> __init__(self, on_match_t2_units, base_config=None, run_config=None, logger=None) -> apply(self, ampel_alert) """ def __init__(self, on_match_t2_units, base_config=None, run_config=None, logger=None): """ Mandatory implementation. See the jupyter notebook "Understanding T0 Filters" Parameters: 'on_match_t2_units': list of t2 unit ids (strings) 'base_config': dict instance loaded from the ampel config 'run_config': dict instance loaded from the ampel config 'logger': logger instance (python module logging) """ # Instance variable holding reference to provider logger self.logger = AmpelLogger.get_logger() if logger is None else logger # Logging example self.logger.info("Please use this logger object for logging purposes") self.logger.debug("The log entries emitted by this logger will be stored into the Ampel DB") self.logger.debug("This logger is to be used 'as is', please don't change anything :)") # TODO: explain self.on_match_t2_units = on_match_t2_units # Example: 'magpsf' (see the jupyter notebook "Understanding T0 Filters") self.filter_field = base_config['attrName'] # Example: 18 (see the jupyter notebook "Understanding T0 Filters") self.threshold = run_config['threshold'] def apply(self, ampel_alert): """ Mandatory implementation. To exclude the alert, return *None* To accept it, either * return self.on_match_t2_units * return a custom list of t2 unit ids (strings) -> the custom list must be a subset of self.on_match_t2_units, it cannot contain other/new t2 unit ids. In this example filter, any measurement of a transient brighter than a fixed magnitude threshold will result in this transient being inserted into ampel. """ # One way of filtering alerts based on fixed mag threshold # (for other means of achiving the same results, please # see the jupyter notebook "Understanding T0 Filters") for pp in ampel_alert.get_photopoints(): # Example: # self.filter_field: "magpsf" # self.threshold: 18 if pp[self.filter_field] < self.threshold : return self.on_match_t2_units return None
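# ------------------------------------------------------------------------------
# Hedged usage sketch (not part of the Ampel distribution): how the filter above
# could be exercised with hand-written configs and a stand-in alert object. The
# t2 unit name, the config values and the DummyAlert class are invented here
# purely for illustration of the apply() contract described in the docstrings.
if __name__ == "__main__":

    import logging

    class DummyAlert:
        """Stand-in exposing the single method the filter relies on."""

        def __init__(self, photopoints):
            self._photopoints = photopoints

        def get_photopoints(self):
            return self._photopoints

    my_filter = ExampleFilter(
        on_match_t2_units=["T2_EXAMPLE"],          # invented t2 unit id
        base_config={"attrName": "magpsf"},
        run_config={"threshold": 18},
        logger=logging.getLogger("ExampleFilterDemo"),
    )

    # one photopoint brighter (numerically smaller) than 18 mag -> accepted
    print(my_filter.apply(DummyAlert([{"magpsf": 17.2}, {"magpsf": 19.0}])))
    # nothing brighter than 18 mag -> rejected (None)
    print(my_filter.apply(DummyAlert([{"magpsf": 19.5}])))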
# Code generated by lark_sdk_gen. DO NOT EDIT. from pylark.lark_request import RawRequestReq, _new_method_option from pylark import lark_type, lark_type_sheet, lark_type_approval import attr import typing import io @attr.s class GetAttendanceUserAllowedRemedyReq(object): employee_type: lark_type.EmployeeType = attr.ib( factory=lambda: lark_type.EmployeeType(), metadata={"req_type": "query", "key": "employee_type"}, ) # 请求体中的 user_id 的员工工号类型,必选字段,可用值:【employee_id(员工employeeId),employee_no(员工工号)】,示例值:"employee_id" user_id: str = attr.ib( default="", metadata={"req_type": "json", "key": "user_id"} ) # 用户 ID remedy_date: int = attr.ib( default=0, metadata={"req_type": "json", "key": "remedy_date"} ) # 查询补卡的日期 @attr.s class GetAttendanceUserAllowedRemedyRespUserAllowedRemedys(object): user_id: str = attr.ib( default="", metadata={"req_type": "json", "key": "user_id"} ) # 用户 ID remedy_date: int = attr.ib( default=0, metadata={"req_type": "json", "key": "remedy_date"} ) # 补卡日期 is_free_punch: bool = attr.ib( factory=lambda: bool(), metadata={"req_type": "json", "key": "is_free_punch"} ) # 是否为自由班次,若为自由班次,则不用选择考虑第几次上下班,直接选择补卡时间即可 punch_no: int = attr.ib( default=0, metadata={"req_type": "json", "key": "punch_no"} ) # 第几次上下班,可用值:【0(第 1 次上下班),1(第 2 次上下班),2(第 3 次上下班)】 work_type: int = attr.ib( default=0, metadata={"req_type": "json", "key": "work_type"} ) # 上班/下班,1:上班,2:下班 punch_status: str = attr.ib( default="", metadata={"req_type": "json", "key": "punch_status"} ) # 打卡状态,可用值【Early(早退),Late(迟到),Lack(缺卡)】 normal_punch_time: str = attr.ib( default="", metadata={"req_type": "json", "key": "normal_punch_time"} ) # 正常的应打卡时间,时间格式为 yyyy-MM-dd HH:mm remedy_start_time: str = attr.ib( default="", metadata={"req_type": "json", "key": "remedy_start_time"} ) # 可选的补卡时间的最小值,时间格式为 yyyy-MM-dd HH:mm remedy_end_time: str = attr.ib( default="", metadata={"req_type": "json", "key": "remedy_end_time"} ) # 可选的补卡时间的最大值,时间格式为 yyyy-MM-dd HH:mm @attr.s class GetAttendanceUserAllowedRemedyResp(object): user_allowed_remedys: GetAttendanceUserAllowedRemedyRespUserAllowedRemedys = ( attr.ib( default=None, metadata={"req_type": "json", "key": "user_allowed_remedys"} ) ) def _gen_get_attendance_user_allowed_remedy_req(request, options) -> RawRequestReq: return RawRequestReq( dataclass=GetAttendanceUserAllowedRemedyResp, scope="Attendance", api="GetAttendanceUserAllowedRemedy", method="POST", url="https://open.feishu.cn/open-apis/attendance/v1/user_task_remedys/query_user_allowed_remedys", body=request, method_option=_new_method_option(options), need_tenant_access_token=True, )
from flask import jsonify from flask_testing import TestCase from src.models import db from src.app import create_app from src.models.task import Task class TaskResourceTestCase(TestCase): def create_app(self): return create_app() def setUp(self): db.create_all() # example task t = Task() t.name = 'test added task' db.session.add(t) db.session.commit() self.task = Task.query.filter_by(name=t.name).first() def tearDown(self): db.session.remove() db.drop_all() def test_task_list_with_none_argument(self): response = self.client.get('/tasks') self.assertEqual(200, response.status_code) def test_task_list_with_bounded_offset(self): response = self.client.get('/tasks/0') self.assertEqual(200, response.status_code) def test_task_list_with_unbounded_offset(self): response = self.client.get('/tasks/10000') self.assertEqual(404, response.status_code)
# coding: utf-8 # /*########################################################################## # # Copyright (c) 2004-2019 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ###########################################################################*/ """ :mod:`silx.gui.plot.actions.io` provides a set of QAction relative of inputs and outputs for a :class:`.PlotWidget`. The following QAction are available: - :class:`CopyAction` - :class:`PrintAction` - :class:`SaveAction` """ from __future__ import division __authors__ = ["V.A. Sole", "T. Vincent", "P. Knobel"] __license__ = "MIT" __date__ = "12/07/2018" from . import PlotAction from silx.io.utils import save1D, savespec, NEXUS_HDF5_EXT from silx.io.nxdata import save_NXdata import logging import sys import os.path from collections import OrderedDict import traceback import numpy from silx.utils.deprecation import deprecated from silx.gui import qt, printer from silx.gui.dialog.GroupDialog import GroupDialog from silx.third_party.EdfFile import EdfFile from silx.third_party.TiffIO import TiffIO from ...utils.image import convertArrayToQImage if sys.version_info[0] == 3: from io import BytesIO else: import cStringIO as _StringIO BytesIO = _StringIO.StringIO _logger = logging.getLogger(__name__) _NEXUS_HDF5_EXT_STR = ' '.join(['*' + ext for ext in NEXUS_HDF5_EXT]) def selectOutputGroup(h5filename): """Open a dialog to prompt the user to select a group in which to output data. :param str h5filename: name of an existing HDF5 file :rtype: str :return: Name of output group, or None if the dialog was cancelled """ dialog = GroupDialog() dialog.addFile(h5filename) dialog.setWindowTitle("Select an output group") if not dialog.exec_(): return None return dialog.getSelectedDataUrl().data_path() class SaveAction(PlotAction): """QAction for saving Plot content. It opens a Save as... dialog. :param plot: :class:`.PlotWidget` instance on which to operate. :param parent: See :class:`QAction`. 
""" SNAPSHOT_FILTER_SVG = 'Plot Snapshot as SVG (*.svg)' SNAPSHOT_FILTER_PNG = 'Plot Snapshot as PNG (*.png)' DEFAULT_ALL_FILTERS = (SNAPSHOT_FILTER_PNG, SNAPSHOT_FILTER_SVG) # Dict of curve filters with CSV-like format # Using ordered dict to guarantee filters order # Note: '%.18e' is numpy.savetxt default format CURVE_FILTERS_TXT = OrderedDict(( ('Curve as Raw ASCII (*.txt)', {'fmt': '%.18e', 'delimiter': ' ', 'header': False}), ('Curve as ";"-separated CSV (*.csv)', {'fmt': '%.18e', 'delimiter': ';', 'header': True}), ('Curve as ","-separated CSV (*.csv)', {'fmt': '%.18e', 'delimiter': ',', 'header': True}), ('Curve as tab-separated CSV (*.csv)', {'fmt': '%.18e', 'delimiter': '\t', 'header': True}), ('Curve as OMNIC CSV (*.csv)', {'fmt': '%.7E', 'delimiter': ',', 'header': False}), ('Curve as SpecFile (*.dat)', {'fmt': '%.10g', 'delimiter': '', 'header': False}) )) CURVE_FILTER_NPY = 'Curve as NumPy binary file (*.npy)' CURVE_FILTER_NXDATA = 'Curve as NXdata (%s)' % _NEXUS_HDF5_EXT_STR DEFAULT_CURVE_FILTERS = list(CURVE_FILTERS_TXT.keys()) + [ CURVE_FILTER_NPY, CURVE_FILTER_NXDATA] DEFAULT_ALL_CURVES_FILTERS = ("All curves as SpecFile (*.dat)",) IMAGE_FILTER_EDF = 'Image data as EDF (*.edf)' IMAGE_FILTER_TIFF = 'Image data as TIFF (*.tif)' IMAGE_FILTER_NUMPY = 'Image data as NumPy binary file (*.npy)' IMAGE_FILTER_ASCII = 'Image data as ASCII (*.dat)' IMAGE_FILTER_CSV_COMMA = 'Image data as ,-separated CSV (*.csv)' IMAGE_FILTER_CSV_SEMICOLON = 'Image data as ;-separated CSV (*.csv)' IMAGE_FILTER_CSV_TAB = 'Image data as tab-separated CSV (*.csv)' IMAGE_FILTER_RGB_PNG = 'Image as PNG (*.png)' IMAGE_FILTER_NXDATA = 'Image as NXdata (%s)' % _NEXUS_HDF5_EXT_STR DEFAULT_IMAGE_FILTERS = (IMAGE_FILTER_EDF, IMAGE_FILTER_TIFF, IMAGE_FILTER_NUMPY, IMAGE_FILTER_ASCII, IMAGE_FILTER_CSV_COMMA, IMAGE_FILTER_CSV_SEMICOLON, IMAGE_FILTER_CSV_TAB, IMAGE_FILTER_RGB_PNG, IMAGE_FILTER_NXDATA) SCATTER_FILTER_NXDATA = 'Scatter as NXdata (%s)' % _NEXUS_HDF5_EXT_STR DEFAULT_SCATTER_FILTERS = (SCATTER_FILTER_NXDATA,) # filters for which we don't want an "overwrite existing file" warning DEFAULT_APPEND_FILTERS = (CURVE_FILTER_NXDATA, IMAGE_FILTER_NXDATA, SCATTER_FILTER_NXDATA) def __init__(self, plot, parent=None): self._filters = { 'all': OrderedDict(), 'curve': OrderedDict(), 'curves': OrderedDict(), 'image': OrderedDict(), 'scatter': OrderedDict()} self._appendFilters = list(self.DEFAULT_APPEND_FILTERS) # Initialize filters for nameFilter in self.DEFAULT_ALL_FILTERS: self.setFileFilter( dataKind='all', nameFilter=nameFilter, func=self._saveSnapshot) for nameFilter in self.DEFAULT_CURVE_FILTERS: self.setFileFilter( dataKind='curve', nameFilter=nameFilter, func=self._saveCurve) for nameFilter in self.DEFAULT_ALL_CURVES_FILTERS: self.setFileFilter( dataKind='curves', nameFilter=nameFilter, func=self._saveCurves) for nameFilter in self.DEFAULT_IMAGE_FILTERS: self.setFileFilter( dataKind='image', nameFilter=nameFilter, func=self._saveImage) for nameFilter in self.DEFAULT_SCATTER_FILTERS: self.setFileFilter( dataKind='scatter', nameFilter=nameFilter, func=self._saveScatter) super(SaveAction, self).__init__( plot, icon='document-save', text='Save as...', tooltip='Save curve/image/plot snapshot dialog', triggered=self._actionTriggered, checkable=False, parent=parent) self.setShortcut(qt.QKeySequence.Save) self.setShortcutContext(qt.Qt.WidgetShortcut) @staticmethod def _errorMessage(informativeText='', parent=None): """Display an error message.""" # TODO issue with QMessageBox size fixed and too small msg = 
qt.QMessageBox(parent) msg.setIcon(qt.QMessageBox.Critical) msg.setInformativeText(informativeText + ' ' + str(sys.exc_info()[1])) msg.setDetailedText(traceback.format_exc()) msg.exec_() def _saveSnapshot(self, plot, filename, nameFilter): """Save a snapshot of the :class:`PlotWindow` widget. :param str filename: The name of the file to write :param str nameFilter: The selected name filter :return: False if format is not supported or save failed, True otherwise. """ if nameFilter == self.SNAPSHOT_FILTER_PNG: fileFormat = 'png' elif nameFilter == self.SNAPSHOT_FILTER_SVG: fileFormat = 'svg' else: # Format not supported _logger.error( 'Saving plot snapshot failed: format not supported') return False plot.saveGraph(filename, fileFormat=fileFormat) return True def _getAxesLabels(self, item): # If curve has no associated label, get the default from the plot xlabel = item.getXLabel() or self.plot.getXAxis().getLabel() ylabel = item.getYLabel() or self.plot.getYAxis().getLabel() return xlabel, ylabel @staticmethod def _selectWriteableOutputGroup(filename, parent): if os.path.exists(filename) and os.path.isfile(filename) \ and os.access(filename, os.W_OK): entryPath = selectOutputGroup(filename) if entryPath is None: _logger.info("Save operation cancelled") return None return entryPath elif not os.path.exists(filename): # create new entry in new file return "/entry" else: SaveAction._errorMessage('Save failed (file access issue)\n', parent=parent) return None def _saveCurveAsNXdata(self, curve, filename): entryPath = self._selectWriteableOutputGroup(filename, parent=self.plot) if entryPath is None: return False xlabel, ylabel = self._getAxesLabels(curve) return save_NXdata( filename, nxentry_name=entryPath, signal=curve.getYData(copy=False), axes=[curve.getXData(copy=False)], signal_name="y", axes_names=["x"], signal_long_name=ylabel, axes_long_names=[xlabel], signal_errors=curve.getYErrorData(copy=False), axes_errors=[curve.getXErrorData(copy=True)], title=self.plot.getGraphTitle()) def _saveCurve(self, plot, filename, nameFilter): """Save a curve from the plot. :param str filename: The name of the file to write :param str nameFilter: The selected name filter :return: False if format is not supported or save failed, True otherwise. """ if nameFilter not in self.DEFAULT_CURVE_FILTERS: return False # Check if a curve is to be saved curve = plot.getActiveCurve() # before calling _saveCurve, if there is no selected curve, we # make sure there is only one curve on the graph if curve is None: curves = plot.getAllCurves() if not curves: self._errorMessage("No curve to be saved", parent=self.plot) return False curve = curves[0] if nameFilter in self.CURVE_FILTERS_TXT: filter_ = self.CURVE_FILTERS_TXT[nameFilter] fmt = filter_['fmt'] csvdelim = filter_['delimiter'] autoheader = filter_['header'] else: # .npy or nxdata fmt, csvdelim, autoheader = ("", "", False) xlabel, ylabel = self._getAxesLabels(curve) if nameFilter == self.CURVE_FILTER_NXDATA: return self._saveCurveAsNXdata(curve, filename) try: save1D(filename, curve.getXData(copy=False), curve.getYData(copy=False), xlabel, [ylabel], fmt=fmt, csvdelim=csvdelim, autoheader=autoheader) except IOError: self._errorMessage('Save failed\n', parent=self.plot) return False return True def _saveCurves(self, plot, filename, nameFilter): """Save all curves from the plot. :param str filename: The name of the file to write :param str nameFilter: The selected name filter :return: False if format is not supported or save failed, True otherwise. 
""" if nameFilter not in self.DEFAULT_ALL_CURVES_FILTERS: return False curves = plot.getAllCurves() if not curves: self._errorMessage("No curves to be saved", parent=self.plot) return False curve = curves[0] scanno = 1 try: xlabel = curve.getXLabel() or plot.getGraphXLabel() ylabel = curve.getYLabel() or plot.getGraphYLabel(curve.getYAxis()) specfile = savespec(filename, curve.getXData(copy=False), curve.getYData(copy=False), xlabel, ylabel, fmt="%.7g", scan_number=1, mode="w", write_file_header=True, close_file=False) except IOError: self._errorMessage('Save failed\n', parent=self.plot) return False for curve in curves[1:]: try: scanno += 1 xlabel = curve.getXLabel() or plot.getGraphXLabel() ylabel = curve.getYLabel() or plot.getGraphYLabel(curve.getYAxis()) specfile = savespec(specfile, curve.getXData(copy=False), curve.getYData(copy=False), xlabel, ylabel, fmt="%.7g", scan_number=scanno, write_file_header=False, close_file=False) except IOError: self._errorMessage('Save failed\n', parent=self.plot) return False specfile.close() return True def _saveImage(self, plot, filename, nameFilter): """Save an image from the plot. :param str filename: The name of the file to write :param str nameFilter: The selected name filter :return: False if format is not supported or save failed, True otherwise. """ if nameFilter not in self.DEFAULT_IMAGE_FILTERS: return False image = plot.getActiveImage() if image is None: qt.QMessageBox.warning( plot, "No Data", "No image to be saved") return False data = image.getData(copy=False) # TODO Use silx.io for writing files if nameFilter == self.IMAGE_FILTER_EDF: edfFile = EdfFile(filename, access="w+") edfFile.WriteImage({}, data, Append=0) return True elif nameFilter == self.IMAGE_FILTER_TIFF: tiffFile = TiffIO(filename, mode='w') tiffFile.writeImage(data, software='silx') return True elif nameFilter == self.IMAGE_FILTER_NUMPY: try: numpy.save(filename, data) except IOError: self._errorMessage('Save failed\n', parent=self.plot) return False return True elif nameFilter == self.IMAGE_FILTER_NXDATA: entryPath = self._selectWriteableOutputGroup(filename, parent=self.plot) if entryPath is None: return False xorigin, yorigin = image.getOrigin() xscale, yscale = image.getScale() xaxis = xorigin + xscale * numpy.arange(data.shape[1]) yaxis = yorigin + yscale * numpy.arange(data.shape[0]) xlabel, ylabel = self._getAxesLabels(image) interpretation = "image" if len(data.shape) == 2 else "rgba-image" return save_NXdata(filename, nxentry_name=entryPath, signal=data, axes=[yaxis, xaxis], signal_name="image", axes_names=["y", "x"], axes_long_names=[ylabel, xlabel], title=plot.getGraphTitle(), interpretation=interpretation) elif nameFilter in (self.IMAGE_FILTER_ASCII, self.IMAGE_FILTER_CSV_COMMA, self.IMAGE_FILTER_CSV_SEMICOLON, self.IMAGE_FILTER_CSV_TAB): csvdelim, filetype = { self.IMAGE_FILTER_ASCII: (' ', 'txt'), self.IMAGE_FILTER_CSV_COMMA: (',', 'csv'), self.IMAGE_FILTER_CSV_SEMICOLON: (';', 'csv'), self.IMAGE_FILTER_CSV_TAB: ('\t', 'csv'), }[nameFilter] height, width = data.shape rows, cols = numpy.mgrid[0:height, 0:width] try: save1D(filename, rows.ravel(), (cols.ravel(), data.ravel()), filetype=filetype, xlabel='row', ylabels=['column', 'value'], csvdelim=csvdelim, autoheader=True) except IOError: self._errorMessage('Save failed\n', parent=self.plot) return False return True elif nameFilter == self.IMAGE_FILTER_RGB_PNG: # Get displayed image rgbaImage = image.getRgbaImageData(copy=False) # Convert RGB QImage qimage = convertArrayToQImage(rgbaImage[:, :, :3]) if 
qimage.save(filename, 'PNG'): return True else: _logger.error('Failed to save image as %s', filename) qt.QMessageBox.critical( self.parent(), 'Save image as', 'Failed to save image') return False def _saveScatter(self, plot, filename, nameFilter): """Save an image from the plot. :param str filename: The name of the file to write :param str nameFilter: The selected name filter :return: False if format is not supported or save failed, True otherwise. """ if nameFilter not in self.DEFAULT_SCATTER_FILTERS: return False if nameFilter == self.SCATTER_FILTER_NXDATA: entryPath = self._selectWriteableOutputGroup(filename, parent=self.plot) if entryPath is None: return False scatter = plot.getScatter() x = scatter.getXData(copy=False) y = scatter.getYData(copy=False) z = scatter.getValueData(copy=False) xerror = scatter.getXErrorData(copy=False) if isinstance(xerror, float): xerror = xerror * numpy.ones(x.shape, dtype=numpy.float32) yerror = scatter.getYErrorData(copy=False) if isinstance(yerror, float): yerror = yerror * numpy.ones(x.shape, dtype=numpy.float32) xlabel = plot.getGraphXLabel() ylabel = plot.getGraphYLabel() return save_NXdata( filename, nxentry_name=entryPath, signal=z, axes=[x, y], signal_name="values", axes_names=["x", "y"], axes_long_names=[xlabel, ylabel], axes_errors=[xerror, yerror], title=plot.getGraphTitle()) def setFileFilter(self, dataKind, nameFilter, func, index=None, appendToFile=False): """Set a name filter to add/replace a file format support :param str dataKind: The kind of data for which the provided filter is valid. One of: 'all', 'curve', 'curves', 'image', 'scatter' :param str nameFilter: The name filter in the QFileDialog. See :meth:`QFileDialog.setNameFilters`. :param callable func: The function to call to perform saving. Expected signature is: bool func(PlotWidget plot, str filename, str nameFilter) :param bool appendToFile: True to append the data into the selected file. :param integer index: Index of the filter in the final list (or None) """ assert dataKind in ('all', 'curve', 'curves', 'image', 'scatter') if appendToFile: self._appendFilters.append(nameFilter) # first append or replace the new filter to prevent colissions self._filters[dataKind][nameFilter] = func if index is None: # we are already done return # get the current ordered list of keys keyList = list(self._filters[dataKind].keys()) # deal with negative indices if index < 0: index = len(keyList) + index if index < 0: index = 0 if index >= len(keyList): # nothing to be done, already at the end txt = 'Requested index %d impossible, already at the end' % index _logger.info(txt) return # get the new ordered list oldIndex = keyList.index(nameFilter) del keyList[oldIndex] keyList.insert(index, nameFilter) # build the new filters newFilters = OrderedDict() for key in keyList: newFilters[key] = self._filters[dataKind][key] # and update the filters self._filters[dataKind] = newFilters return def getFileFilters(self, dataKind): """Returns the nameFilter and associated function for a kind of data. :param str dataKind: The kind of data for which the provided filter is valid. On of: 'all', 'curve', 'curves', 'image', 'scatter' :return: {nameFilter: function} associations. 
:rtype: collections.OrderedDict """ assert dataKind in ('all', 'curve', 'curves', 'image', 'scatter') return self._filters[dataKind].copy() def _actionTriggered(self, checked=False): """Handle save action.""" # Set-up filters filters = OrderedDict() # Add image filters if there is an active image if self.plot.getActiveImage() is not None: filters.update(self._filters['image'].items()) # Add curve filters if there is a curve to save if (self.plot.getActiveCurve() is not None or len(self.plot.getAllCurves()) == 1): filters.update(self._filters['curve'].items()) if len(self.plot.getAllCurves()) >= 1: filters.update(self._filters['curves'].items()) # Add scatter filters if there is a scatter # todo: CSV if self.plot.getScatter() is not None: filters.update(self._filters['scatter'].items()) filters.update(self._filters['all'].items()) # Create and run File dialog dialog = qt.QFileDialog(self.plot) dialog.setOption(dialog.DontUseNativeDialog) dialog.setWindowTitle("Output File Selection") dialog.setModal(1) dialog.setNameFilters(list(filters.keys())) dialog.setFileMode(dialog.AnyFile) dialog.setAcceptMode(dialog.AcceptSave) def onFilterSelection(filt_): # disable overwrite confirmation for NXdata types, # because we append the data to existing files if filt_ in self._appendFilters: dialog.setOption(dialog.DontConfirmOverwrite) else: dialog.setOption(dialog.DontConfirmOverwrite, False) dialog.filterSelected.connect(onFilterSelection) if not dialog.exec_(): return False nameFilter = dialog.selectedNameFilter() filename = dialog.selectedFiles()[0] dialog.close() if '(' in nameFilter and ')' == nameFilter.strip()[-1]: # Check for correct file extension # Extract file extensions as .something extensions = [ext[ext.find('.'):] for ext in nameFilter[nameFilter.find('(')+1:-1].split()] for ext in extensions: if (len(filename) > len(ext) and filename[-len(ext):].lower() == ext.lower()): break else: # filename has no extension supported in nameFilter, add one if len(extensions) >= 1: filename += extensions[0] # Handle save func = filters.get(nameFilter, None) if func is not None: return func(self.plot, filename, nameFilter) else: _logger.error('Unsupported file filter: %s', nameFilter) return False def _plotAsPNG(plot): """Save a :class:`Plot` as PNG and return the payload. :param plot: The :class:`Plot` to save """ pngFile = BytesIO() plot.saveGraph(pngFile, fileFormat='png') pngFile.flush() pngFile.seek(0) data = pngFile.read() pngFile.close() return data class PrintAction(PlotAction): """QAction for printing the plot. It opens a Print dialog. Current implementation print a bitmap of the plot area and not vector graphics, so printing quality is not great. :param plot: :class:`.PlotWidget` instance on which to operate. :param parent: See :class:`QAction`. """ def __init__(self, plot, parent=None): super(PrintAction, self).__init__( plot, icon='document-print', text='Print...', tooltip='Open print dialog', triggered=self.printPlot, checkable=False, parent=parent) self.setShortcut(qt.QKeySequence.Print) self.setShortcutContext(qt.Qt.WidgetShortcut) def getPrinter(self): """The QPrinter instance used by the PrintAction. :rtype: QPrinter """ return printer.getDefaultPrinter() @property @deprecated(replacement="getPrinter()", since_version="0.8.0") def printer(self): return self.getPrinter() def printPlotAsWidget(self): """Open the print dialog and print the plot. 
Use :meth:`QWidget.render` to print the plot :return: True if successful """ dialog = qt.QPrintDialog(self.getPrinter(), self.plot) dialog.setWindowTitle('Print Plot') if not dialog.exec_(): return False # Print a snapshot of the plot widget at the top of the page widget = self.plot.centralWidget() painter = qt.QPainter() if not painter.begin(self.getPrinter()): return False pageRect = self.getPrinter().pageRect() xScale = pageRect.width() / widget.width() yScale = pageRect.height() / widget.height() scale = min(xScale, yScale) painter.translate(pageRect.width() / 2., 0.) painter.scale(scale, scale) painter.translate(-widget.width() / 2., 0.) widget.render(painter) painter.end() return True def printPlot(self): """Open the print dialog and print the plot. Use :meth:`Plot.saveGraph` to print the plot. :return: True if successful """ # Init printer and start printer dialog dialog = qt.QPrintDialog(self.getPrinter(), self.plot) dialog.setWindowTitle('Print Plot') if not dialog.exec_(): return False # Save Plot as PNG and make a pixmap from it with default dpi pngData = _plotAsPNG(self.plot) pixmap = qt.QPixmap() pixmap.loadFromData(pngData, 'png') xScale = self.getPrinter().pageRect().width() / pixmap.width() yScale = self.getPrinter().pageRect().height() / pixmap.height() scale = min(xScale, yScale) # Draw pixmap with painter painter = qt.QPainter() if not painter.begin(self.getPrinter()): return False painter.drawPixmap(0, 0, pixmap.width() * scale, pixmap.height() * scale, pixmap) painter.end() return True class CopyAction(PlotAction): """QAction to copy :class:`.PlotWidget` content to clipboard. :param plot: :class:`.PlotWidget` instance on which to operate :param parent: See :class:`QAction` """ def __init__(self, plot, parent=None): super(CopyAction, self).__init__( plot, icon='edit-copy', text='Copy plot', tooltip='Copy a snapshot of the plot into the clipboard', triggered=self.copyPlot, checkable=False, parent=parent) self.setShortcut(qt.QKeySequence.Copy) self.setShortcutContext(qt.Qt.WidgetShortcut) def copyPlot(self): """Copy plot content to the clipboard as a bitmap.""" # Save Plot as PNG and make a QImage from it with default dpi pngData = _plotAsPNG(self.plot) image = qt.QImage.fromData(pngData, 'png') qt.QApplication.clipboard().setImage(image)
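# ------------------------------------------------------------------------------
# Hedged usage sketch (not part of silx itself): an illustration of the
# SaveAction.setFileFilter() API documented above, registering an extra curve
# output format on an existing SaveAction instance. The JSON format and both
# helper functions below are invented here for demonstration purposes only.
import json


def _save_active_curve_as_json(plot, filename, nameFilter):
    """Dump the active (or only) curve of *plot* to *filename* as JSON."""
    curve = plot.getActiveCurve()
    if curve is None:
        curves = plot.getAllCurves()
        if not curves:
            return False
        curve = curves[0]
    payload = {"x": curve.getXData(copy=False).tolist(),
               "y": curve.getYData(copy=False).tolist()}
    with open(filename, "w") as fd:
        json.dump(payload, fd)
    return True


def register_json_curve_filter(save_action):
    """Add the hypothetical JSON filter at the top of the curve filter list."""
    save_action.setFileFilter(dataKind='curve',
                              nameFilter='Curve as JSON (*.json)',
                              func=_save_active_curve_as_json,
                              index=0)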
def leiaint(msg):
    # Keep asking until the user types a valid integer, then return it.
    ok = False
    while True:
        n = str(input(msg))
        if n.isnumeric():
            valor = int(n)
            ok = True
        else:
            print('\033[0;31mErro! Digite um número inteiro valido:\033[m ')
        if ok:
            break
    return valor


n = leiaint('Digite um número: ')
print(f'Você acabou de digitar o número {n}')
"""middlewares used with python social auth.""" from django.conf import settings from cms.models import Page from cms.utils import get_language_from_request from social_django.middleware import ( # isort:skip SocialAuthExceptionMiddleware as BaseSocialAuthExceptionMiddleware, ) SOCIAL_ERROR_REVERSE_ID = getattr(settings, "SOCIAL_ERROR_REVERSE_ID", None) class SocialAuthExceptionMiddleware(BaseSocialAuthExceptionMiddleware): """Middleware extending social-auth middleware, overriding get_redirect_uri.""" def get_redirect_uri(self, request, exception): """ Check if an error page exists and returns its url. Fallback on parent function otherwise. """ try: page = Page.objects.get(reverse_id=SOCIAL_ERROR_REVERSE_ID) except Page.DoesNotExist: return super.get_redirect_uri(request, exception) else: return page.get_absolute_url(language=get_language_from_request(request))
from django.apps import AppConfig class HqqReportConfig(AppConfig): name = 'hqq_report'
""" [2015-04-24] Challenge #211 [Hard] Hungry puppies https://www.reddit.com/r/dailyprogrammer/comments/33ow0c/20150424_challenge_211_hard_hungry_puppies/ #Description Annie has a whole bunch of puppies. They're lovable but also very rambunctious. One day, spur of the moment, Annie decides to get them all treats. She is looking forward to how happy they will all be, and getting ready to serve them the treats, when she realizes: the treats are not all the same size! This is disastrous! The puppies, knowing the drill, have already lined themselves up in a neat line to receive their treats, so Annie must figure out how to best distribute the unevenly-sized treats so as to make as many puppies happy as possible. The puppies' jealous reactions to uneven treat distribution is straightforward: - If a puppy receives a bigger treat than both its neighbors do, it is happy (+1 happiness). - If a puppy receives a smaller treat than both its neighbors do, it is sad (-1 happiness). - If a puppy does not fit in either of the above categories, it is merely content. This means any puppy with at least one neighbor with the same size treat, or any puppy with one neighbor with a bigger treat and one with a smaller treat. Note that the puppies on either end of the line only have a single neighbor to look at, so in their case their mood depends on whether that single neighbor received a bigger, smaller, or equal treat. Write a program for Annie to recommend a treat distribution that maximizes puppy happiness. #Formal inputs &amp; outputs #Input The input is a single line of positive integers representing the sizes of the treats Annie purchased. For example: 1 1 1 1 1 2 2 3 Assume there are as many puppies as there are treats. In this case, there are 8 puppies to be served 8 treats of 3 different sizes. #Output The output must provide two facts. First, it must display what the maximum achievable happiness is, as a single integer on its own line 3 Then, it must specify a treat ordering that achieves this number. 2 1 1 2 1 1 1 3 The puppies on either end of the queue get bigger treats than their sole neighbors, so they are happy. The one in the middle receives a bigger treat than both its neighbors, so it as well is happy. No puppy received a treat that is smaller than both its neighbors', so no puppies are unhappy. Thus, 3 happy puppies minus 0 unhappy puppies results in 3 happiness. Pictorally: 2 1 1 2 1 1 1 3 :) :| :| :) :| :| :| :) An example of a bad solution would be: 1 2 2 1 1 1 3 1 The puppies on either end of the line are sad, since their only neighbors have bigger treats, while there is a single happy puppy (the one with the size 3 treat), since it was the only one that had a treat bigger than its neighbors'. This results in a sub-optimal score of -1. Again, pictorally: 1 2 2 1 1 1 3 1 :( :| :| :| :| :| :) :( Note that it may be possible for there to be several different orderings of the treats that give the maximum happiness. As long as you print out one of them, it doesn't matter *which* one. #Example inputs and outputs ##Input 1: 1 2 2 3 3 3 4 ##Output 1 2 3 1 3 2 2 3 4 ##Input 2: 1 1 2 3 3 3 3 4 5 5 ##Output 2: 4 5 3 3 5 3 3 4 1 1 2 #Challenge inputs ##Challenge input 1 1 1 2 3 3 3 3 4 5 5 ##Challenge input 2 1 1 2 2 3 4 4 5 5 5 6 6 #Bonus 1 1 2 2 2 2 2 2 3 4 4 4 5 5 5 6 6 6 7 7 8 8 9 9 9 9 9 9 9 9 #Finally This lovely little problem was submitted by /u/Blackshell to /r/dailyprogrammer_ideas, and for his hard work, he has been rewarded with with a gold medal! That means he's a pretty cool dude! 
Do you want to be as cool as /u/Blackshell? Head on over to /r/dailyprogrammer_ideas, and add a suggestion for a challenge! """ def main(): pass if __name__ == "__main__": main()
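# ------------------------------------------------------------------------------
# Hedged sketch (one possible brute-force approach, not a reference solution for
# the challenge): score an ordering according to the happiness rules in the
# statement above and search all distinct orderings. The helper names below are
# invented here; the search is exponential, so it is only usable for small
# inputs such as the worked examples.
from itertools import permutations


def happiness(treats):
    """Net happiness: +1 for a treat strictly bigger than all of its neighbours,
    -1 for a treat strictly smaller than all of them."""
    score = 0
    for i, size in enumerate(treats):
        neighbours = list(treats[max(0, i - 1):i]) + list(treats[i + 1:i + 2])
        if all(size > n for n in neighbours):
            score += 1
        elif all(size < n for n in neighbours):
            score -= 1
    return score


def best_ordering(treats):
    """Exhaustive search over distinct orderings of the treats."""
    return max(set(permutations(treats)), key=happiness)


# happiness((2, 1, 1, 2, 1, 1, 1, 3)) == 3, matching the worked example above.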
"""Protocol definitions.""" # http://gearman.org/protocol/ import struct REQ = b'\x00REQ' RES = b'\x00RES' CAN_DO = 1 CANT_DO = 2 RESET_ABILITIES = 3 PRE_SLEEP = 4 # unused NOOP = 6 SUBMIT_JOB = 7 JOB_CREATED = 8 GRAB_JOB = 9 NO_JOB = 10 JOB_ASSIGN = 11 WORK_STATUS = 12 WORK_COMPLETE = 13 WORK_FAIL = 14 GET_STATUS = 15 ECHO_REQ = 16 ECHO_RES = 17 SUBMIT_JOB_BG = 18 ERROR = 19 STATUS_RES = 20 SUBMIT_JOB_HIGH = 21 SET_CLIENT_ID = 22 CAN_DO_TIMEOUT = 23 ALL_YOURS = 24 WORK_EXCEPTION = 25 OPTION_REQ = 26 OPTION_RES = 27 WORK_DATA = 28 WORK_WARNING = 29 GRAB_JOB_UNIQ = 30 JOB_ASSIGN_UNIQ = 31 SUBMIT_JOB_HIGH_BG = 32 SUBMIT_JOB_LOW = 33 SUBMIT_JOB_LOW_BG = 34 SUBMIT_JOB_SCHED = 35 SUBMIT_JOB_EPOCH = 36 # Magic, type, data size PKT_FMT = ">III" HEADER_LEN = struct.calcsize(PKT_FMT) NULL = b'\00'
from django.shortcuts import render from .models import Image, Location, Category # Create your views here. def home(request): dispimages = Image.objects.all().order_by('id') dispBylocation = Location.objects.all() dispBycategory = Category.objects.all() return render(request, "index.html", {'images': dispimages, 'locations': dispBylocation, 'categories': dispBycategory}) def image(request, image_id): modimage = Image.objects.get(id=image_id) modtitle = image return render(request, 'index.html', {'image': modimage, 'title': modtitle}) def search(request): if 'search' in request.GET and request.GET["search"]: # search by lowercase searched_term = request.GET.get("search").lower() searched_images = Image.filter_by_category(searched_term) message = f"{searched_term}" locations = Location.objects.all() return render(request, 'result.html', {"message": message, "images": searched_images, 'locations': locations}) else: locations = Location.objects.all() message = "Sorry we have found '0' search result" return render(request, 'result.html', {"message": message, 'locations': locations})
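# ------------------------------------------------------------------------------
# Hedged sketch (not part of this app's shipped code): URL patterns that would
# wire up the three views above; the route names and paths are assumptions.
#
#   # urls.py
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path("", views.home, name="home"),
#       path("image/<int:image_id>/", views.image, name="image"),
#       path("search/", views.search, name="search"),
#   ]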
"""Tox envlist manipulation.""" import os try: from tox.reporter import warning except ImportError: warning = lambda s: None from tox.config import _split_env as split_env from tox.config import ParseIni, SectionReader, testenvprefix BEFORE = 1 AFTER = 2 class _ParseIniWrapper(ParseIni): def __init__(self, config, ini): self.config = config self._cfg = ini def _filter_factors(name, factors): name_items = set(split_env(name)) new_factors = [] for factor in factors: if factor not in name_items: name_items.add(factor) new_factors.append(factor) return new_factors def AFTER(name, factors): factors = _filter_factors(name, factors) return name + "-" + "-".join(factors) def BEFORE(name, factors): factors = _filter_factors(name, factors) return "-".join(factors) + "-" + name def _add_to_each(s, factors, position=AFTER): items = split_env(s) items = [position(item, factors) for item in items] return ",".join(items) def add_factors(config, factors, position=AFTER): ini = config._cfg config_ini = _ParseIniWrapper(config, ini) for envname, envconfig in list(config.envconfigs.items()): newname = position(envname, factors) env_factors = envconfig.factors.copy() for factor in factors: env_factors.add(factor) reader = SectionReader( envname, ini, fallbacksections=["testenv"], factors=env_factors ) reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir) reader.addsubstitutions(toxworkdir=config.toxworkdir) reader.addsubstitutions(distdir=config.distdir) reader.addsubstitutions(distshare=config.distshare) if hasattr(config, "temp_dir"): reader.addsubstitutions(temp_dir=config.temp_dir) section = "{}{}".format(testenvprefix, envname) newenv = config_ini.make_envconfig( newname, section, reader._subs, config, replace=True ) config.envconfigs[newname] = newenv # envlist if "TOXENV" in os.environ: os.environ["TOXENV"] = _add_to_each(os.environ["TOXENV"], factors, position) if config.option.env: config.option.env = [ _add_to_each(item, factors, position) for item in config.option.env ] config.envlist = [position(item, factors) for item in config.envlist] if hasattr(config, "envlist_default"): config.envlist_default = [ position(item, factors) for item in config.envlist_default ]
#!/usr/bin/env python3 """Merge and process CESAR output files. After the CESAR part there would appear numerous files. This script applies parse_cesar_bdb function to each file and then does the following: 1) Bed annotation track for query. 2) Nucleotide and protein fasta files. 3) Saves exons metadata into a tsv file. 4) Saves a list of problematic projections. """ import sys import argparse import os from collections import defaultdict try: from modules.parse_cesar_output import classify_exon from modules.common import eprint from modules.common import die from modules.common import split_proj_name except ImportError: from parse_cesar_output import classify_exon from common import eprint from common import die from common import split_proj_name __author__ = "Bogdan Kirilenko, 2020." __version__ = "1.0" __email__ = "[email protected]" __credits__ = ["Michael Hiller", "Virag Sharma", "David Jebb"] # constants MAX_SCORE = 1000 MAX_COLOR = 255 PID_HQ_THR = 65 BLOSUM_HQ_THR = 35 Q_HEADER_FIELDS_NUM = 12 BLACK = "0,0,0" DEFAULT_SCORE = 1000 FRAGM_ID = -1 FRAGM_ID_TEXT = "FRAGMENT" CHROM_NONE = "None" # header for exons meta data file META_HEADER = "\t".join( "gene exon_num chain_id act_region exp_region" " in_exp pid blosum gap class paralog q_mark".split() ) def parse_args(): """Read args, check.""" app = argparse.ArgumentParser() app.add_argument("input_dir", help="Directory containing output BDB files") app.add_argument("output_bed", help="Save pre_final bed12 file to...") app.add_argument("output_fasta", help="Save fasta fasta to...") app.add_argument("meta_data", help="Save exons metadata to...") app.add_argument("prot_fasta", help="Save protein fasta to...") app.add_argument("codon_fasta", help="Save codon alignment fasta to...") app.add_argument("skipped", help="Save skipped genes") app.add_argument("--output_trash", default=None, help="Save deleted exons") app.add_argument( "--fragm_data", default=None, help="For each bed fragment save range of included exons", ) app.add_argument( "--exclude", default=None, help="File containing a list of transcripts to exclude", ) # print help if there are no args if len(sys.argv) < 2: app.print_help() sys.exit(0) args = app.parse_args() # print help if there are no args return args def read_fasta(fasta_line, v=False): """Read fasta, return dict and type.""" fasta_data = fasta_line.split(">") eprint(f"fasta_data[0] is:\n{fasta_data[0]}") if v else None eprint(f"fasta_data[1] is:\n{fasta_data[1]}") if v else None if fasta_data[0] != "": # this is a bug eprint("ERROR! Cesar output is corrupted") # eprint(f"Issue detected in the following string:\n{fasta_line}") eprint(f"fasta_data[0]: {fasta_data[0]}") die("Abort") del fasta_data[0] # remove it "" we don't need that sequences = {} # accumulate data here order = [] # to have ordered list # there is no guarantee that dict will contain elements in the # same order as they were added for elem in fasta_data: raw_lines = elem.split("\n") # it must be first ['capHir1', 'ATGCCGCGCCAATTCCCCAAGCTGA... 
] header = raw_lines[0] # separate nucleotide-containing lines lines = [x for x in raw_lines[1:] if x != "" and not x.startswith("!")] if len(lines) == 0: # it is a mistake - empty sequence --> get rid of continue fasta_content = "".join(lines) sequences[header] = fasta_content order.append(header) return sequences, order def read_region(region): """Return convenient region representation.""" try: chrom, grange = region.split(":") start = int(grange.split("-")[0]) end = int(grange.split("-")[1]) return {"chrom": chrom, "start": start, "end": end} except ValueError: return {"chrom": "N", "start": 1, "end": 1} def split_ex_reg_in_chrom__direction(exon_list, curr_key): """Split_ex_reg_in_chrom function helper. Fill result dict for a direction.""" ret = defaultdict(list) ret[curr_key].append(exon_list[0]) pieces_num = len(exon_list) chrom, chrom_n = curr_key for i in range(1, pieces_num): prev = exon_list[i - 1] curr = exon_list[i] if prev[1] < curr[1]: # this is fine, the same order pass else: # initiate new bucket chrom_n += 1 curr_key = (chrom, chrom_n) ret[curr_key].append(curr) return ret, curr_key def split_ex_reg_in_chrom(exon_regions): """For fragmented genes split exons in different buckets. Use chromosome and ordering. """ # first: split to chrom: regions dict chrom_to_pieces = defaultdict(list) for ex_num, ex_reg in exon_regions.items(): chrom = ex_reg["chrom"] start = ex_reg["start"] end = ex_reg["end"] piece = (ex_num, start, end) chrom_to_pieces[chrom].append(piece) chrom_n_to_pieces = defaultdict(list) # second: fix wrong scaffold assemblies # a toy example, let's say we have (exon_num, start_pos) # (1, 1000), (2, 1500), (3, 4000), (5, 100), (6, 500) # exons 5 and 6 start earlier than exon 1 # -> [1, 2, 3] and [5, 6] go to different buckets for chrom, pieces in chrom_to_pieces.items(): chrom_n = 1 curr_key = (chrom, chrom_n) if len(pieces) == 1: chrom_n_to_pieces[curr_key] = pieces continue direct_pieces = sorted([x for x in pieces if x[1] < x[2]], key=lambda x: x[0]) revert_pieces = sorted( [x for x in pieces if x[1] > x[2]], key=lambda x: x[0], reverse=True ) both_dirs = direct_pieces and revert_pieces if direct_pieces: dir_dct, curr_key = split_ex_reg_in_chrom__direction( direct_pieces, curr_key ) chrom_n_to_pieces.update(dir_dct) if revert_pieces: if both_dirs: # need to add another sequence chrom_n = curr_key[1] chrom_n += 1 curr_key = (chrom, chrom_n) rev_dct, curr_key = split_ex_reg_in_chrom__direction( revert_pieces, curr_key ) chrom_n_to_pieces.update(rev_dct) return chrom_n_to_pieces def parse_cesar_bdb(arg_input, v=False, exclude_arg=None): """Parse CESAR bdb file core function.""" in_ = open(arg_input, "r") # read cesar bdb file # two \n\n divide each unit of information content = [x for x in in_.read().split("#") if x] in_.close() # GLP-related data is already filtered out by cesar_runner # get set of excluded genes exclude = set() if exclude_arg is None else exclude_arg # initiate collectors bed_lines = [] # save bed lines here skipped = [] # save skipper projections here pred_seq_chain = {} # for nucleotide sequences to fasta t_exon_seqs = defaultdict(dict) # reference exon sequences wrong_exons = [] # exons that are predicted but actually deleted/missing all_meta_data = [META_HEADER] # to collect exons meta data prot_data = [] # protein sequences codon_data = [] # codon sequences bed_track_and_exon_nums = [] # for fragments: keep list of saved exons for elem in content: # one elem - one CESAR call (one ref transcript and >=1 chains) elem_lines = [x for x in 
elem.split("\n") if x != ""] # now loop gene-by-gene gene = elem_lines[0].replace("#", "") if gene in exclude: skipped.append(f"{gene}\tfound in the exclude list") continue eprint(f"Reading gene {gene}") if v else None cesar_out = "\n".join(elem_lines[1:]) # basically this is a fasta file with headers # saturated with different information sequences, order = read_fasta(cesar_out, v=v) # initiate dicts to fill later ranges_chain, chain_dir = defaultdict(dict), {} pred_seq_chain[gene] = defaultdict(dict) # split fasta headers in different classes # query, ref and prot sequence headers are explicitly marked query_headers = [h for h in order if h.endswith("query_exon")] ref_headers = [h for h in order if h.endswith("reference_exon")] prot_ids = [h for h in order if "PROT" in h] codon_ids = [h for h in order if "CODON" in h] # parse reference exons, quite simple for header in ref_headers: # one header for one exon # fields look like this: # FIELD_1 | FIELD_2 | FIELD_3\n header_fields = [s.replace(" ", "") for s in header.split("|")] exon_num = int(header_fields[1]) # 0-based! exon_seq = sequences[header].replace( "-", "" ) # header is also a key for seq dict t_exon_seqs[gene][exon_num] = exon_seq # save protein data for prot_id in prot_ids: prot_seq = sequences[prot_id] prot_line = f">{prot_id}\n{prot_seq}\n" prot_data.append(prot_line) # save codon alignment data for codon_id in codon_ids: codon_seq = sequences[codon_id] codon_line_line = f">{codon_id}\n{codon_seq}\n" codon_data.append(codon_line_line) # get gene: exons dict to trace deleted exons gene_chain_exon_status = defaultdict(dict) # parse query headers for header in query_headers: # the most complicatd part: here we extract not only the # nucleotide sequence but also coordinates and other features header_fields = [s.replace(" ", "") for s in header.split("|")] if len(header_fields) != Q_HEADER_FIELDS_NUM: continue # ref exon? 
# extract metadata, parse query header trans = header_fields[0] exon_num = int(header_fields[1]) chain_id = int(header_fields[2]) exon_region = read_region(header_fields[3]) pid = float(header_fields[4]) # nucleotide %ID blosum = float(header_fields[5]) is_gap = header_fields[6] # asm gap in the expected region exon_class = header_fields[7] # how it aligns to chain exp_region_str = header_fields[8] # expected region in_exp = header_fields[9] # detected in the expected region or not in_exp_b = True if in_exp == "INC" else False # mark that it's paralogous projection: para_annot = True if header_fields[10] == "True" else False stat_key = (trans, chain_id) # projection ID # classify exon, check whether it's deleted/missing exon_decision, q_mark = classify_exon(exon_class, in_exp_b, pid, blosum) # TODO: if region starts with NONE: it also must be deleted if exon_decision is False: # exon is deleted/missing wrong_exons.append(header) # save this data gene_chain_exon_status[stat_key][exon_num] = False else: # exon is not deleted # get/write necessary info gene_chain_exon_status[stat_key][exon_num] = True chain_dir[chain_id] = exon_region["end"] > exon_region["start"] ranges_chain[chain_id][exon_num] = exon_region pred_seq_chain[gene][chain_id][exon_num] = sequences[header] # collect exon meta-data -> write to file later meta_data = "\t".join( [ gene, header_fields[1], header_fields[2], header_fields[3], exp_region_str, in_exp, header_fields[4], header_fields[5], is_gap, exon_class, str(para_annot), q_mark, ] ) all_meta_data.append(meta_data) # check if there are any exons for name, stat in gene_chain_exon_status.items(): any_exons_left = any(stat.values()) if any_exons_left: continue # projection has no exons: log it name_ = f"{name[0]}.{name[1]}" skipped.append(f"{name_}\tall exons are deleted.") # make bed tracks # bit different recipes for fragmented and normal projections if FRAGM_ID in chain_dir: # extract bed file from fragmented gene name = f"{gene}.{FRAGM_ID}" exon_regions = ranges_chain[FRAGM_ID] # exon_regions is a dict exon_num: region_dict # region dict has keys: chrom, start, end # this transcript is split over different chroms/scaffolds # let's look what scaffolds we have, and make a bed file for each chrom_to_pieces = split_ex_reg_in_chrom(exon_regions) for chrom_, pieces in chrom_to_pieces.items(): chrom, _ = chrom_ # same chrom might appear twice block_starts = [] block_sizes = [] # create a bed track for exons on this scaffold # get strand on this particular scaffold of course direct = pieces[0][1] < pieces[0][2] exon_nums_not_sort = [p[0] for p in pieces] exon_nums = ( sorted(exon_nums_not_sort) if direct else sorted(exon_nums_not_sort, reverse=True) ) # chrom_start = exon_regions[exon_nums[0]]["start"] if direct else exon_regions[exon_nums[0]]["end"] # chrom_end = exon_regions[exon_nums[-1]]["end"] if direct else exon_regions[exon_nums[-1]]["start"] regions_here = [exon_regions[i] for i in exon_nums] all_points = [r["start"] for r in regions_here] + [ r["end"] for r in regions_here ] chrom_start = min(all_points) chrom_end = max(all_points) # we do not predict UTRs: thickStart/End = chrom_start/End thickStart = chrom_start thick_end = chrom_end strand = "+" if direct else "-" block_count = len(pieces) # need to convert to "block starts" \ "block sizes" format for exon_num in exon_nums: ex_range = exon_regions[exon_num] block_sizes.append(abs(ex_range["end"] - ex_range["start"])) blockStart = ( ex_range["start"] - chrom_start if direct else ex_range["end"] - chrom_start ) 
block_starts.append(blockStart) # need this as strings to save it in a text file block_starts_str = ",".join(map(str, block_starts)) + "," block_sizes_str = ",".join(map(str, block_sizes)) + "," # join in a bed line bed_list = map( str, [ chrom, chrom_start, chrom_end, name, DEFAULT_SCORE, strand, thickStart, thick_end, BLACK, block_count, block_sizes_str, block_starts_str, ], ) bed_line = "\t".join(bed_list) bed_lines.append(bed_line) exon_nums_one_based_all = [x + 1 for x in exon_nums] exon_num_first = min(exon_nums_one_based_all) exon_num_last = max(exon_nums_one_based_all) exons_range = f"{exon_num_first}-{exon_num_last}" bed_track_to_exons_lst = map( str, [chrom, chrom_start, chrom_end, name, exons_range] ) bed_track_to_exons = "\t".join(bed_track_to_exons_lst) bed_track_and_exon_nums.append(bed_track_to_exons) # ordinary branch: one chain --> one projection for chain_id in chain_dir.keys(): if chain_id == FRAGM_ID: # chain_id = -1 means its' a assembled from fragments continue # go projection-by-projection: fixed gene, loop over chains block_starts = [] block_sizes = [] ranges = { k: v for k, v in ranges_chain[chain_id].items() if v["chrom"] != CHROM_NONE } name = f"{gene}.{chain_id}" # projection name for bed file if len(ranges) == 0: # this projection is completely missing skipped.append(f"{name}\tall exons are deleted.") continue direct = chain_dir[chain_id] exon_nums = ( sorted(ranges.keys()) if direct else sorted(ranges.keys(), reverse=True) ) # get basic coordinates chrom = ranges[exon_nums[0]]["chrom"] chrom_start = ( ranges[exon_nums[0]]["start"] if direct else ranges[exon_nums[0]]["end"] ) chrom_end = ( ranges[exon_nums[-1]]["end"] if direct else ranges[exon_nums[-1]]["start"] ) # we do not predict UTRs: thickStart/End = chrom_start/End thickStart = chrom_start thick_end = chrom_end strand = "+" if direct else "-" block_count = len(exon_nums) # need to convert to "block starts" \ "block sizes" format for exon_num in exon_nums: ex_range = ranges[exon_num] block_sizes.append(abs(ex_range["end"] - ex_range["start"])) blockStart = ( ex_range["start"] - chrom_start if direct else ex_range["end"] - chrom_start ) block_starts.append(blockStart) # need this as strings to save it in a text file block_starts_str = ",".join(map(str, block_starts)) + "," block_sizes_str = ",".join(map(str, block_sizes)) + "," # join in a bed line bed_list = map( str, [ chrom, chrom_start, chrom_end, name, DEFAULT_SCORE, strand, thickStart, thick_end, BLACK, block_count, block_sizes_str, block_starts_str, ], ) bed_line = "\t".join(bed_list) bed_lines.append(bed_line) # arrange fasta content fasta_lines_lst = [] for gene, chain_exon_seq in pred_seq_chain.items(): # write target gene info t_gene_seq_dct = t_exon_seqs.get(gene) if t_gene_seq_dct is None: # no sequence data for this transcript? eprint(f"Warning! 
Missing data for {gene}") skipped.append(f"{gene}\tmissing data after cesar stage") continue # We have sequence fragments split between different exons t_exon_nums = sorted(t_gene_seq_dct.keys()) t_header = ">ref_{0}\n".format(gene) t_seq = "".join([t_gene_seq_dct[num] for num in t_exon_nums]) + "\n" # append data to fasta strings fasta_lines_lst.append(t_header) fasta_lines_lst.append(t_seq) # and query info for chain_id, exon_seq in chain_exon_seq.items(): track_header = ">{0}.{1}\n".format(gene, chain_id) exon_nums = sorted(exon_seq.keys()) # also need to assemble different exon sequences seq = "".join([exon_seq[num] for num in exon_nums]) + "\n" fasta_lines_lst.append(track_header) fasta_lines_lst.append(seq) # save corrupted exons as bed-6 track # to make it possible to save them and visualize in the browser trash_exons = [] for elem in wrong_exons: elem_fields = [s.replace(" ", "") for s in elem.split("|")] # need to fill the following: # chrom, start, end, name, score, strand gene_name = elem_fields[0] exon_num = elem_fields[1] chain_id = elem_fields[2] label = ".".join([gene_name, exon_num, chain_id]) grange = elem_fields[3].split(":") try: chrom, (start, end) = grange[0], grange[1].split("-") except ValueError: # wrongly mapped exon continue strand = "+" score = str(int(float(elem_fields[4]) * 10)) bed_6 = "\t".join([chrom, start, end, label, score, strand]) + "\n" trash_exons.append(bed_6) # join output strings meta_str = "\n".join(all_meta_data) + "\n" skipped_str = "\n".join(skipped) + "\n" prot_fasta = "".join(prot_data) codon_fasta = "".join(codon_data) fasta_lines = "".join(fasta_lines_lst) fragm_bed_exons_str = "\n".join(bed_track_and_exon_nums) + "\n" ret = ( bed_lines, trash_exons, fasta_lines, meta_str, prot_fasta, codon_fasta, skipped_str, fragm_bed_exons_str, ) return ret def get_excluded_genes(exc_arg): """Load set of transcripts to be excluded.""" if exc_arg: f = open(exc_arg, "r") exclude = set(x.rstrip() for x in f) f.close() return exclude else: return set() def merge_cesar_output( input_dir, output_bed, output_fasta, meta_data_arg, skipped_arg, prot_arg, codon_arg, output_trash, fragm_data=None, exclude=None, ): """Merge multiple CESAR output files.""" # check that input dir is correct die(f"Error! {input_dir} is not a dir!") if not os.path.isdir(input_dir) else None # get list of bdb files (output of CESAR part) bdbs = [x for x in os.listdir(input_dir) if x.endswith(".txt")] # get list of excluded transcripts excluded_genes = get_excluded_genes(exclude) # initiate lists for different types of output: bed_summary = [] fasta_summary = [] trash_summary = [] meta_summary = [] prot_summary = [] codon_summary = [] skipped = [] fragm_genes_summary = [] crashed_status = [] task_size = len(bdbs) # extract data for all the files for num, bdb_file in enumerate(bdbs): # parse bdb files one by one bdb_path = os.path.join(input_dir, bdb_file) # check whether this file exists if not os.path.isfile(bdb_path): stat = (bdb_path, "file doesn't exist!") crashed_status.append(stat) continue # and check that this file has size > 0 elif os.stat(bdb_path).st_size == 0: # stat = (bdb_path, "file is empty!") # crashed_status.append(stat) # ok, no output: if something crashed, we will find out continue try: # try to parse data parsed_data = parse_cesar_bdb(bdb_path, exclude_arg=excluded_genes) except AssertionError: # if this happened: some assertion was violated # probably CESAR output data is corrupted parsed_data = (None,) sys.exit(f"Error! 
Failed reading file {bdb_file}") # unpack parsed data tuple: bed_lines = parsed_data[0] trash_exons = parsed_data[1] fasta_lines = parsed_data[2] meta_data = parsed_data[3] prot_fasta = parsed_data[4] codon_fasta = parsed_data[5] skip = parsed_data[6] fragm_bed_exons = parsed_data[7] # if len(bed_lines) == 0: # # actually should not happen, but can # eprint(f"Warning! Cannot extract bed from {bdb_file}") # stat = (bdb_path, "Could not extract bed track") # crashed_status.append(stat) # continue # it is empty # append data to lists bed_summary.append("\n".join(bed_lines) + "\n") fasta_summary.append(fasta_lines) trash_summary.append("".join(trash_exons)) meta_summary.append(meta_data) skipped.append(skip) prot_summary.append(prot_fasta) codon_summary.append(codon_fasta) fragm_genes_summary.append(fragm_bed_exons) eprint(f"Reading file {num + 1}/{task_size}", end="\r") # save output eprint("Saving the output") if len(bed_summary) == 0: # if so, no need to continue eprint("! merge_cesar_output.py:") die("No projections found! Abort.") # save bed, fasta and the rest with open(output_bed, "w") as f: f.write("".join(bed_summary)) with open(output_fasta, "w") as f: f.write("".join(fasta_summary)) with open(meta_data_arg, "w") as f: f.write("\n".join(meta_summary)) with open(skipped_arg, "w") as f: f.write("\n".join(skipped)) with open(prot_arg, "w") as f: f.write("\n".join(prot_summary)) with open(codon_arg, "w") as f: f.write("\n".join(codon_summary)) if output_trash: # if requested: provide trash annotation f = open(output_trash, "w") f.write("".join(trash_summary)) f.close() if fragm_data: # if requested: provide trash annotation f = open(fragm_data, "w") f.write("".join(fragm_genes_summary)) f.close() return crashed_status def main(): """Entry point.""" args = parse_args() merge_cesar_output( args.input_dir, args.output_bed, args.output_fasta, args.meta_data, args.skipped, args.prot_fasta, args.codon_fasta, args.output_trash, fragm_data=args.fragm_data, exclude=args.exclude, ) if __name__ == "__main__": main()
#!/usr/bin/env python3 # Copyright (C) 2017-2019 The btclib developers # # This file is part of btclib. It is subject to the license terms in the # LICENSE file found in the top-level directory of this distribution. # # No part of btclib including this file, may be copied, modified, propagated, # or distributed except according to the terms contained in the LICENSE file. import unittest import os import json from btclib import bip32 from btclib import electrum class TestMnemonicDictionaries(unittest.TestCase): def test_mnemonic(self): lang = "en" entropy = 0x110aaaa03974d093eda670121023cd0772 eversion = 'standard' mnemonic = electrum.mnemonic_from_entropy(entropy, lang, eversion) entr = int(electrum.entropy_from_mnemonic(mnemonic, lang), 2) self.assertLess(entr-entropy, 0xfff) passphrase = '' xversion = b'\x04\x88\xAD\xE4' rootxprv = electrum.rootxprv_from_mnemonic(mnemonic, passphrase, xversion) # TODO: compare with the rootxprv generated by electrum rootxprv2 = electrum.rootxprv_from_entropy(entropy, passphrase, lang, xversion) self.assertEqual(rootxprv2, rootxprv) # unmanaged electrum mnemonic version mnemonic = "ability awful fetch liberty company spatial panda \ hat then canal ball cross video" self.assertRaises(ValueError, electrum.rootxprv_from_mnemonic, mnemonic, passphrase, xversion) #electrum_rootxprv_from_mnemonic(mnemonic, passphrase, xversion) # mnemonic version not in electrum allowed mnemonic versions eversion = 'std' self.assertRaises(ValueError, electrum.mnemonic_from_entropy, entropy, lang, eversion) #electrum_mnemonic_from_entropy(entropy, lang, eversion) def test_vectors(self): filename = "electrum_test_vectors.json" path_to_filename = os.path.join(os.path.dirname(__file__), "./data/", filename) with open(path_to_filename, 'r') as f: test_vectors = json.load(f) f.closed for test_vector in test_vectors: version = test_vector[0] mnemonic = test_vector[1] passphrase = test_vector[2] xpub = test_vector[3] xversion = bip32.PRV_VERSION[0] #FIXME: version / xversion rootxprv = electrum.rootxprv_from_mnemonic(mnemonic, passphrase, xversion) xpub2 = bip32.xpub_from_xprv(rootxprv).decode() self.assertEqual(xpub2, xpub) lang = "en" entr = int(electrum.entropy_from_mnemonic(mnemonic, lang), 2) mnem = electrum.mnemonic_from_entropy(entr, lang, version) self.assertEqual(mnem, mnemonic) if __name__ == "__main__": # execute only if run as a script unittest.main()
# -*- coding: utf-8 -*- from ExtractFQDNFromUrlAndEmail import extract_fqdn import pytest @pytest.mark.parametrize('input,fqdn', [ # noqa: E501 disable-secrets-detection ('http://this.is.test.com', 'this.is.test.com'), ('https://caseapi.phishlabs.com', 'caseapi.phishlabs.com'), # output needs to be bytes string utf-8 encoded (otherwise python loop demisto.results fails) (u'www.bücher.de', u'www.bücher.de'.encode('utf-8')), ('https://urldefense.proofpoint.com/v2/url?u=http-3A__go.getpostman.com_y4wULsdG0h0DDMY0Dv00100&d=DwMFaQ&c=ywDJJevdGcjv4rm9P3FcNg&r=s5kA2oIAQRXsacJiBKmTORIWyRN39ZKhobje2GyRgNs&m=vN1dVSiZvEoM9oExtQqEptm9Dbvq9tnjACDZzrBLaWI&s=zroN7KQdBCPBOfhOmv5SP1DDzZKZ1y9I3x4STS5PbHA&e=', 'go.getpostman.com'), # noqa: E501 ('hxxps://www[.]demisto[.]com', 'www.demisto.com'), ('https://emea01.safelinks.protection.outlook.com/?url=https%3A%2F%2Ftwitter.com%2FPhilipsBeLux&data=02|01||cb2462dc8640484baf7608d638d2a698|1a407a2d76754d178692b3ac285306e4|0|0|636758874714819880&sdata=dnJiphWFhnAKsk5Ps0bj0p%2FvXVo8TpidtGZcW6t8lDQ%3D&reserved=0%3E%5bcid:[email protected]%5d%3C', ''), # noqa: E501 disable-secrets-detection ]) # noqa: E124 def test_extract_domain(input, fqdn): extracted_fqdn = extract_fqdn(input) assert extracted_fqdn == fqdn
# -*- coding: utf-8 -*- from __future__ import unicode_literals # Symlinks to create symlinks = {"Applications": "/Applications"} # Volume icon badge_icon = "icons/kolibri.icns" files = ["dist/Kolibri.app"] # Where to put the icons icon_locations = { "Kolibri.app": (185, 120), "Applications": (550, 120), } background = "icons/Layout.png" window_rect = ((0, 1000), (734, 550))
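# Note (illustrative, not part of the original settings): these variables follow
# the dmgbuild settings-file format, so a disk image would typically be built
# with something like:
#
#   dmgbuild -s <this settings file> "Kolibri" Kolibri.dmg
#
# where the volume name and the output file name are assumptions.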
""" @author Jacob Xie @time 3/11/2021 """ from typing import Optional from sqlalchemy import Table from sqlalchemy.ext.declarative import declarative_base from .connector import Connector Base = declarative_base() class Loader(object): def __init__(self, connector: Connector) -> None: self._conn = connector def table(self, name: str) -> Optional[Table]: return self._conn.meta.tables.get(name) def table_cls(self, name: str) -> Optional[Base]: t = self.table(name) if t is None: return None class Cls(Base): __table__ = t return Cls
import os import re from enum import Enum from canvasapi.canvas import Course, File from canvasapi.exceptions import ResourceDoesNotExist, Unauthorized from colorama import Back, Fore, Style from config import Config from course_files import CourseFiles from utils import file_regex __all__ = ['FileOrganizer'] file_manager = CourseFiles() class FileOrganizer: class By(Enum): MODULE_WITH_FILE = "module_with_file" FILE = "file" MODULE = "module" def __init__(self, course: Course, by: By, config: Config): assert isinstance(by, self.By) self._course = course self._by = by self._config = config def get(self) -> (File, str): if self._by == self.By.MODULE_WITH_FILE: for (file, path) in self._by_module_with_file(): yield (file, path) elif self._by == self.By.FILE: for (file, path) in self._by_file(): yield (file, path) elif self._by == self.By.MODULE: for (file, path) in self._by_module(): yield (file, path) def _by_file(self) -> (File, str): folders = { folder.id: folder.full_name for folder in self._course.get_folders() } for file in file_manager.get_files(self._course): folder = folders[file.folder_id] + "/" if folder.startswith("course files/"): folder = folder[len("course files/"):] yield (file, folder) def _by_module(self) -> (File, str): for module in self._course.get_modules(): # NAME name = re.sub(file_regex, "_", module.name.replace("(", "(").replace(")", ")")) if self._config.CONSOLIDATE_MODULE_SPACE: name = " ".join(name.split()) # IDX module_item_position = module.position - 1 # it begins with 1 idx = str(module_item_position + self._config.MODULE_FOLDER_IDX_BEGIN_WITH) # FORMAT module_name = self._config.MODULE_FOLDER_TEMPLATE.format(NAME=name, IDX=idx) module_item_count = module.items_count print(f" Module {Fore.CYAN}{module_name} ({module_item_count} items){Style.RESET_ALL}") for item in module.get_module_items(): if item.type == "File": yield file_manager.get_file(self._course, item.content_id), module_name elif item.type in ["Page", "Discussion", "Assignment"]: _page_url = item.html_url yield Link(item.title, _page_url), module_name elif item.type == "ExternalUrl": _page_url = item.external_url yield Link(item.title, _page_url), module_name elif item.type == "SubHeader": pass else: if self._config.VERBOSE_MODE: print(f" {Fore.YELLOW}Unsupported item type: {item.type}{Style.RESET_ALL}") def _by_module_with_file(self) -> (File, str): module_files_id = [] try: for (file, path) in self._by_module(): yield (file, path) module_files_id.append(file.id) except (ResourceDoesNotExist, Unauthorized): pass print(f" {Fore.CYAN}File not in module{Style.RESET_ALL}") module_files_id = set(module_files_id) for (file, path) in self._by_file(): if file.id not in module_files_id: yield (file, os.path.join("unmoduled", path)) class Link: def __init__(self, title, url): self.display_name = title self.url = url self.id = None
""" pytest_oot ~~~~~~~~~~~~~~~ Helpers for defining a simple format for object-oriented system-level testing The design is implemented taking pytest_yamlwsgi as a reference. :copyright: 2014-2015 Steven LI <[email protected]> :license: MIT This module is presented as a py.test plugin. It provides two main features: 1. Provide a method to easy write test scripts in a plain format, easy to review and work, and 2. Provide some options for test steps to simplify the test scripts writing. It could make 10 lines python code in one line How it works ------------ py.test has a number of hooks that can be accessed to extend its behaviour. This plugin uses the `pytest_collect_file` hook which is called wih every file available in the test target. The first action of the hook-handler is that it checks if the filename is of the form `test*.oot`, here oot means object-oriented test since in the script file, every step is based on an object to take an action and compare the result. If it finds an appropriate file, the hook-handler will generate a number of tests based on that file. py.test provides a hierarchical system of tests. The top-level of this hierarchy is an object representing the oot file itself. The lower components are represented by further test containers, and subsequent individual tests. The test*.oot file is just like a test*.py file, but in different format, Let's see an example first. (you can get the code from the package source) # Any words after # in a line are just comments test_suite: Trial1 # Identify the test bed file, currently .py file is supported # similar as 'import testbed.py' in test*.py file test_bed: testbed # A case starts from a case_idString, the description is in the bracket # This is to define one case, just like a function or method in a .py file # case_id1 means the function name is "id1" case_id1 (NumberBase add function): # under a case, there could be multiple test steps, one step in one line # step format: obj.method(parameters) operator expected_result options # obj/methods are defined in test bed file # operator supports: # ==(equal to), !=(not equal to), >(larger than), <(less than), >=, <=, # =~(for string, contains, e.g. "hello world" =~ "llo", regex allowed # !~ (not contain) num1.add(3,4,5,6) == 23 -t 3 num1.add(var1, var2, var3) == 18 case_id2 (NumberBase multiple function): num1.multiple(2,4,5) == 100 case_id3 (NumberChange test): # Every line under the case line is a step of a case # there could be multiple lines; each line follows the format: # obj.method([parameter1 [,parameter 2 [, ...]]] operator ExpectedValue -options # For details, see guidance .... 
# options: # --timeout 30 == -t 30: fail if the step could not complete in 30 seconds # --repeat 30 == -r 30: repeat per second if fail until pass, timeout in 30s # --duration 30 == -d 30: duration of the step is 30s, if completed early, just wait until 30s # --expectedfail == -x true == -x: If step fail, then report pass # --skip == -s: just skip this step # num1.add(4) num2.add(3,4,5,6) == 478 num2.multiple(4,5) == 460 -x True -t 12 -r 10 num3.add(3,4,var2) == 1000 --skip -t 25 case_id4 (Reverse String test): string1.range(1,4) == 'dlr' -d 6 case_async1 (To test async actions - timeout) num_async.addw(var100, var100) == 100 num_async.data_sync() -t 15 num_async.get_value() == 300 case_async2 (To test async actions - repeat) num_async.addw(var100, var100) >= 300 num_async.get_value() == 500 --repeat 20 Each case of the test suite should be a subclass of:class:`py.test.collect.Item`, which is a direct subclass, for leaf tests, implementing `runtest()`. """ __author__ = 'Steven LI' import test_steps import pytest import re def pytest_configure(config): test_steps.auto_func_detection(False) def pytest_runtest_setup(item): test_steps.log_new_func(item.name, str(item.fspath)) def pytest_collect_file(parent, path): if path.ext == ".oot" and path.basename.startswith("test"): return TestCaseFile(path, parent) class TestCaseFile(pytest.File): # test_bed: the test bed file # test_cases: an array of the test cases def collect(self): suite_content = self.fspath.open().read() self.__parse_suite(suite_content) # Import objects in the test bed if self.test_bed: # self.objs = __import__(self.test_bed, globals()) # import importlib # self.objs = importlib.import_module(self.test_bed) self.objs = test_steps.init_testbed(self.test_bed) current_line_number = 0 case_number = 0 for case in self.cases: case_id = case[0:case.find(' ')].lstrip() current_line_number += self.case_lines[case_number] case_number += 1 yield TestCaseItem(case_id, self, case, current_line_number) def __parse_suite(self, suite_content): ''' To parse the system case file, see detail of the system test file example :param suite_content: The content of the test case file. 
:return: no return, exception will be raised if the format is not right ''' # split cases, notice that the first element of the cases is about the test suite description # cases = suite_content.split("\ncase_") cases = re.compile('\n[cC]ase[_:]').split(suite_content) self.case_lines = [c.count('\n') + 1 for c in cases] # Deal with the test suite description; cases[0] is about the test suite summary self.name = 'unknown' self.test_bed = None self.suite_attr = {} self.cases = cases[1:] # cases[0] is not a case, actually it's the suite header header = cases[0].split("\n") for line in header: line = line.strip() if len(line) == 0: continue if line[0] == '#': continue colon = line.find(":") if colon == -1: continue # no ':', it is not a key-value pair, ignore it (name, spec) = (line[0:colon], line[colon + 1:].strip()) if name == 'test_suite': self.name = spec elif name == 'test_bed': self.test_bed = spec else: self.suite_attr[name] = spec class TestCaseItem(pytest.Item): ''' A case structure, one case can contain multiple steps ''' # def __init__(self, name, parent, case_dec, steps): # super(TestCaseItem, self).__init__(name, parent) # self.case_dec = case_dec # self.steps = steps def __init__(self, case_id, parent, case_string, line_number): super(TestCaseItem, self).__init__(case_id, parent) header_end = case_string.find('\n') self.case_header = case_string[0:header_end].strip() self.steps = case_string[header_end:].split('\n') self.first_line = line_number self.parent = parent # m = re.match(r'(\w+)\s*\((.*)\)', line1) # (case_id, case_dec) = m.group(1,2) def runtest(self): self.current_step = 0 for step_string in self.steps: # step_string = step_string.strip() # if len(step_string) == 0: continue # if step_string[0] == '#': continue # step_obj = TestStep(step, self) # To create a step_obj with parsing # step_obj.execute() test_steps.steps(step_string, self.parent.objs.__dict__) self.current_step += 1 def repr_failure(self, excinfo): """ called when self.runtest() raises an exception. """ failed_line_number = self.first_line + self.current_step + 1 fail_string = "case_" + self.case_header + "\n".join(self.steps[0:self.current_step]) cur_step_str = self.steps[self.current_step] # fail_string += '\n>' + self.steps[self.current_step][1:] fail_string += '\n>' + cur_step_str[1:] fail_string += "\nE" + ' ' * (len(cur_step_str) - len(cur_step_str.lstrip()) - 1) + excinfo.value.args[1] fail_string += "\n\n%s:" % self.parent.fspath + "%d" % failed_line_number # if isinstance(excinfo.value, TestStepFail): # fail_string += ": TestStepFail" # # elif isinstance(excinfo.value, TestRunTimeError): # fail_string += ": TestRunTimeError" fail_string += ": " + excinfo.value.__class__.__name__ return fail_string def reportinfo(self): ''' Called when there is a failure as a failed case title''' return self.fspath, 0, "%s" % self.case_header
# -*- coding: utf-8 -*- # Created by hkh at 2019-02-19 import lucene from java.io import File from org.apache.lucene.analysis.standard import StandardAnalyzer from org.apache.lucene.store import FSDirectory, RAMDirectory from org.apache.lucene.index import IndexWriter from org.apache.lucene.index import IndexWriterConfig from org.apache.lucene.document import Document from org.apache.lucene.document import Field from org.apache.lucene.document import TextField from org.apache.lucene.index import DirectoryReader from org.apache.lucene.search import IndexSearcher from org.apache.lucene.queryparser.classic import QueryParser from org.apache.lucene.search import ScoreDoc from org.apache.lucene.search.similarities import BM25Similarity import nltk from nltk.tokenize.api import StringTokenizer nltk.download('gutenberg') # nltk.corpus.gutenberg.fileids() # 53,996 sentences - 5400 documents gutenberg_list = ['austen-emma.txt', 'austen-persuasion.txt', 'austen-sense.txt', 'bible-kjv.txt', 'blake-poems.txt', 'bryant-stories.txt', 'burgess-busterbrown.txt', 'carroll-alice.txt', 'chesterton-ball.txt', 'chesterton-brown.txt', 'chesterton-thursday.txt', 'edgeworth-parents.txt', 'melville-moby_dick.txt', 'milton-paradise.txt', 'shakespeare-caesar.txt', 'shakespeare-hamlet.txt', 'shakespeare-macbeth.txt', 'whitman-leaves.txt'] lucene.initVM(vmargs=['-Djava.awt.headless=true']) analyzer = StandardAnalyzer() # store the index in memory # directory = RAMDirectory() # # store the index in File System file = File("/home/hkh/data/gutenbergindex") directory = FSDirectory.open(file.toPath()) config = IndexWriterConfig(analyzer) iwriter = IndexWriter(directory, config) # for txtName in gutenberg_list: # words = nltk.corpus.gutenberg.words(txtName) # sents = " ".join(words).split(".") # print(sents[:100]) # # print("Indexing ", txtName, "...") # # for i in range(0, len(sents), 10): # # text = " ".join(sents[i:i+10]) # # doc = Document() # # doc.add(Field("fieldname", text, TextField.TYPE_STORED)) # # iwriter.addDocument(doc) # # iwriter.close() # now search the index ireader = DirectoryReader.open(directory) isearcher = IndexSearcher(ireader) # set similarity method bm25 = BM25Similarity() isearcher.setSimilarity(bm25) # parse a simple query that searches for "text" parser = QueryParser("fieldname", analyzer) query = parser.parse("her sister was reading") hits = isearcher.search(query, 5).scoreDocs print(len(hits)) for hit in hits: result = isearcher.doc(hit.doc) print("[%8.4f] %s" % (hit.score, result.get("fieldname")))
import os import sys class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' GOOD = '\033[92m' WARNING = '\033[93m' ERROR = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' # Printing With Colors Functions def print_good(text): print bcolors.GOOD + "[*]" + bcolors.ENDC + " " + text def print_nomatch(text): print bcolors.ERROR + "[*]" + bcolors.ENDC + " " + text def print_error(text): print bcolors.ERROR + "\n[*] " + text + bcolors.ENDC + "\n" def print_status(text): print "[*] " + text def printv_good(text, verbose): if verbose: print bcolors.GOOD + "[*]" + bcolors.ENDC + " " + text def printv_nomatch(text, verbose): if verbose: print bcolors.ERROR + "[*]" + bcolors.ENDC + " " + text def printv_error(text, verbose): if verbose: print bcolors.ERROR + "\n[*] " + text + bcolors.ENDC + "\n" def printv_status(text, verbose): if verbose: print "[*] " + text # Input File Checking def check_file(filename): if os.path.exists(filename) == False: print "\n[*] File " + filename + " Does Not Exist!" sys.exit(4) # Show parser help and exit def help_exit(parser): parser.print_help() sys.exit(1) # Print a message, show parser help and exit def print_exit(message, parser): print_error(message) parser.print_help() sys.exit(1)
#!/usr/bin/env python

import turbotutils.network
import requests
import urllib.parse
import json


def get_grants(turbot_host, turbot_api_access_key, turbot_api_secret_key,
               turbot_host_certificate_verification, namespace, api_version):
    """Return the user names that hold grants on the given resource namespace."""
    account_list = []
    api_method = "GET"
    api_url = "/api/%s/resources/%s/grants" % (api_version, namespace)

    response = requests.request(
        api_method,
        urllib.parse.urljoin(turbot_host, api_url),
        auth=(turbot_api_access_key, turbot_api_secret_key),
        verify=turbot_host_certificate_verification,
        headers={
            'content-type': "application/json",
            'cache-control': "no-cache"
        }
    )

    # Convert the response JSON into a Python object
    responseObj = json.loads(response.text)
    for obj in responseObj['items']:
        if 'user' in obj:
            common_name = obj['user']['displayName']
        else:
            # Not all accounts have a user displayName (most commonly
            # nathan@turbot's dummy account); fall back to the user part
            # of the identity URN.
            common_name = obj['identityUrn'].split('::user:')[-1]
        if '_DELETED' in common_name:
            print('Former employee %s found in account %s' % (common_name, namespace))
        account_list.append(common_name)
    return account_list


if __name__ == '__main__':
    # Set to False if you do not have a valid certificate for your Turbot Host
    turbot_host_certificate_verification = True

    # Set to your Turbot Host URL
    turbot_host = turbotutils.get_turbot_host()

    # Get the access and secret key pairs
    (turbot_api_access_key, turbot_api_secret_key) = turbotutils.get_turbot_access_keys()

    # Get the turbot version
    api_version = turbotutils.get_api_version()

    cluster_id = turbotutils.cluster.get_cluster_id(turbot_host, turbot_api_access_key, turbot_api_secret_key,
                                                    turbot_host_certificate_verification, api_version)

    # Get the turbot account numbers
    accounts = turbotutils.cluster.get_turbot_account_ids(turbot_api_access_key, turbot_api_secret_key,
                                                          turbot_host_certificate_verification, turbot_host,
                                                          api_version)

    top_level = 'urn:turbot'
    turbot_account_list = get_grants(turbot_host, turbot_api_access_key, turbot_api_secret_key,
                                     turbot_host_certificate_verification, top_level, api_version)
    cluster_account_list = get_grants(turbot_host, turbot_api_access_key, turbot_api_secret_key,
                                      turbot_host_certificate_verification, cluster_id, api_version)

    for account_id in cluster_account_list:
        if account_id not in turbot_account_list:
            turbot_account_list.append(account_id)

    for turbot_account in accounts:
        namespace = cluster_id + ':' + turbot_account
        account_list = get_grants(turbot_host, turbot_api_access_key, turbot_api_secret_key,
                                  turbot_host_certificate_verification, namespace, api_version)
        for account_id in account_list:
            if account_id not in turbot_account_list:
                turbot_account_list.append(account_id)

    # turbot_account_list = sorted(set(turbot_account_list))
from .Udon_classes import *  # IGNORE_LINE


def _onPlayerJoined(playerApi: VRCSDKBaseVRCPlayerApi):
    UnityEngineDebug.Log(SystemObject(playerApi))
import botocore import pytest from pytest_mock import MockerFixture from dynamodb_session_web import SessionManager from .utility import get_dynamo_resource, TABLE_NAME @pytest.fixture(scope='function') def dynamodb_table(docker_services): # pylint: disable=unused-argument dynamodb = get_dynamo_resource() # Remove table (if it exists) # noinspection PyUnresolvedReferences try: table = dynamodb.Table(TABLE_NAME) table.delete() except botocore.exceptions.ClientError: pass # Create the DynamoDB table. table = dynamodb.create_table( TableName=TABLE_NAME, KeySchema=[{ 'AttributeName': 'id', 'KeyType': 'HASH' }], AttributeDefinitions=[{ 'AttributeName': 'id', 'AttributeType': 'S' }], ProvisionedThroughput={ 'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5 } ) # Wait until the table exists. table.meta.client.get_waiter('table_exists').wait(TableName=TABLE_NAME) yield table.delete() @pytest.fixture def mock_dynamo_set(mocker: MockerFixture): return mocker.patch.object(SessionManager, '_dynamo_set') @pytest.fixture def mock_dynamo_get(mocker: MockerFixture): return mocker.patch.object(SessionManager, '_dynamo_get')
#!/usr/bin/env python # -*- coding: utf-8 -*- # author: Irwin Zaid # vispy: gallery 2:40:4 """ Animate an Image ================ Use a timer to trigger updating an image. This example demonstrates a 3D Texture. The volume contains noise that is smoothed in the z-direction. Shown is one slice through that volume to give the effect of "morphing" noise. """ import numpy as np from vispy.util.transforms import ortho from vispy import gloo from vispy import app from vispy.visuals.shaders import ModularProgram # Shape of image to be displayed D, H, W = 30, 60, 90 # Modulated image img_array = np.random.uniform(0, 0.1, (D, H, W, 3)).astype(np.float32) # Depth slices are dark->light img_array[...] += np.linspace(0, 0.9, D)[:, np.newaxis, np.newaxis, np.newaxis] # Make vertical direction more green moving upward img_array[..., 1] *= np.linspace(0, 1, H)[np.newaxis, :, np.newaxis] # Make horizontal direction more red moving rightward img_array[..., 0] *= np.linspace(0, 1, W)[np.newaxis, np.newaxis, :] # A simple texture quad data = np.zeros(4, dtype=[('a_position', np.float32, 2), ('a_texcoord', np.float32, 2)]) data['a_position'] = np.array([[0, 0], [W, 0], [0, H], [W, H]]) data['a_texcoord'] = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) VERT_SHADER = """ // Uniforms uniform mat4 u_model; uniform mat4 u_view; uniform mat4 u_projection; // Attributes attribute vec2 a_position; attribute vec2 a_texcoord; // Varyings varying vec2 v_texcoord; // Main void main (void) { v_texcoord = a_texcoord; gl_Position = u_projection * u_view * u_model * vec4(a_position,0.0,1.0); } """ FRAG_SHADER = """ uniform $sampler_type u_texture; uniform float i; varying vec2 v_texcoord; void main() { // step through gradient with i, note that slice (depth) comes last here! gl_FragColor = $sample(u_texture, vec3(v_texcoord, i)); gl_FragColor.a = 1.0; } """ class Canvas(app.Canvas): def __init__(self, emulate3d=True): app.Canvas.__init__(self, keys='interactive', size=((W*5), (H*5))) if emulate3d: tex_cls = gloo.TextureEmulated3D else: tex_cls = gloo.Texture3D self.texture = tex_cls(img_array, interpolation='nearest', wrapping='clamp_to_edge') self.program = ModularProgram(VERT_SHADER, FRAG_SHADER) self.program.frag['sampler_type'] = self.texture.glsl_sampler_type self.program.frag['sample'] = self.texture.glsl_sample self.program['u_texture'] = self.texture self.program['i'] = 0.0 self.program.bind(gloo.VertexBuffer(data)) self.view = np.eye(4, dtype=np.float32) self.model = np.eye(4, dtype=np.float32) self.projection = np.eye(4, dtype=np.float32) self.program['u_model'] = self.model self.program['u_view'] = self.view self.projection = ortho(0, W, 0, H, -1, 1) self.program['u_projection'] = self.projection self.i = 0 gloo.set_clear_color('white') self._timer = app.Timer('auto', connect=self.on_timer, start=True) self.show() def on_resize(self, event): width, height = event.physical_size gloo.set_viewport(0, 0, width, height) self.projection = ortho(0, width, 0, height, -100, 100) self.program['u_projection'] = self.projection # Compute the new size of the quad r = width / float(height) R = W / float(H) if r < R: w, h = width, width / R x, y = 0, int((height - h) / 2) else: w, h = height * R, height x, y = int((width - w) / 2), 0 data['a_position'] = np.array( [[x, y], [x + w, y], [x, y + h], [x + w, y + h]]) self.program.bind(gloo.VertexBuffer(data)) def on_timer(self, event): # cycle every 2 sec self.i = (self.i + 1./120.) 
% 1.0 self.update() def on_draw(self, event): gloo.clear(color=True, depth=True) self.program['i'] = 1.9 * np.abs(0.5 - self.i) self.program.draw('triangle_strip') if __name__ == '__main__': # Use emulated3d to switch from an emulated 3D texture to an actual one canvas = Canvas(emulate3d=True) app.run()
import tomodachi import uuid from typing import Any from tomodachi import schedule, aws_sns_sqs_publish from tomodachi.protocol import JsonBase @tomodachi.service class ServiceSendMessage(tomodachi.Service): name = 'example_service_send_message' message_protocol = JsonBase options = { 'aws_sns_sqs': { 'region_name': None, # specify AWS region (example: 'eu-west-1') 'aws_access_key_id': None, # specify AWS access key (example: 'AKIAXNTIENCJIY2STOCI') 'aws_secret_access_key': None # specify AWS secret key }, 'aws_endpoint_urls': { 'sns': None, # For example 'http://localhost:4575' if localstack is used for testing 'sqs': None # For example 'http://localhost:4576' if localstack is used for testing } } @schedule(interval=10, immediately=True) async def send_message_interval(self) -> None: data = str(uuid.uuid4()) self.log('Publishing message "{}" on topic "example-pubsub-new-message"'.format(data)) await aws_sns_sqs_publish(self, data, topic='example-pubsub-new-message', wait=True)
from __future__ import absolute_import, division, print_function, unicode_literals __VERSION__ = "0.0.a7"
import six
import hashlib


def bytes_to_str(s, encoding='utf-8'):
    """Returns a str if a bytes object is given."""
    if six.PY3 and isinstance(s, bytes):
        return s.decode(encoding)
    return s


def get_default_id(request, spider):
    """Returns a stable identifier for a request: the md5 hex digest of its URL."""
    url = request.url
    # hashlib.md5 requires bytes, so encode the URL first
    # (request.url is a text string on Python 3).
    return hashlib.md5(url.encode('utf-8')).hexdigest()
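# Illustrative sanity checks (not part of the original module); the fake
# request class below is a stand-in exposing only the `url` attribute that
# get_default_id relies on.
if __name__ == '__main__':
    assert bytes_to_str(b'scrapy') == 'scrapy'   # bytes are decoded
    assert bytes_to_str('scrapy') == 'scrapy'    # str passes through unchanged

    class _FakeRequest(object):
        url = 'https://example.com/page'

    expected = hashlib.md5(b'https://example.com/page').hexdigest()
    assert get_default_id(_FakeRequest(), spider=None) == expected
    print('ok')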
# # Dates and Times in Python: Dates & Time # Python Techdegree # # Created by Dulio Denis on 12/24/18. # Copyright (c) 2018 ddApps. All rights reserved. # ------------------------------------------------ # Challenge 4: strftime & strptime # ------------------------------------------------ # Challenge Task 1 of 2 # Create a function named to_string that takes a # datetime and gives back a string in the format # "24 September 2012". # ## Examples # to_string(datetime_object) => "24 September 2012" # from_string("09/24/12 18:30", "%m/%d/%y %H:%M") => datetime import datetime def to_string(dt): return dt.strftime("%d %B %Y") # TEST dt = datetime.datetime.now() print(to_string(dt)) # Challenge Task 2 of 2 # Create a new function named from_string that takes two arguments: # a date as a string and an strftime-compatible format string, and # returns a datetime created from them. def from_string(date_string, format_string): return datetime.datetime.strptime(date_string, format_string) # TEST date_string = "24 December 2018" date_format = "%d %B %Y" print(from_string(date_string, date_format))
import logging


class NothingExecutor(object):
    """Executor that logs requested trade actions and ignores them."""

    def __init__(self, models, accountId, trader=None, logger=None):
        if logger is None:
            logger = logging.getLogger()
        self.logger = logger

    def handleOpenLong(self, lot):
        self.logger.warning('Requested opening long position, lot={lot}, ignored.'
                            .format(lot=lot))
        return True

    def handleOpenShort(self, lot):
        self.logger.warning('Requested opening short position, lot={lot}, ignored.'
                            .format(lot=lot))
        return True

    def handleClose(self, position):
        self.logger.warning('Requested closing position={p}, ignored.'
                            .format(p=position))
        return True
from urllib.parse import urlencode, parse_qsl import requests from django.http import JsonResponse, HttpResponseForbidden from mozilla_django_oidc.views import OIDCAuthenticationCallbackView, OIDCAuthenticationRequestView, get_next_url, \ OIDCLogoutView def health(request): return JsonResponse({'result': 'OK'}) class CustomOIDCAuthCallbackView(OIDCAuthenticationCallbackView): def get(self, request): """Override to return the access_token for frontend authentication""" redirect = super().get(request) if redirect.url == self.failure_url: return HttpResponseForbidden() return JsonResponse({'access_token': self.request.session['oidc_access_token']}) class CustomOIDCAuthRequestView(OIDCAuthenticationRequestView): def get(self, request): """Override to return url and session id instead of redirect""" resp = super().get(request) url, query = resp.url.split('?') query = parse_qsl(query) for i in range(len(query)): if query[i][0] == 'redirect_uri': query[i] = ('redirect_uri', self.get_settings('OIDC_REDIRECT_URI')) redirect_url = '{url}?{query}'.format(url=url, query=urlencode(query)) if not request.session.exists(request.session.session_key): request.session.create() return JsonResponse({'url': redirect_url, 'sessionid': request.session.session_key}) class CustomOIDCLogoutView(OIDCLogoutView): def revoke_token(self, request): token = request.headers['Authorization'].split(' ')[1] payload = { 'client_id': self.get_settings('OIDC_RP_CLIENT_ID'), 'client_secret': self.get_settings('OIDC_RP_CLIENT_SECRET'), 'token': token, 'token_type_hint': 'access_token', } response = requests.post( self.get_settings('OIDC_OP_REVOCATION_ENDPOINT'), data=payload, verify=self.get_settings('OIDC_VERIFY_SSL', True), timeout=self.get_settings('OIDC_TIMEOUT', None), proxies=self.get_settings('OIDC_PROXY', None)) response.raise_for_status() print(response) def post(self, request): """Override to revoke token on logout""" super().post(request) self.revoke_token(request) return JsonResponse({'msg': 'Logged out.'})
import os

from ply import lex, yacc


class MyParser(object):
    """
    Base class for a lexer/parser that has the rules defined as methods.
    Class extracted from PLY examples
    """
    tokens = ()
    precedence = ()

    def __init__(self, **kw):
        """
        Constructor for the lex/yacc parser, can receive a debug parameter.

        :param kw: parameters, for now, it receives 'debug'
        """
        self.debug = kw.get('debug', 0)
        self.names = {}
        try:
            modname = os.path.split(os.path.splitext(__file__)[0])[
                1] + "_" + self.__class__.__name__
        except Exception:
            modname = "parser" + "_" + self.__class__.__name__
        self.debugfile = modname + ".dbg"
        self.tabmodule = modname + "_" + "parsetab"
        # print(self.debugfile, self.tabmodule)

        # Build the lexer and parser
        lex.lex(module=self, debug=self.debug)
        yacc.yacc(module=self,
                  debug=self.debug,
                  debugfile=self.debugfile,
                  tabmodule=self.tabmodule)

    def run(self):
        """
        Run the lexer/yacc parser interactively on lines read from stdin.
        """
        while 1:
            try:
                s = input('calc > ')
            except EOFError:
                break
            if not s:
                continue
            yacc.parse(s)

    def run_file(self, file_name: str):
        """
        Run the lexer/yacc parser on a file with instructions, one per line.

        :param file_name: the path and name of the file
        """
        # Minimal implementation mirroring run(): parse every non-empty line.
        with open(file_name) as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                yacc.parse(line)
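# --------------------------------------------------------------------------
# Illustrative subclass (not part of the original module): a minimal concrete
# parser showing how token and grammar rules are supplied as methods and
# attributes on a MyParser subclass, in the style of the PLY "calc" example
# this base class was extracted from. The SumParser name and its tiny grammar
# (integer addition only) are assumptions made for the example.
# --------------------------------------------------------------------------
class SumParser(MyParser):
    tokens = ('NUMBER', 'PLUS')

    precedence = (
        ('left', 'PLUS'),
    )

    # --- lexer rules ---
    t_PLUS = r'\+'
    t_ignore = ' \t'

    def t_NUMBER(self, t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(self, t):
        print("Illegal character %r" % t.value[0])
        t.lexer.skip(1)

    # --- grammar rules ---
    def p_statement_expr(self, p):
        'statement : expression'
        print(p[1])

    def p_expression_plus(self, p):
        'expression : expression PLUS expression'
        p[0] = p[1] + p[3]

    def p_expression_number(self, p):
        'expression : NUMBER'
        p[0] = p[1]

    def p_error(self, p):
        print("Syntax error at %r" % (p.value if p else 'EOF'))


if __name__ == '__main__':
    # Typing "1 + 2 + 3" at the "calc > " prompt prints 6.
    SumParser().run()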
from PIL import Image from io import BytesIO import base64 import cStringIO from subprocess32 import call from tornado.options import options from resizeimage import resizeimage, imageexceptions import os def generate_screenshot(url): call([options.chrome_path, "--headless", "--force-device-scale-factor", "--hide-scrollbars", "--disable-gpu", "--screenshot=screenshots/" + options.name + ".png", "--window-size=1280,800", url]) im = Image.open("screenshots/" + options.name + ".png") buffer = cStringIO.StringIO() im.save(buffer, format="PNG") return base64.b64encode(buffer.getvalue()) def open_png(filename): """ Open image. :param filename: Input filename (PNG image). :return: Base64 encoded representation of the PNG image. """ im = Image.open(filename) buffer = cStringIO.StringIO() im.save(buffer, format="PNG") return base64.b64encode(buffer.getvalue()) def resize_uploaded_image(pngb64): # Open image img = Image.open(BytesIO(base64.b64decode(pngb64))) # Save image img.save("screenshots/" + options.name + ".png") # Resize image try: TARGET_WIDTH = 1280 # Resize the image to the specified width adjusting height to keep the ratio the same (original width must be equal or greater than the specified width) img = resizeimage.resize_width(img, TARGET_WIDTH) except imageexceptions.ImageSizeError: # Do nothing (use the original image) pass # Return image buffer = cStringIO.StringIO() img.save(buffer, format="PNG") return base64.b64encode(buffer.getvalue()) def convert_png_to_jpg_b64(pngb64): im = Image.open(BytesIO(base64.b64decode(pngb64))) im = im.convert("RGB") buffer = cStringIO.StringIO() im.save(buffer, format="JPEG") return base64.b64encode(buffer.getvalue()) def get_screenshot_size(): return os.path.getsize("screenshots/" + options.name + ".png")
import argparse import logging from create_dataset import * if __name__ == '__main__': logging.basicConfig(format='%(asctime)s %(message)s', datefmt='[%Y/%m/%d][%I:%M:%S %p] ', level=logging.INFO) parser = argparse.ArgumentParser(description='Data Loader') parser.add_argument('-name', help='database name') parser.add_argument('-type', help='pickle or lmdb') parser.add_argument('-train', help='training dir') parser.add_argument('-test', help='testing dir') parser.add_argument('-resize', help='resize image') parser.add_argument('-output', help='output dataset dir') parser.add_argument('-split', help='split train/test ratio') parser.add_argument('-channel', help='3 for color img / 1 for grayscale') args = parser.parse_args() if not args.train or not args.output or not args.type: parser.print_help() elif not args.test and not args.split: print 'Error: Need to specify the test dir or split ratio' else: train, test = None, None db_name = args.name train = image_loader(args.train, int(args.resize), int(args.channel)) if not args.test: # split from training data train, test = split_testing(train, float(args.split)) elif not args.split: # load from testing folder test = image_loader(args.test, int(args.resize), int(args.channel)) # Make sure the train and test are holded if not train or not test: print 'Error: training data and testing data are not holded' # Save to different formats if args.type == 'lmdb': create_lmdb(train, test, args.output + '/' + db_name) elif args.type == 'pickle': create_pickle(train, test, args.output + '/' + db_name)
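# Example invocation (illustrative only; the dataset name and paths below are
# assumptions, and "<this script>" stands for whatever this file is saved as):
#
#   python <this script> -name my_dataset -type lmdb -train ./train \
#       -split 0.2 -resize 32 -channel 3 -output ./datasets
#
# When -test is omitted, the -split ratio carves a test set out of the
# training images, as handled in the branch above.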
# -*- coding: utf-8 -*- """ Created on Fri Nov 23 13:43:02 2018 @author: Florian Ulrich Jehn """ import matplotlib.pylab as plt from sklearn.linear_model import LinearRegression from sklearn.preprocessing import PolynomialFeatures import pca import read_attributes_signatures import pandas as pd import seaborn as sns import matplotlib.patches as mpatches def calc_poly_linear_regression(independent, dependent): """ Calculates the coefficient of determination of a linear regresssion for an independent and an dependent variable and returns it """ # Reshape for sklearn independent = independent.values.reshape(-1,1) dependent = dependent.values.reshape(-1,1) # Make the whole thing poly poly = PolynomialFeatures(degree=2) independent_ = poly.fit_transform(independent) # Do the linear regression model = LinearRegression() model.fit(independent_, dependent) # Calculate R2 return model.score(independent_, dependent) def cat_to_num(att_df): """ Changes categorical variables in a dataframe to numerical """ att_df_encode = att_df.copy(deep=True) for att in att_df_encode.columns: if att_df_encode[att].dtype != float: att_df_encode[att] = pd.Categorical(att_df_encode[att]) att_df_encode[att] = att_df_encode[att].cat.codes return att_df_encode def calc_all_linear_regressions(pca_df, att_df): """ Calculates all linear regressions between two dataframes """ r2_df = pd.DataFrame() for j, pc in enumerate(pca_df.columns): # Calculate all the r2 for a principal component pc_list = [] for att in att_df.columns: # Calculate the single r2 score for one principal component # and one catchment attribute r2_score = calc_poly_linear_regression(att_df[att], pca_df[pc]) pc_list.append(r2_score) # Create the dataframe that contains the results current_pc = pd.DataFrame.from_records([pc_list], columns=att_df.columns) r2_df = pd.concat([r2_df, current_pc], axis=0) r2_df.index = pca_df.columns return r2_df def weight_regression_by_var(r2_df, var_percents): """ Weight the r2 scores of the different variables with principal components by the explained variance in percent """ # Transpose for easier looping r2_df = r2_df.transpose() for pc, var in zip(r2_df.columns, var_percents): r2_df[pc] *= var r2_df["r2_weighted"] = r2_df.sum(axis=1) return r2_df["r2_weighted"] def plot_regressions(r2_df_weighted, describer, color_dict, cols_classes): """ Plots the weighted coefficient of determination """ alpha=0.5 ax = plt.gca() r2_df_weighted.sort_values().plot(ax=ax, kind="barh", color="#4C72B0",zorder=3) fig = plt.gcf() fig.tight_layout() fig.set_size_inches(10, 11.7) plt.xlim(0,1) for tick_label in ax.axes.get_yticklabels(): tick_text = tick_label.get_text() tick_label.set_fontsize(14) tick_label.set_color(color_dict[tick_text]) ax.xaxis.grid(color="grey", zorder=0) ax.set_facecolor("white") plt.xlabel("Weigthed Coefficient of Determination", alpha=alpha, fontsize=14) plt.ylabel("Catchment Attributes", alpha=alpha, fontsize=14) plt.setp(ax.get_xticklabels(), alpha=alpha) # Remove the borders for spine in ax.spines.values(): spine.set_visible(False) ax.tick_params(axis=u'both', which=u'both',length=0) # Create the legend handles = [] for att, color in cols_classes.items(): handles.append(mpatches.Patch(color=color, label=att)) legend = ax.legend(handles=handles, frameon=True, fancybox=True, facecolor="white", edgecolor="grey", fontsize=14) for text in legend.get_texts(): text.set_color("grey") fig.set_size_inches(15,10) plt.savefig("r2_scores_ " + describer + ".png", bbox_inches="tight", dpi=300) if __name__ == "__main__": # Dictionary 
for the broad categories color_dict = {"Area": "#D64139", "Mean elevation": "#D64139", "Mean slope": "#D64139", "Fraction of precipitation\nfalling as snow": "royalblue", "Aridity": "royalblue", "Frequency of high\nprecipitation events": "royalblue", "Precipitation seasonality":"royalblue", "Depth to bedrock": "#D6BD39", "Sand fraction": "#D6BD39", "Clay fraction": "#D6BD39", "Forest fraction": "forestgreen", "LAI maximum": "forestgreen", "Green vegetation\nfraction maximum": "forestgreen", "Dominant geological class": "grey", "Subsurface porosity": "grey", "Subsurface permeability": "grey"} cols_classes = {"Climate": "royalblue", "Geology": "grey", "Soil": "#D6BD39", "Topography": "#D64139", "Vegetation": "forestgreen"} variance = 0.8 pca_df = pca.pca_signatures(variance) meta_df = read_attributes_signatures.read_meta() att_df, sig_df = read_attributes_signatures.seperate_attributes_signatures(meta_df) att_df_encode = cat_to_num(att_df) r2_df = calc_all_linear_regressions(pca_df, att_df_encode) var_percents = [0.74567053, 0.18828154] r2_df_weighted = weight_regression_by_var(r2_df, var_percents) plot_regressions(r2_df_weighted, "all", color_dict, cols_classes)
import logging from abc import ABC, abstractmethod from copy import deepcopy from time import monotonic from types import MethodType from typing import Optional from board import BOARD_SQUARES, Board, Loc, PlayerColor from solver import solve_game # type: ignore from termcolor import colored """ Definitions and functions related to Othello players. Attributes ---------- PlayerABC : ABC Abstract base class for Othello players. """ class PlayerABC(ABC): """ ABC for Othello players. Attributes ---------- color : PlayerColor This player's color. logger : logging.Logger A logger for formatting and printing this player's output. """ _initialized = False def initialize(self, color: PlayerColor, ms_total: Optional[int]) -> None: """ Initialize a player's attributes. Used by the game framework; do not overwrite. Parameters ---------- color : PlayerColor This player's color. ms_left : int or None Milliseconds total in this bot's time budget. If None, unlimited time is available. """ self._initialized = True self.color: PlayerColor = color self.ms_total: Optional[int] = ms_total log_name = f"{self.__class__.__name__} ({self.color.value})" self.logger: logging.Logger = logging.getLogger(log_name) if not self.logger.handlers: log_handler = logging.StreamHandler() log_handler.setLevel(logging.DEBUG) log_handler.setFormatter( logging.Formatter( fmt=f"{log_name} %(levelname)s >>- %(message)s", datefmt="%H:%M:%S" ) ) self.logger.addHandler(log_handler) self.logger.setLevel(logging.DEBUG) if ms_total: self.logger.debug( f"Time per move: {2 * ms_total / (BOARD_SQUARES * 1000):.2f} s." ) self.logger.debug("Finished initializing player.") def get_move( self, board: Board, opponent_move: Optional[Loc], ms_left: Optional[int] ) -> Loc: assert self._initialized opp_fmt = "pass" if opponent_move is None else opponent_move.__repr__() self.logger.info(f"Opponent's move: {colored(opp_fmt, 'red')}.") t1 = monotonic() move = self._get_move(board, opponent_move, ms_left) t2 = monotonic() move_count = board.white.popcount + board.black.popcount - 3 move_format = colored(move.__repr__(), "yellow") time_format = colored(f"{t2 - t1:.2f} s", "green") self.logger.info(f"Move {move_count}: {move_format} (time: {time_format}).\n") return move @abstractmethod def _get_move( self, board: Board, opponent_move: Optional[Loc], ms_left: Optional[int] ) -> Loc: """ Get this player's next move. If there are no legal moves, the player must return Loc(-1, -1). Parameters ---------- board : Board The current board. opponent_move : Loc or None Opponent's last move, if applicable. If this is the first move of the game or the opponent passed, this will be None. ms_left : int or None Milliseconds left in this bot's time budget. If None, unlimited time is available. Returns ------- Loc The player's next move. """ raise NotImplementedError def with_depth_solver(self, depth: int, time: int) -> "PlayerABC": """ Wrap a player such that after few enough spots are left empty, moves are made with the endgame solver. Parameters ---------- depth : int Depth to start solving endgame at. time : int How many milliseconds to reserve for endgame solving. Returns ------- PlayerABC A copy of this player with a different _get_move function. 
""" player = deepcopy(self) __get_move = player._get_move def _get_move( self: PlayerABC, board: Board, opponent_move: Optional[Loc], ms_left: Optional[int], ) -> Loc: empties = BOARD_SQUARES - board.white.popcount - board.black.popcount if empties <= depth: if not board.has_moves(self.color): return Loc.pass_loc() self.logger.info( f"Running solver at depth: {colored(str(empties), 'cyan')}." ) mine, opp = board.player_view(self.color) x, y, _ = solve_game(mine.piecearray, opp.piecearray) return Loc(x, y) if ms_left: ms_left -= time return __get_move(board, opponent_move, ms_left) player._get_move = MethodType(_get_move, player) # type: ignore return player
def longestUniqueSubsttr(string):
    """Return the length of the longest substring without repeating characters.

    Uses a sliding window: `dic` maps each character to the index of its last
    occurrence, `start` marks the beginning of the current window, and the
    window start jumps past the previous occurrence whenever a repeat is seen.
    Runs in O(n) time.
    """
    dic = {}
    max_len = 0
    start = 0
    for end in range(len(string)):
        if string[end] in dic:
            # Character seen before: move the window start past its last occurrence.
            start = max(start, dic[string[end]] + 1)
        dic[string[end]] = end
        max_len = max(max_len, end - start + 1)
    return max_len
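# Illustrative self-test (not part of the original snippet): exercises the
# sliding-window function on a few well-known inputs.
if __name__ == '__main__':
    assert longestUniqueSubsttr("abcabcbb") == 3  # "abc"
    assert longestUniqueSubsttr("bbbbb") == 1     # "b"
    assert longestUniqueSubsttr("pwwkew") == 3    # "wke"
    assert longestUniqueSubsttr("") == 0
    print("all checks passed")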
import time import serial from serial.tools import list_ports import BlinkyTape import IcspUtils # reset a strip to the booloader def resetToBootloader(portName): print "Resetting to bootloader on %s"%portName bt = BlinkyTape.BlinkyTape(portName) bt.displayColor(20,0,20) bt.resetToBootloader() # Write the latest firmware to the strip def flashFirmware(portName): print "Writing firmware on %s"%portName result = IcspUtils.loadFlash(portName, "firmware/BlinkyTape-Application.hex", "avr109") if(result[0] != 0): print result pass def showTestPattern(portName): print "Showing test pattern on %s"%portName try: bt = BlinkyTape.BlinkyTape(portName) while True: bt.displayColor(255,0,0) time.sleep(1) bt.displayColor(0,255,0) time.sleep(1) bt.displayColor(0,0,255) time.sleep(1) except: pass def runSearch(): ports = list_ports.comports() time.sleep(.5) for port in ports: if port[2] == "USB VID:PID=2341:8036": resetToBootloader(port[0]) elif port[2] == "USB VID:PID=1d50:606c": flashFirmware(port[0]) elif port[2] == "USB VID:PID=1d50:605e": showTestPattern(port[0]) else: print "Unrecognized port %s"%port[2] time.sleep(1) while True: runSearch()
""" Communication Utility """ # Copyright (c) 2020. Lightly AG and its affiliates. # All Rights Reserved import io import os import time import random import numpy as np from PIL import Image, ImageFilter # the following two lines are needed because # PIL misidentifies certain jpeg images as MPOs from PIL import JpegImagePlugin JpegImagePlugin._getmp = lambda: None MAXIMUM_FILENAME_LENGTH = 255 RETRY_MAX_BACKOFF = 32 RETRY_MAX_RETRIES = 5 def retry(func, *args, **kwargs): """Repeats a function until it completes successfully or fails too often. Args: func: The function call to repeat. args: The arguments which are passed to the function. kwargs: Key-word arguments which are passed to the function. Returns: What func returns. Exceptions: RuntimeError when number of retries has been exceeded. """ # config backoff = 1. + random.random() * 0.1 max_backoff = RETRY_MAX_BACKOFF max_retries = RETRY_MAX_RETRIES # try to make the request current_retries = 0 while True: try: # return on success return func(*args, **kwargs) except Exception as e: # sleep on failure time.sleep(backoff) backoff = 2 * backoff if backoff < max_backoff else backoff current_retries += 1 # max retries exceeded if current_retries >= max_retries: raise RuntimeError(f'Maximum retries exceeded! Original exception: {type(e)}: {str(e)}') from e def getenv(key: str, default: str): """Return the value of the environment variable key if it exists, or default if it doesn’t. """ try: return os.getenvb(key.encode(), default.encode()).decode() except Exception: pass try: return os.getenv(key, default) except Exception: pass return default def PIL_to_bytes(img, ext: str = 'png', quality: int = None): """Return the PIL image as byte stream. Useful to send image via requests. """ bytes_io = io.BytesIO() if quality is not None: img.save(bytes_io, format=ext, quality=quality) else: subsampling = -1 if ext.lower() in ['jpg', 'jpeg'] else 0 img.save(bytes_io, format=ext, quality=100, subsampling=subsampling) bytes_io.seek(0) return bytes_io def check_filename(basename): """Checks the length of the filename. Args: basename: Basename of the file. """ return len(basename) <= MAXIMUM_FILENAME_LENGTH def build_azure_signed_url_write_headers(content_length: str, x_ms_blob_type: str = 'BlockBlob', accept: str = '*/*', accept_encoding: str = '*'): """Builds the headers required for a SAS PUT to Azure blob storage. Args: content_length: Length of the content in bytes as string. x_ms_blob_type: Blob type (one of BlockBlob, PageBlob, AppendBlob) accept: Indicates which content types the client is able to understand. accept_encoding: Indicates the content encoding that the client can understand. Returns: Formatted header which should be passed to the PUT request. """ headers = { 'x-ms-blob-type': x_ms_blob_type, 'Accept': accept, 'Content-Length': content_length, 'x-ms-original-content-length': content_length, 'Accept-Encoding': accept_encoding, } return headers
import re emailRegex = re.compile(r''' #example : #[email protected] (?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\]) ''', re.VERBOSE) #Extacting Emails tex = ''' {"_id":"Sportsman Hotel","email":[""],"email0":["\"[]\""]} {"_id":"Morningside Festival Association","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Slingshot","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"GenreCon.","email":[""],"email0":["\"[]\""]} {"_id":"Johnny Ringo's Queensland","email":[""],"email0":["\"[]\""]} {"_id":"Brisbane Writers Festival","email":["","\"[\"[email protected]\""],"email0":["\"[\"[email protected]\"]\"","\"[email protected]\""]} {"_id":"Stones Corner Hotel","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Alderley Arms Hotel","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"McDonald's Queen Street","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Jetts Fortitude Valley 24/7","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Luxury Japan Travel","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"South Bank's Christmas Village","email":["\"[]\""],"email0":[""]} {"_id":"Science Of Fitness","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Blute's Bar","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Royal Queensland Show - Ekka","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"AwesomeX Karaoke Hire","email":[""],"email0":["\"[\"//[email protected]\""]} {"_id":"Brisbane Exhibition Grounds","email":[""],"email0":["\"[]\""]} {"_id":"Sussan (Brisbane CBD)","email":["\"[\"//cdn.jsdelivr.net/npm/[email protected]\""],"email0":["\"//cdn.jsdelivr.net/npm/[email protected]\""]} {"_id":"The Paddo","email":["\"[\"//cdn.jsdelivr.net/npm/[email protected]\""],"email0":["\"//cdn.jsdelivr.net/npm/[email protected]\""]} {"_id":"Strike Bowling Wintergarden","email":["\"[\"//unpkg.com/[email protected]\""],"email0":["\"//unpkg.com/[email protected]\"]\""]} {"_id":"Indoz Festival","email":[""],"email0":[""]} {"_id":"OMFGs Adult Lounge","email":["\"[]\""],"email0":["\"[\"[email protected]\""]} {"_id":"Fate Fitness","email":[""],"email0":[""]} {"_id":"Taiwan Festival","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Jetts Brisbane CBD","email":[""],"email0":["\"[]\""]} {"_id":"F45 Training","email":[""],"email0":["\"[\"//cdn.jsdelivr.net/npm/[email protected]\""]} {"_id":"Brisbane Powerhouse","email":["\"[\"/favicon/[email protected]\"]\""],"email0":["\"[\"/favicon/[email protected]\"]\""]} {"_id":"ZIMMERMANN","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Little Big House","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\"]\""]} {"_id":"Cue Wintergarden","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Witchery Toombul","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Big Fork Theatre","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Slams Karaoke Jukebox Hire","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Shadow Lounge","email":[""],"email0":["\"[]\""]} {"_id":"Le Festival - Brisbane French Festival","email":["\"[\"[email 
protected]\""],"email0":["\"[email protected]\""]} {"_id":"Jacaranda Gin Fest","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Sportsgirl","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"South Bank Fitness Centre","email":["\"[]\""],"email0":["\"[\"[email protected]\"]\""]} {"_id":"Fat Louies","email":[""],"email0":["\"[]\""]} {"_id":"永乐汇KTV","email":[""],"email0":[""]} {"_id":"The Milk Factory Kitchen & Bar","email":["\"[\"[email protected]\""],"email0":["\"//[email protected]\""]} {"_id":"Hungry Jack's Burgers Beak House","email":[""],"email0":["\"[\"/assets/images/[email protected]\"]\""]} {"_id":"Sit Down Comedy Club","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\"]\""]} {"_id":"Digby's Menswear","email":[""],"email0":["\"[]\""]} {"_id":"Anytime Fitness (Brisbane CBD)","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Retro's Fortitude Valley","email":[""],"email0":["\"[\"//[email protected]\""]} {"_id":"Dymocks Brisbane","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Snap Fitness 24/7 Brisbane CBD","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Shaver Shop","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Albatross Tours","email":[""],"email0":["\"[]\""]} {"_id":"The Beat Megaclub","email":["\"[\"[email protected]\"]\""],"email0":[""]} {"_id":"Katies","email":[""],"email0":["\"[\"//cdn.jsdelivr.net/npm/[email protected]\""]} {"_id":"The Palace Karaoke and Lounge Bar","email":["\"[\"[email protected]\"]\""],"email0":["\"[]\""]} {"_id":"The Comedy Empire","email":["\"[]\""],"email0":["\"[\"[email protected]\""]} {"_id":"Witchery Fortitude Valley","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Hanwoori Restaurant","email":[""],"email0":["\"[]\""]} {"_id":"DISSH","email":[""],"email0":["\"[]\""]} {"_id":"F1rst Class Fitness","email":["\"[\"[email protected]\"","\"[\"[email protected]\""],"email0":["\"[email protected]\"]\"","\"[email protected]\"]\""]} {"_id":"bounce fitness","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Newmarket Hotel","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Contra Store","email":[""],"email0":["\"[]\""]} {"_id":"Sky and Lotus","email":[""],"email0":["\"[]\""]} {"_id":"URBBANA Brisbane","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Music City","email":[""],"email0":["\"[\"[email protected]\"]\""]} {"_id":"Snap Fitness 24/7 Brisbane City","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Dizzy - Comedy Hypnotist","email":[""],"email0":["\"[]\""]} {"_id":"BWS MacArthur Central","email":[""],"email0":[""]} {"_id":"Cotton On","email":[""],"email0":["\"[]\""]} {"_id":"Brisbane Comedy Festival","email":[""],"email0":["\"[\"/dist/images/logo/[email protected]\""]} {"_id":"Nest Nappies","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"T.M.Lewin","email":["\"[\"[email protected]\"]\""],"email0":["\"[\"[email protected]\"]\""]} {"_id":"Fitness First Brisbane CBD","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Vinnies","email":[""],"email0":[""]} {"_id":"Figure Fitness","email":[""],"email0":[""]} {"_id":"Lost Bar and Nightclub","email":[""],"email0":["\"[]\""]} {"_id":"colette by colette hayman - Queen Street","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Sunrover Tours","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Sunlit Sounds music festival","email":[""],"email0":["\"[]\""]} 
{"_id":"BWS George Street","email":[""],"email0":[""]} {"_id":"Noritor Bar and Restaurant","email":[""],"email0":[""]} {"_id":"Callan Shirts & Callan Designs","email":[""],"email0":[""]} {"_id":"Tree of Life (Brisbane)","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Izabel + Sebastian","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Queensland Poetry Festival","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"The Tivoli","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Smiggle","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Calamvale Carnival","email":[""],"email0":[""]} {"_id":"sass & bide queensplaza","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Australia the Gift","email":["\"[]\"","\"[]\""],"email0":["\"[]\"","\"[]\""]} {"_id":"SHEIKE Wintergarden","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"InsideAsia Tours Australia Branch","email":[""],"email0":[""]} {"_id":"Olympia Body Transformation Sanctuary","email":["\"[\"[email protected]\"]\""],"email0":["\"[\"[email protected]\"]\""]} {"_id":"Ally Fashion (Brisbane CBD)","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Exhibition Street","email":[""],"email0":[""]} {"_id":"Contrasts of Brisbane Walking Tour","email":[""],"email0":[""]} {"_id":"Brisbane 4WD Day Tours","email":["\"[\"[email protected]\"]\""],"email0":["\"[]\""]} {"_id":"Kangaroo Segway Tours","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"The Met","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"MW Tours Pty Ltd","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Queensland Music Festival","email":["\"[\"//[email protected]\""],"email0":["\"//static.parastorage.com/unpkg/[email protected]\""]} {"_id":"Soleil Pool Bar","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\"]\""]} {"_id":"World Expeditions","email":[""],"email0":["\"[]\""]} {"_id":"lululemon","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Ollie's Place Brisbane DFO","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Nyanda Cultural Tours","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Italian Week","email":[""],"email0":[""]} {"_id":"Brisbane Trike Tours","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Furlani Fitness","email":["\"[\"//[email protected]\""],"email0":["\"//static.parastorage.com/unpkg/[email protected]\""]} {"_id":"The Reject Shop Queen St","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Ghost Tours","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Jimmy's On The Mall","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\"]\""]} {"_id":"Walk Brisbane","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Botanica Real Food","email":["\"[\"[email protected]\"]\"",""],"email0":["\"[]\"","\"[]\""]} {"_id":"East West Food","email":[""],"email0":[""]} {"_id":"Brisbane Sightseeing Tours - Urban Adventures","email":[""],"email0":[""]} {"_id":"Buffalo Bar","email":["\"[\"//buffalobar.com.au/wp-content/uploads/2018/08/[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Laruche","email":[""],"email0":["\"[]\""]} {"_id":"Tattersall's Arcade","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"GPO Hotel","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Guest House Paradiso","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Birdees","email":["\"[\"[email 
protected]\""],"email0":["\"[email protected]\""]} {"_id":"Bubblebubs Modern Cloth Nappies","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Queensland Performing Arts Centre","email":[""],"email0":["\"[]\""]} {"_id":"Drunken Monkey","email":[""],"email0":["\"[]\""]} {"_id":"Alannah Hill - Broadway","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"tookoonooka crater","email":[""],"email0":[""]} {"_id":"Lennons Restaurant & Bar","email":[""],"email0":["\"[]\""]} {"_id":"Brisbane Jazz Club","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"George's Paragon Seafood Restaurant","email":[""],"email0":["\"[]\""]} {"_id":"Mix Karaoke","email":[""],"email0":[""]} {"_id":"Candy Club","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\"]\""]} {"_id":"Prohibition Brisbane","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Comedy Hypnotist Andy Vening","email":[""],"email0":["\"[\"//[email protected]\""]} {"_id":"X-Wing Australia Pty Ltd","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Press Club","email":["\"[]\""],"email0":[""]} {"_id":"Anytime Fitness","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"FAMOUS","email":[""],"email0":["\"[\"//[email protected]\""]} {"_id":"Players Brisbane","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"The Victory Hotel","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"AQUITAINE","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Roti Place","email":[""],"email0":["\"[]\""]} {"_id":"The Brunswick Hotel","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Black Fire Restaurant Brisbane","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Stokehouse Q","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Donna Chang","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Ally Fashion","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"The Pool Terrace + Bar","email":[""],"email0":["\"[]\""]} {"_id":"Grill'd Albert Street","email":["\"[\"/images/icons/[email protected]\"]\""],"email0":["\"[\"/images/icons/[email protected]\"]\""]} {"_id":"Bacchus","email":["\"[\"[email protected]\"]\""],"email0":["\"[]\""]} {"_id":"Einbunpin Festival","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"The Gresham Bar","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Super Whatnot","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\"]\""]} {"_id":"Verve Restaurant & Ciderhouse","email":["\"[\"//[email protected]\""],"email0":["\"//static.parastorage.com/unpkg/[email protected]\""]} {"_id":"Red Rooster","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"JPT Tour Group","email":[""],"email0":["\"[]\""]} {"_id":"Twelfth Night Theatre","email":[""],"email0":["\"[\"[email protected]\"]\""]} {"_id":"Mr. 
Bunz","email":[""],"email0":["\"[\"[email protected]\"]\""]} {"_id":"Public Quarter","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Kadoya Japanese Restaurant","email":[""],"email0":["\"[\"[email protected]\"]\""]} {"_id":"Club XOXO","email":[""],"email0":[""]} {"_id":"An Cafe Vietnamese Street Food","email":[""],"email0":["\"[\"//[email protected]\""]} {"_id":"Queensland Club","email":[""],"email0":["\"[]\""]} {"_id":"Frequent Fitness","email":[""],"email0":[""]} {"_id":"Kinn-Imm Thai Hawker Food","email":[""],"email0":["\"[\"[email protected]\"]\""]} {"_id":"Audio Tours Australia: Brisbane","email":[""],"email0":[""]} {"_id":"The Walnut Restaurant and Lounge Bar","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Soul Origin","email":["\"[]\"","\"[\"[email protected]\""],"email0":["\"[]\"","\"[email protected]\""]} {"_id":"Fraser Explorer Tours","email":[""],"email0":[""]} {"_id":"charsmy Food & Bottle Shop","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"FunkGlam","email":[""],"email0":[""]} {"_id":"The Lab","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\"]\""]} {"_id":"The Pancake Manor","email":["\"[\"//[email protected]\""],"email0":["\"//static.parastorage.com/unpkg/[email protected]\""]} {"_id":"Fat Noodle","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\"]\""]} {"_id":"Rock N Roll Kebab Pizza - Low Price Pizza","email":["http://www.rocknrollkebabpizza.com.au/"],"email0":["https://maps.google.com/?cid=405749267078995819"]} {"_id":"Embassy Bar & Kitchen","email":["\"[\"[email protected]\"]\""],"email0":["\"[]\""]} {"_id":"LiveWire 24/7 Sports Bar","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"TURQUOISE Kebab & Pizza","email":[""],"email0":["\"[]\""]} {"_id":"Anywhere Festival","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Lexington Queen","email":[""],"email0":[""]} {"_id":"Beach House Bar & Grill CBD","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Cross Country Tours","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Moo Moo The Wine Bar and Grill","email":[""],"email0":["\"[\"brisbane/wp-content/themes/moo-moo/static/img/[email protected]\""]} {"_id":"Club Vixen - Gentleman's Bar and Lounge","email":[""],"email0":["\"[\"//[email protected]\""]} {"_id":"charber 29 Cruise Club","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Treasury Casino and Hotel Brisbane","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Botanic Bar","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\"]\""]} {"_id":"Priceline Pharmacy King George Square","email":[""],"email0":[""]} {"_id":"Fab Kebabs","email":[""],"email0":[""]} {"_id":"Blackmarket Bar & Grill","email":["\"[\"//[email protected]\""],"email0":["\"//static.parastorage.com/unpkg/[email protected]\""]} {"_id":"QUT Gardens Point Fitness and Aquatic","email":[""],"email0":["\"[\"[email protected]\"]\""]} {"_id":"KG Bar","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Irish Murphy's","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"st Fitness Professionals-Sports-Fitness-Coach-Personal-Strength-Training-School-Brisbane City","email":[""],"email0":[""]} {"_id":"Bull Bar restaurant","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"Forever New","email":["\"[]\""],"email0":["\"[]\""]} {"_id":"On The Go Tours","email":[""],"email0":["\"[\"[email protected]\""]} {"_id":"Walter's Steakhouse and Wine Bar","email":["\"[\"[email protected]\"]\""],"email0":["\"[\"[email 
protected]\"]\""]} {"_id":"Down Under Bar & Grill","email":[""],"email0":[""]} {"_id":"Tattersall's Club","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\""]} {"_id":"Ryan's On The Park Bar and Restaurant","email":["\"[\"[email protected]\"]\""],"email0":["\"[\"[email protected]\"]\""]} {"_id":"The Caxton Hotel","email":["\"[\"[email protected]\""],"email0":["\"//[email protected]\""]} {"_id":"The Pav Bar & Courtyard","email":["\"[\"[email protected]\""],"email0":["\"[email protected]\"]\""]} ''' text =''' ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]',, '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'] ''' def extractEmailsFromUrlText(urlText): extractedEmail = emailRegex.findall(urlText.replace('%20','')) extractedEmail = emailRegex.findall(urlText.replace('\'','')) return extractedEmail def removeDuplicates(chars): mail = [] for char in range(len(chars)): if chars[char] not in mail: mail.append(chars[char]) return mail email = extractEmailsFromUrlText(text) email = removeDuplicates(email) print (email) size=len(email) print(size)
# Author: Ankush Gupta # Date: 2015 """ Entry-point for generating synthetic text images, as described in: @InProceedings{Gupta16, author = "Gupta, A. and Vedaldi, A. and Zisserman, A.", title = "Synthetic Data for Text Localisation in Natural Images", booktitle = "IEEE Conference on Computer Vision and Pattern Recognition", year = "2016", } """ import numpy as np import h5py import os, sys, traceback import os.path as osp from synthgen import * from common import * import wget, tarfile import _pickle as cp import matplotlib.pyplot as plt from math import * import cv2 import glob def get_data(): """ Download the image,depth and segmentation data: Returns, the h5 database. """ if not osp.exists(DB_FNAME): try: colorprint(Color.BLUE,'\tdownloading data (56 M) from: '+DATA_URL,bold=True) print() sys.stdout.flush() out_fname = 'data.tar.gz' wget.download(DATA_URL,out=out_fname) tar = tarfile.open(out_fname) tar.extractall() tar.close() os.remove(out_fname) colorprint(Color.BLUE,'\n\tdata saved at:'+DB_FNAME,bold=True) sys.stdout.flush() except: print (colorize(Color.RED,'Data not found and have problems downloading.',bold=True)) sys.stdout.flush() sys.exit(-1) # open the h5 file and return: return h5py.File(DB_FNAME,'r') def generate_TR_perspective(res, imgid, vertical=0): ninstance = len(res) for i in range(ninstance): img = res[i]['img'] img_vis = img.copy() cross_flag = 0 H, W, _ = img.shape lbl_candi = [] texts = iter(res[i]['txt']) for id in range(res[i]['wordBB'].shape[2]): p1, p2, p3, p4 = [res[i]['wordBB'][:, j, id] for j in range(4)] # visualize wordBB polypts = res[i]['wordBB'][:,:,id].transpose().reshape(-1,1,2).astype(np.int32) cv2.polylines(img_vis, [polypts], True, (0,0,255), thickness=2) # judge valid if (p1[0] - p2[0]) * (p4[0] - p3[0]) < 0 or (p1[1] - p4[1]) * (p2[1] - p3[1]) < 0: cross_flag = 1 break if len(lbl_candi) == 0: lbl_candi = next(texts).split('\n') lbl = lbl_candi[0] lbl_candi.pop(0) # preprocess for perspective transformation W_bb = 0.5 * (np.linalg.norm(p1-p2) + np.linalg.norm(p3-p4)) H_bb = 0.5 * (np.linalg.norm(p1-p4) + np.linalg.norm(p2-p3)) H_new = int(H_bb + 0.5) W_new = int(W_bb + 0.5) # perspective transformation tarpts = np.float32([[0, 0], [W_new, 0], [W_new, H_new], [0, H_new]]) M = cv2.getPerspectiveTransform(res[i]['wordBB'][:,:,id].transpose(), tarpts) tarimg = cv2.warpPerspective(img, M, (W_new, H_new)) # save results outpath = osp.join(output_dir, 'Recog', str(imgid//100)) if not osp.exists(outpath): os.makedirs(outpath) outname = osp.join(outpath,str(imgid)+'_'+str(i) +'_'+str(id)+'_' +lbl + '.jpg') if vertical: stat = cv2.imwrite(outname, np.rot90(tarimg[:,:,::-1], -1)) else: stat = cv2.imwrite(outname, tarimg[:,:,::-1]) if not stat: print(colorize(Color.RED,'Failed to write the image..',bold=True)) print('corresponding corners are: ', p1, p2, p3, p4) print('the label is: ', lbl) os.remove(outname) if cross_flag: invalid_list = glob.glob(osp.join(output_dir, 'Recog', str(imgid//100), str(imgid)+'_'+str(i) +'*.jpg')) for ele in invalid_list: os.remove(ele) Origin_outpath = osp.join(output_dir, 'Origin', str(imgid//1000)) if not osp.exists(Origin_outpath): os.makedirs(Origin_outpath) if vertical: cv2.imwrite(osp.join(Origin_outpath, str(imgid) + '_' + str(i) + '.jpg'), np.rot90(img_vis[:, :, ::-1], -1)) else: cv2.imwrite(osp.join(Origin_outpath, str(imgid) + '_' + str(i) + '.jpg'), img_vis[:,:,::-1]) def generate_TR_rotation(res, vertical=0): ninstance = len(res) for i in range(ninstance): img = res[i]['img'] H, W, _ = img.shape lbl_candi = [] 
texts = iter(res[i]['txt']) for id in range(res[i]['wordBB'].shape[2]): flag = 0 p1,p2,p3,p4 = [res[i]['wordBB'][:, j, id] for j in range(4)] if len(lbl_candi) == 0: lbl_candi = next(texts).split('\n') lbl = lbl_candi[0] lbl_candi.pop(0) # calculate the rotation matrix rad = 0.5*(atan2(p2[1]-p1[1], p2[0]-p1[0]) + atan2(p3[1]-p4[1], p3[0]-p4[0])) rotateMtrix = cv2.getRotationMatrix2D((W/2, H/2), degrees(rad), 1) Hnew = int(W * fabs(sin(rad)) + H * fabs(cos(rad))) Wnew = int(H * fabs(sin(rad)) + W * fabs(cos(rad))) rotateMtrix[0,2] += (Wnew - W) / 2 rotateMtrix[1,2] += (Hnew - H) / 2 imgRotated = cv2.warpAffine(img, rotateMtrix, (Wnew, Hnew), borderValue=(0,0,0)) # the four vertex of the new rect [[p1[0]], [p1[1]]] = np.dot(rotateMtrix, np.array([[p1[0]], [p1[1]], [1]])) [[p3[0]], [p3[1]]] = np.dot(rotateMtrix, np.array([[p3[0]], [p3[1]], [1]])) [[p2[0]], [p2[1]]] = np.dot(rotateMtrix, np.array([[p2[0]], [p2[1]], [1]])) [[p4[0]], [p4[1]]] = np.dot(rotateMtrix, np.array([[p4[0]], [p4[1]], [1]])) if p1[1] > p4[1]: # handling the reversed cases p1,p4 = p4,p1 p2,p3 = p3,p2 cropped = imgRotated[int(max(0, min(p1[1], p2[1]))):min(Hnew, int(max(p3[1], p4[1]))), int(max(0, min(p1[0], p4[0]))):min(Wnew, int(max(p2[0], p3[0])))][::-1,:,:] flag = 1 else: cropped = imgRotated[int(max(0, min(p1[1], p2[1]))):min(Hnew, int(max(p3[1], p4[1]))), int(max(0, min(p1[0], p4[0]))):min(Wnew, int(max(p2[0], p3[0])))] if p1[0] > p2[0] and p2[1] > p3[1]: # there is an unreasonable case(rare) that I just skip it. continue outname = osp.join(output_dir, lbl + '.jpg') stat = cv2.imwrite(outname, cropped[:,:,::-1]) if not stat: print('wrong cropping:', p1, p2, p3, p4) print(lbl) print(imgRotated.shape) print(flag) def add_res_to_db(imgname,res,db): """ Add the synthetically generated text image instance and other metadata to the dataset. """ ninstance = len(res) for i in range(ninstance): dname = "%s_%d"%(imgname, i) db['data'].create_dataset(dname,data=res[i]['img']) db['data'][dname].attrs['charBB'] = res[i]['charBB'] db['data'][dname].attrs['wordBB'] = res[i]['wordBB'] db['data'][dname].attrs['txt'] = res[i]['txt'] #L = res[i]['txt'] #L = [n.encode("utf-8", "ignore") for n in L] #db['data'][dname].attrs['txt'] = L ## Define some configuration variables: NUM_IMG = 200 # no. of images to use for generation (-1 to use all available): INSTANCE_PER_IMAGE = 1 # no. of times to use the same image SECS_PER_IMG = None #max time per image in seconds vertical_FLAG = 1 # added by ruifeng, whether to create vertical lines. 
# path to the data-file, containing image, depth and segmentation: DATA_PATH = 'data' DB_FNAME = osp.join(DATA_PATH,'dset.h5') # url of the data (google-drive public file): DATA_URL = 'http://www.robots.ox.ac.uk/~ankush/data.tar.gz' OUT_FILE = 'results/SynthText.h5' ##### declared by ruifeng##### im_dir = './data/bg_img' depth_dir = './data/depth.h5' seg_dir = './data/seg.h5' filtered_dir = 'imnames.cp' output_dir = './data/generated/' ########### end ############## def main(viz=False): # open databases: print (colorize(Color.BLUE,'getting data..',bold=True)) ##db = get_data() ############## added by ruifeng############## depth_db = h5py.File(depth_dir, 'r') seg_db = h5py.File(seg_dir, 'r') imnames = sorted(depth_db.keys()) with open(filtered_dir, 'rb') as f: filtered_imnames = set(cp.load(f)) ################## end ###################### print (colorize(Color.BLUE,'\t-> done',bold=True)) # open the output h5 file: #out_db = h5py.File(OUT_FILE,'w') #out_db.create_group('/data') print (colorize(Color.GREEN,'Storing the output in: '+output_dir, bold=True)) # get the names of the image files in the dataset: N = len(imnames) global NUM_IMG if NUM_IMG < 0: NUM_IMG = N start_idx,end_idx = 0,min(NUM_IMG, N) RV3 = RendererV3(DATA_PATH,max_time=SECS_PER_IMG) for i in range(start_idx,end_idx): imname = imnames[i] # ignore if not in filetered list: if imname not in filtered_imnames: continue try: # get the image: #img = Image.fromarray(db['image'][imname][:]) img = Image.open(osp.join(im_dir, imname)).convert('RGB') # get the pre-computed depth: # there are 2 estimates of depth (represented as 2 "channels") # here we are using the second one (in some cases it might be # useful to use the other one): depth = depth_db[imname][:].T depth = depth[:, :, 0] # get segmentation: seg = seg_db['mask'][imname][:].astype('float32') area = seg_db['mask'][imname].attrs['area'] label = seg_db['mask'][imname].attrs['label'] # re-size uniformly: sz = depth.shape[:2][::-1] img = np.array(img.resize(sz, Image.ANTIALIAS)) seg = np.array(Image.fromarray(seg).resize(sz, Image.NEAREST)) if vertical_FLAG: depth = np.rot90(depth) seg = np.rot90(seg) img = np.rot90(img) print (colorize(Color.RED,'%d of %d'%(i,end_idx-1), bold=True)) res = RV3.render_text(img,depth,seg,area,label, ninstance=INSTANCE_PER_IMAGE,viz=viz) if len(res) > 0: # non-empty : successful in placing text: generate_TR_perspective(res, i, vertical_FLAG) #add_res_to_db(imname,res,out_db) # visualize the output: if viz: if 'q' in input(colorize(Color.RED,'continue? (enter to continue, q to exit): ',True)): break except: traceback.print_exc() print (colorize(Color.GREEN,'>>>> CONTINUING....', bold=True)) continue depth_db.close() seg_db.close() #out_db.close() if __name__=='__main__': import argparse parser = argparse.ArgumentParser(description='Genereate Synthetic Scene-Text Images') parser.add_argument('--viz',action='store_true',default=False,help='flag for turning on visualizations') args = parser.parse_args() main(args.viz)
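# A self-contained sketch of the perspective-crop step used by
# generate_TR_perspective above: given a word box as four corners in
# (top-left, top-right, bottom-right, bottom-left) order, the box is warped to
# an axis-aligned patch whose size is the average of opposite edge lengths,
# mirroring the W_bb / H_bb computation in that function. The corner values in
# the usage comment are illustrative only.
import numpy as np
import cv2

def crop_word_box(img, corners):
    p1, p2, p3, p4 = [np.asarray(p, dtype=np.float32) for p in corners]
    w = int(round(0.5 * (np.linalg.norm(p1 - p2) + np.linalg.norm(p3 - p4))))
    h = int(round(0.5 * (np.linalg.norm(p1 - p4) + np.linalg.norm(p2 - p3))))
    src = np.float32([p1, p2, p3, p4])
    dst = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, M, (w, h))

# patch = crop_word_box(image, [(10, 12), (90, 15), (92, 40), (8, 37)])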
from django.test import Client from django.urls import reverse def test_index(): client = Client() url = reverse('index') response = client.get(url) assert response.status_code == 200
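# The test above resolves the URL by name, so it assumes a URLconf entry named
# 'index'. A minimal self-contained sketch of such an entry follows; the view
# is a stand-in for illustration, not this project's actual view.
from django.http import HttpResponse
from django.urls import path

def index(request):
    return HttpResponse("ok")

urlpatterns = [
    path("", index, name="index"),
]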
import requests, responses, unittest, json import lateral.api class ApiTest(unittest.TestCase): def setUp(self): self.url = "http://test.io" self.api = lateral.api.Api("009b64acf288f20816ecfbbd20000000", url=self.url, ignore=[666]) self.jsonX = {"id": 1} self.X = json.dumps(self.jsonX) def tearDown(self): pass @responses.activate def test_request(self): responses.add(responses.GET, 'http://test.io/2xx', status=299, body=self.X) responses.add(responses.GET, 'http://test.io/666', status=666, body="") try: self.api._request('get', 'http://test.io/2xx') self.api._request('get', 'http://test.io/666') except: self.fail("Function _request throws exception on legal input.") responses.add(responses.GET, 'http://test.io/4xx', status=499, body=self.X) with self.assertRaises(requests.exceptions.HTTPError): self.api._request('get', 'http://test.io/4xx') responses.add(responses.GET, 'http://test.io/200', status=200, body='"x": 3}') with self.assertRaises(ValueError): self.api._request('get', 'http://test.io/200') ###################### # Documents @responses.activate def test_get_documents(self): responses.add(responses.GET, 'http://test.io/documents', status=200, body=self.X) r = self.api.get_documents() r = self.api.get_documents('lorem', page=3, per_page=5) # keywords + pagination assert responses.calls[1].request.url.find("keywords=lorem") > 0 r = self.api.get_documents(page=3, per_page=5) # pagination alone assert responses.calls[2].request.url.find("page=3") > 0 assert responses.calls[2].request.url.find("per_page=5") > 0 @responses.activate def test_post_document(self): responses.add(responses.POST, 'http://test.io/documents', status=201, body=self.X) r = self.api.post_document('Fat black cat', {"title": "Lorem ipsum"}) @responses.activate def test_get_document(self): responses.add(responses.GET, 'http://test.io/documents/docx', status=201, body=self.X) r = self.api.get_document('docx') @responses.activate def test_put_document(self): responses.add(responses.PUT, 'http://test.io/documents/docx', status=200, body=self.X) r = self.api.put_document('docx', 'Fat black cat', {"title": "Lorem ipsum"}) @responses.activate def test_delete_document(self): responses.add(responses.DELETE, 'http://test.io/documents/docx', status=201, body=self.X) r = self.api.delete_document('docx') @responses.activate def test_get_documents_preferences(self): responses.add(responses.GET, 'http://test.io/documents/docx/preferences', status=200, body=self.X) r = self.api.get_documents_preferences('docx') r = self.api.get_documents_preferences('docx', page=3, per_page=5) assert responses.calls[1].request.url.find("page=3") > 0 assert responses.calls[1].request.url.find("per_page=5") > 0 @responses.activate def test_get_documents_similar(self): responses.add(responses.GET, 'http://test.io/documents/docx/similar', status=200, body=self.X) r = self.api.get_documents_similar('docx') @responses.activate def test_post_documents_similar_to_text(self): responses.add(responses.POST, 'http://test.io/documents/similar-to-text', status=200, body=self.X) r = self.api.post_documents_similar_to_text('docx') ###################### # Users @responses.activate def test_post_user(self): responses.add(responses.POST, 'http://test.io/users', status=201, body=self.X) r = self.api.post_user() @responses.activate def test_get_users(self): responses.add(responses.GET, 'http://test.io/users', status=200, body=self.X) r = self.api.get_users() r = self.api.get_users(page=3, per_page=5) assert responses.calls[1].request.url.find("page=3") > 0 assert 
responses.calls[1].request.url.find("per_page=5") > 0 @responses.activate def test_get_user(self): responses.add(responses.GET, 'http://test.io/users/userx', status=201, body=self.X) r = self.api.get_user('userx') @responses.activate def test_delete_user(self): responses.add(responses.DELETE, 'http://test.io/users/userx', status=201, body=self.X) r = self.api.delete_user('userx') @responses.activate def test_get_user_recommendations(self): responses.add(responses.GET, 'http://test.io/users/user_all/recommendations', status=201, body=self.X) r = self.api.get_user_recommendations('user_all', 10) responses.add(responses.GET, 'http://test.io/users/user_sel/recommendations', status=201, body=self.X) r = self.api.get_user_recommendations('user_sel', 10, [1, 2, 3, 4]) ###################### # Preferences @responses.activate def test_get_users_preferences(self): responses.add(responses.GET, 'http://test.io/users/userx/preferences', status=200, body=self.X) r = self.api.get_users_preferences('userx') @responses.activate def test_get_users_preference(self): responses.add(responses.GET, 'http://test.io/users/userx/preferences/docx', status=201, body=self.X) r = self.api.get_users_preference('userx', 'docx') @responses.activate def test_post_users_preference(self): responses.add(responses.POST, 'http://test.io/users/userx/preferences/docx', status=201, body=self.X) r = self.api.post_users_preference('userx', 'docx') @responses.activate def test_delete_users_preference(self): responses.add(responses.DELETE, 'http://test.io/users/userx/preferences/docx', status=200, body=self.X) r = self.api.delete_users_preference('userx', 'docx') ###################### # Clusters @responses.activate def test_get_cluster_models(self): responses.add(responses.GET, 'http://test.io/cluster-models', status=200, body=self.X) r = self.api.get_cluster_models() r = self.api.get_cluster_models(page=3, per_page=5) assert responses.calls[1].request.url.find("page=3") > 0 assert responses.calls[1].request.url.find("per_page=5") > 0 @responses.activate def test_post_cluster_model(self): responses.add(responses.POST, 'http://test.io/cluster-models', status=201, body=self.X) r = self.api.post_cluster_model(10) @responses.activate def test_get_cluster_model(self): responses.add(responses.GET, 'http://test.io/cluster-models/modelx', status=201, body=self.X) r = self.api.get_cluster_model('modelx') @responses.activate def test_delete_cluster_model(self): responses.add(responses.DELETE, 'http://test.io/cluster-models/modelx', status=201, body=self.X) r = self.api.delete_cluster_model('modelx') @responses.activate def test_get_clusters(self): responses.add(responses.GET, 'http://test.io/cluster-models/modelx/clusters', status=200, body=self.X) r = self.api.get_clusters('modelx') @responses.activate def test_get_clusters_documents(self): responses.add(responses.GET, 'http://test.io/cluster-models/modelx/clusters/clustx/documents', status=200, body=self.X) r = self.api.get_clusters_documents('modelx', 'clustx') @responses.activate def test_get_clusters_words(self): responses.add(responses.GET, 'http://test.io/cluster-models/modelx/clusters/clustx/words', status=200, body=self.X) r = self.api.get_clusters_words('modelx', 'clustx') @responses.activate def test_get_clusters_word_cloud(self): responses.add(responses.GET, 'http://test.io/cluster-models/modelx/clusters/clustx/words', status=200, body=self.X) r = self.api.get_clusters_words('modelx', 'clustx')
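# The tests above imply a contract for lateral.api.Api._request: status codes
# listed in `ignore` (here 666) neither raise nor get parsed, other 4xx/5xx
# responses raise requests.exceptions.HTTPError, and an unparseable body raises
# ValueError. A hedged sketch of a helper with that behaviour, not the actual
# library implementation:
import json
import requests

def request_json(method, url, ignore=(), **kwargs):
    response = requests.request(method, url, **kwargs)
    if response.status_code in ignore:
        # Ignored status codes are passed through without parsing.
        return None
    response.raise_for_status()       # HTTPError on 4xx/5xx
    return json.loads(response.text)  # ValueError on malformed JSON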
from .flow_viz import flow_to_image from .frame_utils import writeFlow
import os import re import pandas as pd import numpy as np import logging import argparse import csv from time import sleep import kfserving import tensorflow as tf from typing import List, Dict from kfserving import storage from prometheus_client import CollectorRegistry, Gauge, push_to_gateway import boto3 from botocore.client import Config import botocore from kubernetes import client as k8s_client from kubernetes.client import rest as k8s_rest from kubernetes import config as k8s_config from kubernetes.client.rest import ApiException logging.basicConfig() logging.getLogger().setLevel(logging.INFO) _GCS_PREFIX = "gs://" _S3_PREFIX = "s3://" class Upload(object): def __init__(self): k8s_config.load_incluster_config() self.api_client = k8s_client.CoreV1Api() self.minio_service_endpoint = None self.boto_client = boto3.client('s3', endpoint_url=self.get_minio_endpoint(), aws_access_key_id="minio", aws_secret_access_key="minio123", config=Config(signature_version='s3v4'), region_name="us-east-1", use_ssl=False) def get_minio_endpoint(self): try: self.minio_service_endpoint = self.api_client.read_namespaced_service(name='minio-service', namespace='kubeflow').spec.cluster_ip self.minio_service_enpoint_port=self.api_client.read_namespaced_service(name='minio-service', namespace='kubeflow').spec.ports[0].port except ApiException as e: if e.status == 403: logging.warning(f"The service account doesn't have sufficient privileges " f"to get the kubeflow minio-service. " f"You will have to manually enter the minio cluster-ip. " f"To make this function work ask someone with cluster " f"priveleges to create an appropriate " f"clusterrolebinding by running a command.\n" f"kubectl create --namespace=kubeflow rolebinding " "--clusterrole=kubeflow-view " "--serviceaccount=${NAMESPACE}:default-editor " "${NAMESPACE}-minio-view") logging.error("API access denied with reason: {e.reason}") self.minio_endpoint = "http://"+ self.minio_service_endpoint + ":%s"%self.minio_service_enpoint_port return self.minio_endpoint def create_bucket(self, bucket_name): try: self.boto_client.head_bucket(Bucket=bucket_name) except botocore.exceptions.ClientError: bucket = {'Bucket': bucket_name} self.boto_client.create_bucket(**bucket) def download_file(self, bucket_name, key, filename): try: self.boto_client.download_file(Bucket=bucket_name, Key=key, Filename=filename) mode="a+" except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": logging.info("File not exist") mode="w+" return mode def upload_file(self, bucket_name, blob_name, file_to_upload): self.boto_client.upload_file(file_to_upload, bucket_name, blob_name) return "s3://{}/{}".format(bucket_name, blob_name) class KFServing(kfserving.KFModel): def __init__(self, name: str): super().__init__(name) self.name = name self.ready = False def load(self): if args.storage_uri.startswith(_GCS_PREFIX) or args.storage_uri.startswith(_S3_PREFIX): obj=storage.Storage() obj.download(uri=args.storage_uri, out_dir=args.out_dir) self.ready = True def preprocess(self, request): self.input_data=request['instances'] self.pre=(np.array(self.input_data)/-200) request['instances']=self.pre.tolist() return request['instances'] def postprocess(self, request): """ Add your own metrics and push metrics to prometheus pushgateway Here we are reading data from 'kfserving_metrics.csv' file and generating metrics The Pushgateway allows ephemeral and batch jobs to expose their metrics to Prometheus """ obj=Upload() obj.create_bucket(args.bucket_name) 
mode=obj.download_file(args.bucket_name, 'kfserving_metrics.csv', 'kfserving_metrics.csv') fieldnames=["b3001", "b3002","b3003","b3004","b3005","b3006","b3007","b3008","b3009","b3010","b3011","b3012","b3013", "class_id", "probabilities"] with open('kfserving_metrics.csv', mode) as csvfile: writer=csv.DictWriter(csvfile, fieldnames=fieldnames) if mode=='w+': writer.writeheader() row={'class_id': request['class_ids'][0][0], 'probabilities':request['probabilities'][0]} for label, value in zip(self.feature_col,self.input_data): row.update({label:value}) writer.writerow(row) with open('kfserving_metrics.csv', 'r') as file: lines=file.readlines() if len(lines)>=100: data_frames=pd.read_csv('kfserving_metrics.csv') data_frame=data_frames.drop(['class_id','probabilities'], axis=1) data_frame=data_frame/-200 metrics={} for feature in fieldnames[:-2]: metrics["blerssi_input_data_%s_mean"%feature]=data_frame.mean().to_dict()[feature] metrics["blerssi_input_data_%s_min"%feature]=data_frame.min().to_dict()[feature] metrics["blerssi_input_data_%s_std"%feature]=data_frame.std().to_dict()[feature] metrics["blerssi_input_data_%s_median"%feature]=data_frame.median().to_dict()[feature] metrics["blerssi_class_id_mean"]=data_frames['class_id'].mean() metrics["blerssi_class_id_min"]=data_frames['class_id'].min() metrics["blerssi_class_id_std"]=data_frames['class_id'].std() registry = CollectorRegistry() for k,v in metrics.items(): self.push_metrics(k,v, registry) obj.upload_file(args.bucket_name, 'kfserving_metrics.csv', 'kfserving_metrics.csv') return request def push_metrics(self, metric_name, value, registry): #push metrics to prometheus pushgateway g = Gauge(metric_name, 'blerssi', registry=registry) g.set(value) push_to_gateway(args.pushgateway, job='blerssi', registry=registry) def predict(self, request): X=request self.feature_col=["b3001", "b3002","b3003","b3004","b3005","b3006","b3007","b3008","b3009","b3010","b3011","b3012","b3013"] input={} for i in range(len(X)): input.update({self.feature_col[i]:[X[i]]}) for dir in os.listdir(args.out_dir): if re.match('[0-9]',dir): exported_path=os.path.join(args.out_dir,dir) break else: raise Exception("Model path not found") # Open a Session to predict with tf.Session() as sess: tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], exported_path) predictor= tf.contrib.predictor.from_saved_model(exported_path,signature_def_key='predict') output_dict= predictor(input) sess.close() output={} output['probabilities']=output_dict['probabilities'].tolist() output['class_ids']=output_dict['class_ids'].tolist() logging.info("output: %s"%output['class_ids']) return output if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--http_port', default=8081, type=int, help='The HTTP Port listened to by the model server.') parser.add_argument('--pushgateway', help='Prometheus pushgateway to push metrics') parser.add_argument('--storage_uri', help='storage uri for your model') parser.add_argument('--out_dir', help='out dir') parser.add_argument('--bucket_name', default='kfserving', help='bucket name to store model metrics') args, _ = parser.parse_known_args() model = KFServing("blerssi-model") model.load() kfserving.KFServer(http_port=args.http_port).start([model])
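# Once the server above is running, predictions can be requested over the
# KFServing V1 HTTP protocol. A hedged client sketch; the host, port and the
# thirteen RSSI values below are assumptions for illustration only.
import requests

payload = {"instances": [-70, -72, -65, -80, -77, -69, -75, -71, -68, -74, -79, -66, -73]}
resp = requests.post("http://localhost:8081/v1/models/blerssi-model:predict", json=payload)
print(resp.json())  # expected keys, per predict(): 'class_ids' and 'probabilities'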
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 17:49:18 2016

@author: yannick

Reads a FASTA file (first command-line argument), skips the header line and
prints the 1-based start position and length of every substring that equals
its own reverse complement.
"""
import sys

with open(sys.argv[1], "r") as FILE:
    # Drop the FASTA header and join the remaining lines into one sequence.
    SEQ = "".join(x.strip("\n\r\t ") for x in FILE.readlines()[1:])

# Complement strand, built via lower-case intermediates so bases that have
# already been substituted are not replaced a second time.
SEQ_INV = SEQ.replace("A", "t").replace("C", "g").replace("G", "c") \
    .replace("T", "a").upper()

for i in xrange(len(SEQ)):
    for j in xrange(i + 2, len(SEQ)):
        # The slices match exactly when the inclusive span i..j is a reverse
        # palindrome of length j - i + 1.
        if SEQ[i:j] == SEQ_INV[j:i:-1]:
            print i + 1, j - i + 1
        if j - i > 12:
            break
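# A Python 3 sketch of the complement step above: a translation table replaces
# the chained replace() calls, which the original keeps safe by substituting
# lower-case intermediates before upper-casing the result.
COMPLEMENT_TABLE = str.maketrans("ACGT", "TGCA")

def reverse_complement(seq):
    """Return the reverse complement of an upper-case DNA string."""
    return seq.translate(COMPLEMENT_TABLE)[::-1]

# reverse_complement("GCATGC") == "GCATGC", i.e. a reverse palindrome.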
""" Application Skeleton """ from flask import Flask from flask_bootstrap import Bootstrap from config import CONFIG BOOTSTRAP = Bootstrap() def create_app(config_name): """ Factory Function """ app = Flask(__name__) app.config.from_object(CONFIG[config_name]) BOOTSTRAP.init_app(app) # call controllers from flask_seguro.controllers.main import main as main_blueprint app.register_blueprint(main_blueprint) return app
from apps.products.models_mongo import MongoCustomerModel, MongoProductModel, MongoReviewModel from apps.products.models_dto import MongoProduct2DTO from config.settings.config_common import LOGTAIL_SOURCE_TOKEN, PAGE_SIZE_PRODUCT_DETAIL_REVIEW import datetime from logtail import LogtailHandler import logging handler = LogtailHandler(source_token=LOGTAIL_SOURCE_TOKEN) logger = logging.getLogger(__name__) logger.handlers = [] logger.addHandler(handler) class MongoProcessor: def common_query_body(): return [ { "$project":{ "product_number":1, "name":1, "chinese_name":1, "brand":1, "catalog_id_array":1, "main_img":1, "other_img_array":1, "publish_status":1, "original_price":1, "current_price":1, "stock":1, "safe_stock":1, "description":1, "keyword_array":1, "created_time":1, "updated_time":1, "created_user_id":1, "updated_user_id":1, "avg_star":1, "review_array":{ "$reverseArray":"$review_array" }, "sku_array":1, "tag_array":1, "feature_array":1, "sales":1, "sales_rank":1, "source":1, "type":1, "version":1, "sku_array_tmp":"$sku_array" } }, { "$unwind":"$sku_array_tmp" }, { "$replaceRoot":{ "newRoot":{ "$mergeObjects":[ { "product_number":"$product_number", "name":"$name", "chinese_name":"$chinese_name", "brand":"$brand", "catalog_id_array":"$catalog_id_array", "main_img":"$main_img", "other_img_array":"$other_img_array", "publish_status":"$publish_status", "original_price":"$original_price", "current_price":"$current_price", "stock":"$stock", "safe_stock":"$safe_stock", "description":"$description", "keyword_array":"$keyword_array", "created_time":"$created_time", "updated_time":"$updated_time", "created_user_id":"$created_user_id", "updated_user_id":"$updated_user_id", "avg_star":"$avg_star", "review_array":{ "$slice":[ "$review_array", PAGE_SIZE_PRODUCT_DETAIL_REVIEW ] }, "sku_array":"$sku_array", "tag_array":"$tag_array", "feature_array":"$feature_array", "sales":"$sales", "sales_rank":"$sales_rank", "source":"$source", "version":"$version", "type":"$type" }, "$sku_array_tmp" ] } } }, { "$unwind":{ "path":"$attr_array", "preserveNullAndEmptyArrays": True } }, { "$group":{ "_id":{ "attr_name":"$attr_array.name", "attr_value":"$attr_array.value", "product_number":"$product_number", "name":"$name", "chinese_name":"$chinese_name", "brand":"$brand", "catalog_id_array":"$catalog_id_array", "main_img":"$main_img", "other_img_array":"$other_img_array", "publish_status":"$publish_status", "original_price":"$original_price", "current_price":"$current_price", "stock":"$stock", "safe_stock":"$safe_stock", "description":"$description", "keyword_array":"$keyword_array", "created_time":"$created_time", "updated_time":"$updated_time", "created_user_id":"$created_user_id", "updated_user_id":"$updated_user_id", "avg_star":"$avg_star", "review_array":"$review_array", "sku_array":"$sku_array", "tag_array":"$tag_array", "feature_array":"$feature_array", "sales":"$sales", "sales_rank":"$sales_rank", "source":"$source", "version":"$version", "type":"$type" }, "attr_value_array":{ "$addToSet":{ "sku_number":"$sku_number", "attr_name":"$attr_array.name", "attr_value":"$attr_array.value" } } } }, { "$group":{ "_id":{ "attr_name":"$_id.attr_name", "product_number":"$_id.product_number", "name":"$_id.name", "chinese_name":"$_id.chinese_name", "brand":"$_id.brand", "catalog_id_array":"$_id.catalog_id_array", "main_img":"$_id.main_img", "other_img_array":"$_id.other_img_array", "publish_status":"$_id.publish_status", "original_price":"$_id.original_price", "current_price":"$_id.current_price", "stock":"$_id.stock", 
"safe_stock":"$_id.safe_stock", "description":"$_id.description", "keyword_array":"$_id.keyword_array", "created_time":"$_id.created_time", "updated_time":"$_id.updated_time", "created_user_id":"$_id.created_user_id", "updated_user_id":"$_id.updated_user_id", "avg_star":"$_id.avg_star", "review_array":"$_id.review_array", "sku_array":"$_id.sku_array", "tag_array":"$_id.tag_array", "feature_array":"$_id.feature_array", "sales":"$_id.sales", "sales_rank":"$_id.sales_rank", "source":"$_id.source", "version":"$_id.version", "type":"$_id.type" }, "attr_value_array":{ "$addToSet":{ "sku_number_array":"$attr_value_array.sku_number", "attr_name":"$_id.attr_name", "attr_value":"$_id.attr_value" } } } }, { "$group":{ "_id":{ "product_number":"$_id.product_number", "name":"$_id.name", "chinese_name":"$_id.chinese_name", "brand":"$_id.brand", "catalog_id_array":"$_id.catalog_id_array", "main_img":"$_id.main_img", "other_img_array":"$_id.other_img_array", "publish_status":"$_id.publish_status", "original_price":"$_id.original_price", "current_price":"$_id.current_price", "stock":"$_id.stock", "safe_stock":"$_id.safe_stock", "description":"$_id.description", "keyword_array":"$_id.keyword_array", "created_time":"$_id.created_time", "updated_time":"$_id.updated_time", "created_user_id":"$_id.created_user_id", "updated_user_id":"$_id.updated_user_id", "avg_star":"$_id.avg_star", "review_array":"$_id.review_array", "sku_array":"$_id.sku_array", "tag_array":"$_id.tag_array", "feature_array":"$_id.feature_array", "sales":"$_id.sales", "sales_rank":"$_id.sales_rank", "source":"$_id.source", "version":"$_id.version", "type":"$_id.type" }, "attr_array_pdp":{ "$push":{ "attr_name":"$_id.attr_name", "attr_value_array":"$attr_value_array" } } } }, { "$project":{ "_id":0, "product_number":"$_id.product_number", "name":"$_id.name", "chinese_name":"$_id.chinese_name", "brand":"$_id.brand", "catalog_id_array":"$_id.catalog_id_array", "main_img":"$_id.main_img", "other_img_array":"$_id.other_img_array", "publish_status":"$_id.publish_status", "original_price":"$_id.original_price", "current_price":"$_id.current_price", "stock":"$_id.stock", "safe_stock":"$_id.safe_stock", "description":"$_id.description", "keyword_array":"$_id.keyword_array", "created_time":"$_id.created_time", "updated_time":"$_id.updated_time", "created_user_id":"$_id.created_user_id", "updated_user_id":"$_id.updated_user_id", "avg_star":"$_id.avg_star", "review_array":"$_id.review_array", "sku_array":"$_id.sku_array", "tag_array":"$_id.tag_array", "feature_array":"$_id.feature_array", "attr_array_pdp":"$attr_array_pdp", "sales":"$_id.sales", "sales_rank":"$_id.sales_rank", "source":"$_id.source", "version":"$_id.version", "type":"$_id.type" } } ] def query_product_by_product_number(product_number): aggregate_array = [ { "$match": { "product_number": product_number } } ] aggregate_array.extend(MongoProcessor.common_query_body()) product_array = list(MongoProductModel.objects().aggregate(aggregate_array)) if len(product_array) == 0: return None else: return product_array[0] def query_product_by_sku_number(sku_number): aggregate_array = [ { "$match": { "sku_array.sku_number": sku_number } } ] aggregate_array.extend(MongoProcessor.common_query_body()) product_array = list(MongoProductModel.objects().aggregate(aggregate_array)) if len(product_array) == 0: return None else: return product_array[0] def query_review_array_by_product_number(product_number, page_number, page_size): query_body = [ { "$match":{ "product_number": str(product_number) } }, { 
"$project":{ "review":{ "$reverseArray":"$review_array" } } }, { "$unwind":"$review" }, { "$skip":(int(page_number) - 1) * int(page_size) }, { "$limit":int(page_size) }, { "$project":{ "customer":"$review.customer", "content":"$review.content", "created_time":"$review.created_time", "star":"$review.star" } } ] review_array = list(MongoProductModel.objects().aggregate(query_body)) if len(review_array) == 0: return None else: return review_array def query_sku_array_by_sku_number_array(sku_number_array): query_body = [ { "$match": { "sku_array.sku_number": { "$in": sku_number_array } } }, { "$unwind": "$sku_array" }, { "$match": { "sku_array.sku_number": { "$in": sku_number_array } } }, { "$replaceRoot": { "newRoot": { "$mergeObjects": [ { "_id": "$_id", "product_number": "$product_number", "name": "$name", "chinese_name": "$chinese_name", "brand": "$brand", "catalog_id_array": "$catalog_id_array", "main_img": "$main_img", "other_img_array": "$other_img_array", "publish_status": "$publish_status", "original_price": "$original_price", "current_price": "$current_price", "stock": "$stock", "safe_stock": "$safe_stock", "description": "$description", "keyword_array": "$keyword_array", "created_time": "$created_time", "updated_time": "$updated_time", "created_user_id": "$created_user_id", "updated_user_id": "$updated_user_id", "avg_star": "$avg_star", "tag_array": "$tag_array", "feature_array": "$feature_array", "sales": "$sales", "sales_rank": "$sales_rank", "source": "$source", "version": "$version", "type": "$type" }, "$sku_array" ] } } }, { "$project": { "_id": 0, "product_number": "$product_number", "name": "$name", "chinese_name": "$chinese_name", "brand": "$brand", "catalog_id_array": "$catalog_id_array", "main_img": "$main_img", "other_img_array": "$other_img_array", "publish_status": "$publish_status", "original_price": "$original_price", "current_price": "$current_price", "stock": "$stock", "safe_stock": "$safe_stock", "description": "$description", "keyword_array": "$keyword_array", "created_time": "$created_time", "updated_time": "$updated_time", "created_user_id": "$created_user_id", "updated_user_id": "$updated_user_id", "avg_star": "$avg_star", "sku_number": "$sku_number", "sku_img_array": "$sku_img_array", "attr_array": "$attr_array", "sattr_array": "$sattr_array", "tag_array": "$tag_array", "feature_array": "$feature_array", "sales": "$sales", "sales_rank": "$sales_rank", "source": "$source", "version": "$version", "type": "$type" } } ] product_array = list(MongoProductModel.objects().aggregate(query_body)) res = [] for product in product_array: res.append(MongoProduct2DTO(product)) return res def query_changed_product_number_array(): query_body = [ { "$match": { "changed_flag": 1 } }, { "$group": { "_id": "null", "sync_product_number_array": { "$addToSet": "$product_number" } } } ] productNumberArray = list(MongoProductModel.objects().aggregate(query_body)) return productNumberArray def query_product_array_by_product_number_array(product_number_array): if len(product_number_array) == 0: return None query_body = [ { "$match": { "product_number": { "$in": product_number_array } } }, { "$unwind": "$sku_array" }, { "$replaceRoot": { "newRoot": { "$mergeObjects": [ { "product_number": "$product_number", "name": "$name", "chinese_name": "$chinese_name", "brand": "$brand", "catalog_id_array": "$catalog_id_array", "main_img": "$main_img", "other_img_array": "$other_img_array", "publish_status": "$publish_status", "original_price": "$original_price", "current_price": "$current_price", 
"stock": "$stock", "safe_stock": "$safe_stock", "description": "$description", "keyword_array": "$keyword_array", "created_time": "$created_time", "updated_time": "$updated_time", "created_user_id": "$created_user_id", "updated_user_id": "$updated_user_id", "avg_star": "$avg_star", "tag_array": "$tag_array", "feature_array": "$feature_array", "sales": "$sales", "sales_rank": "$sales_rank", "source": "$source", "version": "$version", "type": "$type" }, "$sku_array" ] } } }, { "$unwind": { "path": "$attr_array", "preserveNullAndEmptyArrays": True } }, { "$unwind": { "path": "$sattr_array", "preserveNullAndEmptyArrays": True } }, { "$group": { "_id": { "product_number": "$product_number", "name": "$name", "chinese_name": "$chinese_name", "brand": "$brand", "catalog_id_array": "$catalog_id_array", "main_img": "$main_img", "other_img_array": "$other_img_array", "publish_status": "$publish_status", "original_price": "$original_price", "current_price": "$current_price", "stock": "$stock", "safe_stock": "$safe_stock", "description": "$description", "keyword_array": "$keyword_array", "created_time": "$created_time", "updated_time": "$updated_time", "created_user_id": "$created_user_id", "updated_user_id": "$updated_user_id", "avg_star": "$avg_star", "tag_array": "$tag_array", "feature_array": "$feature_array", "sales": "$sales", "sales_rank": "$sales_rank", "source": "$source", "version": "$version", "type": "$type" }, "attr_array": { "$addToSet": "$attr_array" }, "sattr_array": { "$addToSet": "$sattr_array" }, "sku_number_array": { "$addToSet": "$sku_number" } } }, { "$project": { "product_number": "$_id.product_number", "name": "$_id.name", "chinese_name": "$_id.chinese_name", "brand": "$_id.brand", "catalog_id_array": "$_id.catalog_id_array", "main_img": "$_id.main_img", "other_img_array": "$_id.other_img_array", "publish_status": "$_id.publish_status", "original_price": "$_id.original_price", "current_price": "$_id.current_price", "stock": "$_id.stock", "safe_stock": "$_id.safe_stock", "description": "$_id.description", "keyword_array": "$_id.keyword_array", "created_time": "$_id.created_time", "updated_time": "$_id.updated_time", "created_user_id": "$_id.created_user_id", "updated_user_id": "$_id.updated_user_id", "avg_star": "$_id.avg_star", "sku_number_array": "$sku_number_array", "attr_array": "$attr_array", "sattr_array": "$sattr_array", "tag_array": "$_id.tag_array", "feature_array": "$_id.feature_array", "sales": "$_id.sales", "sales_rank": "$_id.sales_rank", "source": "$_id.source", "version": "$_id.version", "type": "$_id.type" } } ] product_array = list(MongoProductModel.objects().aggregate(query_body)) return product_array def add_review_by_product_number(product_number, content, star, customer): product = MongoProductModel.objects(product_number=product_number) if product.count() == 0: return None else: product = product[0] review = MongoReviewModel( content=content, star=star, created_time=datetime.datetime.now(), customer=MongoCustomerModel( id=str(customer.id), first_name=customer.first_name, last_name=customer.last_name, profile_img=customer.img.name)) product.review_array.append(review) product.avg_star = (float(product.avg_star * len(product.review_array) ) + float(star)) / (len(product.review_array) + 1) product.changed_flag = 1 product.save() return review def update_product_changed_flag_by_product_number_array(product_number_array): if len(product_number_array) == 0: return False MongoProductModel.objects(product_number__in=product_number_array).update(changed_flag=0) 
return True def delete_all_products(): MongoProductModel.objects().delete() logger.info("deleted all mongo data")
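# The review query above pages with $skip/$limit over the reversed review
# array. A small pure helper makes the 1-based page arithmetic explicit and can
# be reused when building similar pipelines; the name is illustrative, not part
# of the original class.
def pagination_window(page_number, page_size):
    """Return (skip, limit) for a 1-based page of an aggregation pipeline."""
    skip = (int(page_number) - 1) * int(page_size)
    return skip, int(page_size)

# pagination_window(1, 20) -> (0, 20); pagination_window(3, 20) -> (40, 20)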
import LevelBuilder from sprites import * from sprite_templates import * def render(name,bg): lb = LevelBuilder.LevelBuilder(name+".plist",background=bg) lb.addObject(Hero.HeroSprite(x=49, y=58,width=42,height=74)) lb.addObject(Bullet.BulletSprite(x=0, y=0,width=10,height=10,angle='0',restitution=0.5,static='false',friction=0.5,density=3,spawnEvent='onShoot')) lb.addObject(Teleporter.TeleporterSprite(level_id='leveldata/menu')) lb.addObject(ZoomTrigger.ZoomTriggerSprite(x=150-115-50,y=250,width=100,height=500,zoom_fact=1.0)) lb.addObject(ZoomTrigger.ZoomTriggerSprite(x=150,y=320-60,width=128,height=100,zoom_fact=0.1666)) lb.addObject(ZoomTrigger.ZoomTriggerSprite(x=150+115+50,y=250,width=100,height=500,zoom_fact=0.75)) lb.addObject(WatchtowerVisual.WatchtowerVisualSprite(x=150, y=92,width=128,height=235-50,angle='0',restitution=0.2,static='true',friction=0.5,density=20,firstframe='watchtower.png' )) BlueMonster.create(lb,0) Snake.create(lb, 3*480) lb.addObject(Beam.BeamSprite(x=2551, y=161,width=546,height=87,angle='0' ,restitution=0.2,static='false',friction=0.5,density=40 ).setName('Beam')) lb.addObject(Beam.BeamSprite(x=2298, y=57, width=30,height=114,angle='0' ,restitution=0.2,static='true',friction=0.5,density=20 ,classname='Destructable', firstframe ='brittle_brick_4_1.png' ).setName('dBeam')) lb.addObject(Crate.CrateSprite(x=2798,y=223,width=32, height=32, static='false',angle=0)) lb.addObject(Crate.CrateSprite(x=2759,y=223,width=32, height=32, static='false',angle=0)) lb.addObject(Crate.CrateSprite(x=2717,y=223,width=32, height=32, static='false',angle=0)) lb.addObject(Crate.CrateSprite(x=2674,y=223,width=32, height=32, static='false',angle=0)) lb.addObject(Crate.CrateSprite(x=2638,y=223,width=32, height=32, static='false',angle=0)) lb.addObject(Crate.CrateSprite(x=2599,y=223,width=32, height=32, static='false',angle=0)) lb.addObject(Pickup.PickupSprite(x=2570,y=260,width=32, height=32, static='false',angle=0)) lb.addObject(Pickup.PickupSprite(x=2609,y=260,width=32, height=32, static='false',angle=0)) lb.addObject(Pickup.PickupSprite(x=2647,y=260,width=32, height=32, static='false',angle=0)) lb.addObject(Pickup.PickupSprite(x=2684,y=260,width=32, height=32, static='false',angle=0)) lb.addObject(Pickup.PickupSprite(x=2720,y=260,width=32, height=32, static='false',angle=0)) lb.addObject(EnemyEquipedRotor.EnemyEquipedRotorSprite(x=1714,y=156,scaling=2.21875,speed=3000,torque=3)) lb.addObject(Crate.CrateSprite(x=2557,y=223,width=32, height=32, static='false',angle=0)) lb.addObject(Crate.CrateSprite(x=2518,y=223,width=32, height=32, static='false',angle=0)) lb.addObject(Crate.CrateSprite(x=2476,y=223,width=32, height=32, static='false',angle=0)) lb.addObject(Crate.CrateSprite(x=2433,y=223,width=32, height=32, static='false',angle=0)) lb.addObject(Crate.CrateSprite(x=2397,y=223,width=32, height=32, static='false',angle=0)) lb.addObject(Crate.CrateSprite(x=2358,y=223,width=32, height=32, static='false',angle=0)) lb.addObject(Pickup.PickupSprite(x=2378,y=260,width=32, height=32, static='false',angle=0)) lb.addObject(Pickup.PickupSprite(x=2417,y=260,width=32, height=32, static='false',angle=0)) lb.addObject(Pickup.PickupSprite(x=2455,y=260,width=32, height=32, static='false',angle=0)) lb.addObject(Pickup.PickupSprite(x=2492,y=260,width=32, height=32, static='false',angle=0)) lb.addObject(Pickup.PickupSprite(x=2528,y=260,width=32, height=32, static='false',angle=0)) lb.addObject(BulletTimePickup.BulletTimePickupSprite(x=2763,y=258,width=32, height=32, static='false',angle=0)) 
lb.addObject(BulletTimePickup.BulletTimePickupSprite(x=2798,y=258,width=32, height=32, static='false',angle=0)) lb.addObject(Beam.BeamSprite(x=2327, y=110,width=29,height=14,angle='0' ,restitution=0.2,static='true',friction=0.5,density=20 , firstframe ='bar_long.png' ).setName('Beam')) lb.addObject(Beam.BeamSprite(x=2764, y=110,width=29,height=14,angle='0' ,restitution=0.2,static='true',friction=0.5,density=20 , firstframe ='bar_long.png' ).setName('Beam')) lb.render()
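# The crate and pickup rows above repeat near-identical addObject calls with
# hand-typed coordinates. A hedged refactoring sketch that generates such a row
# from a list of x positions; the offsets and example coordinates are
# illustrative, not taken from the level data.
def add_crate_row(lb, xs, crate_y=223, pickup_y=260, pickup_offset=20):
    for x in xs:
        lb.addObject(Crate.CrateSprite(x=x, y=crate_y, width=32, height=32,
                                       static='false', angle=0))
        lb.addObject(Pickup.PickupSprite(x=x + pickup_offset, y=pickup_y,
                                         width=32, height=32, static='false', angle=0))

# add_crate_row(lb, [2358, 2397, 2433, 2476, 2518, 2557])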
from collections import OrderedDict from transformers import BertTokenizer, is_tf_available from transformers.modeling_albert import AlbertConfig from transformers.tokenization_albert import AlbertTokenizer from ..model_base import TaskModels, ModelType, ModelTaskType, model_func default_model = model_func(ModelType.albert, AlbertConfig, AlbertTokenizer, 'albert') bert_model = model_func(ModelType.albert, AlbertConfig, BertTokenizer, 'albert') ''' Attention please, some albert models are using BertTokenizer. ''' class Albert_Task_Models_Base(TaskModels): MODELS = { "cn": [bert_model("clue/albert_chinese_tiny"), bert_model("clue/albert_chinese_small"), default_model("onePatient/albert_chinese_small"), bert_model("voidful/albert_chinese_tiny"), bert_model("voidful/albert_chinese_small"), bert_model("voidful/albert_chinese_base"), bert_model("voidful/albert_chinese_large"), bert_model("voidful/albert_chinese_xlarge") ]} class Albert_Task_Models(Albert_Task_Models_Base): from transformers.modeling_albert import AlbertForMaskedLM, AlbertModel, AlbertForQuestionAnswering, \ AlbertForTokenClassification, AlbertPreTrainedModel, AlbertForSequenceClassification MODEL_CLASSES = OrderedDict([ (ModelTaskType.base, AlbertModel), (ModelTaskType.pretrain, AlbertPreTrainedModel), (ModelTaskType.lm_head, AlbertForMaskedLM), (ModelTaskType.seq_cls, AlbertForSequenceClassification), (ModelTaskType.token_cls, AlbertForTokenClassification), (ModelTaskType.qa, AlbertForQuestionAnswering), ]) if is_tf_available(): class TFAlbert_Task_Models(Albert_Task_Models_Base): from transformers.modeling_tf_albert import TFAlbertForMaskedLM, TFAlbertModel, TFAlbertForQuestionAnswering, \ TFAlbertPreTrainedModel, TFAlbertForSequenceClassification, TFAlbertForMultipleChoice MODEL_CLASSES = OrderedDict([ (ModelTaskType.base, TFAlbertModel), (ModelTaskType.pretrain, TFAlbertPreTrainedModel), (ModelTaskType.lm_head, TFAlbertForMaskedLM), (ModelTaskType.seq_cls, TFAlbertForSequenceClassification), (ModelTaskType.multi_choice, TFAlbertForMultipleChoice), (ModelTaskType.qa, TFAlbertForQuestionAnswering), ])
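# A hedged usage sketch: the note in this module says several Chinese ALBERT
# checkpoints ship with a BERT vocabulary, which is why bert_model pairs
# AlbertConfig with BertTokenizer. Loading one of the listed checkpoints
# directly with transformers would look roughly like this (the input sentence
# is illustrative):
from transformers import BertTokenizer
from transformers.modeling_albert import AlbertModel

tokenizer = BertTokenizer.from_pretrained("voidful/albert_chinese_tiny")
model = AlbertModel.from_pretrained("voidful/albert_chinese_tiny")
input_ids = tokenizer.encode("北京欢迎你", return_tensors="pt")
outputs = model(input_ids)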
# runs one dataset instead of looping through a list of them
# this way all of the files can be run at once

import CloneSeqAnalysisLibrary
from CloneSeqAnalysisLibrary import analyze_clone_seq
import os
from os import path
import sys

colony = sys.argv[1]

reference_file = "/local/storage/rhlin_to_copy/CloneSeqExample/Reference.fa"
attempted_file = "/local/storage/rhlin_to_copy/CloneSeqExample/Mutation_Attempts.txt"
sequencePath = "/local/storage/rhlin_to_copy/CloneSeqExample/"
outputPath = "/local/storage/rhlin/ESP_7_output/"
# outputPath = "/local/storage/rhlin/ESP_7_test/"

sequencing_file = sequencePath + "ESP_7_" + colony + "_trimmed.fastq"
fileName = sequencing_file[-20 - len(colony):-6]
samFileName = fileName + ".sam"
outputFile = fileName + "_Summary.txt"

# print("reference_file: " + reference_file)
# print("attempted_file: " + attempted_file)
# print("sequencePath: " + sequencePath)
# print("outputPath: " + outputPath)
# print("sequencing_file: ", sequencing_file)
# print("samFileName: ", samFileName)
# print("outputFile: ", outputFile)
# quit()

analyze_clone_seq(sequencing_file, attempted_file, reference_file, samFileName, outputFile, outputPath)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
summaryReportMap
----------------

Given a project id this service returns a map image showing the features for that project.
'''

from arcpy import mapping, env, Extent, GetParameterAsText, SetParameterAsText
from os.path import join, dirname
from math import isnan

path_to_mxd = join(dirname(__file__), 'SummaryReport.mxd')
image_path = join(env.scratchFolder, 'map_export.png')


def main(project_id, width, height):
    mxd = mapping.MapDocument(path_to_mxd)
    data_frame = mapping.ListDataFrames(mxd)[0]
    feature_layers = mapping.ListLayers(mxd)[0:3]

    xmin = None
    ymin = None
    xmax = None
    ymax = None
    for l in feature_layers:
        l.definitionQuery = 'Project_ID = {}'.format(project_id)
        extent = l.getExtent()
        if xmin is None or isnan(xmin) or extent.XMin < xmin:
            xmin = extent.XMin
        if ymin is None or isnan(ymin) or extent.YMin < ymin:
            ymin = extent.YMin
        if xmax is None or isnan(xmax) or extent.XMax > xmax:
            xmax = extent.XMax
        if ymax is None or isnan(ymax) or extent.YMax > ymax:
            ymax = extent.YMax

    #: validate that features were found
    if isnan(xmin):
        raise Exception('No features found for project id: {}!'.format(project_id))

    data_frame.extent = Extent(xmin, ymin, xmax, ymax)

    mapping.ExportToPNG(mxd, image_path, data_frame=mxd.activeDataFrame,
                        df_export_width=int(width), df_export_height=int(height))

    print(image_path)
    return image_path


if __name__ == '__main__':
    result = main(GetParameterAsText(0), GetParameterAsText(1), GetParameterAsText(2))
    SetParameterAsText(3, result)
class Solution:
    def combinationSum4(self, nums, target):
        memo = {}

        def dfs(sm):
            if sm in memo:
                return memo[sm]
            else:
                if sm >= target:
                    memo[sm] = sm == target
                    return memo[sm]
            cnt = 0
            for num in nums:
                memo[sm + num] = dfs(sm + num)
                cnt += memo[sm + num]
            return cnt

        return dfs(0)
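
# A minimal usage sketch for the solver above, assuming the usual
# "Combination Sum IV" semantics: count ordered sequences drawn from `nums`
# (with repetition allowed) whose elements sum to `target`.
if __name__ == "__main__":
    # Valid sequences for nums=[1, 2, 3], target=4:
    # (1,1,2), (1,2,1), (2,1,1), (1,3), (3,1), (2,2), (1,1,1,1) -> 7
    print(Solution().combinationSum4([1, 2, 3], 4))  # expected: 7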
""" Calculate the probability of encountering a word w, given that it is within the top 1000 most common words. P(r) is given by 0.1/r, where 1 < r < 1000. 1000 words is the limit as beyond that the harmonic series diverges for less frequent words. About 50% of all words in the Oxford English Corpus reside in the top 100 words. """ HARMONIC_LIMIT = 1000 def frequency_of(rank, harmonic_limit=HARMONIC_LIMIT): """ Rank a word according to zipf's law. >>> frequency_of(1) 0.1 >>> frequency_of(3) 0.1 / 3 """ return 0.1 / min(rank, 10000)
# -*- coding: utf-8 -*- import random from random import randrange from model.group import Group from model.address import Address import pytest from generator.address import testdata @pytest.mark.parametrize("address", testdata, ids=[repr(x) for x in testdata]) def test_del_address_to_group(app, orm, address, check_ui): addresses_in_group = None if not len(orm.get_address_list()): app.address.create(address) if not len(orm.get_group_list()): app.group.create(Group(name="test")) if len(orm.get_all_address_to_group()) == 0: all_addresses = orm.get_address_list() all_group = orm.get_group_list() select_address = random.choice(all_addresses) select_group = random.choice(all_group) app.address.select_all_groups() app.address.selected_by_id(select_address.id) app.address.insert_address_in_group_by_id(select_group.id) all_group = orm.get_group_list() count_group = len(all_group) while count_group: select_group = random.choice(all_group) if len(select_group.name): addresses_in_group = orm.get_address_in_group(select_group) if len(addresses_in_group): break else: count_group -= 1 else: count_group -= 1 assert addresses_in_group is not None and len(addresses_in_group) != 0, "No contacts in groups" app.address.to_select_group(select_group.id) select_address = random.choice(addresses_in_group) app.address.selected_by_id(select_address.id) app.address.delete_address_from_group_by_id(select_address.id) new_addresses_in_group = orm.get_address_in_group(select_group) assert len(addresses_in_group) - 1 == len(new_addresses_in_group) if check_ui: orm_addr_in_gr = sorted(orm.get_address_in_group(select_group), key=Address.id_or_max) app_addr_in_gr = sorted(app.address.get_address_in_group(select_group), key=Address.id_or_max) assert orm_addr_in_gr == app_addr_in_gr
""" Patches bokeh resources to make it easy to add external JS and CSS resources via the panel.config object. """ from __future__ import absolute_import, division, unicode_literals import glob import json import os from collections import OrderedDict from pathlib import Path from bokeh.resources import Resources from bokeh.settings import settings from jinja2 import Environment, Markup, FileSystemLoader with open(Path(__file__).parent.parent / 'package.json') as f: package_json = json.load(f) js_version = package_json['version'].split('+')[0] CDN_DIST = f"https://unpkg.com/@holoviz/panel@{js_version}/dist/" LOCAL_DIST = "/static/extensions/panel/" DIST_DIR = Path(__file__).parent.parent / 'dist' def get_env(): ''' Get the correct Jinja2 Environment, also for frozen scripts. ''' local_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '_templates')) return Environment(loader=FileSystemLoader(local_path)) def css_raw(self): from ..config import config raw = super(Resources, self).css_raw for cssf in config.css_files: if not os.path.isfile(cssf): continue with open(cssf, encoding='utf-8') as f: css_txt = f.read() if css_txt not in raw: raw.append(css_txt) resources = settings.resources(default='server') for cssf in glob.glob(str(DIST_DIR / 'css' / '*.css')): if resources != 'inline': break with open(cssf, encoding='utf-8') as f: css_txt = f.read() if css_txt not in raw: raw.append(css_txt) return raw + config.raw_css def js_files(self): from ..config import config files = super(Resources, self).js_files js_files = files + list(config.js_files.values()) # Load requirejs last to avoid interfering with other libraries require_index = [i for i, jsf in enumerate(js_files) if 'require' in jsf] resources = settings.resources(default='server') dist_dir = LOCAL_DIST if resources == 'server' else CDN_DIST if require_index: requirejs = js_files.pop(require_index[0]) if any('ace' in jsf for jsf in js_files): js_files.append(dist_dir+'pre_require.js') js_files.append(requirejs) if any('ace' in jsf for jsf in js_files): js_files.append(dist_dir+'post_require.js') return js_files def css_files(self): from ..config import config files = super(Resources, self).css_files for cssf in config.css_files: if os.path.isfile(cssf) or cssf in files: continue files.append(cssf) resources = settings.resources(default='server') dist_dir = LOCAL_DIST if resources == 'server' else CDN_DIST for cssf in glob.glob(str(DIST_DIR / 'css' / '*.css')): if resources == 'inline': break files.append(dist_dir + f'css/{os.path.basename(cssf)}') return files def conffilter(value): return json.dumps(OrderedDict(value)).replace('"', '\'') _env = get_env() _env.filters['json'] = lambda obj: Markup(json.dumps(obj)) _env.filters['conffilter'] = conffilter Resources.css_raw = property(css_raw) Resources.js_files = property(js_files) Resources.css_files = property(css_files)
# -*- coding: utf-8 -*- ''' 上海旅游景点客流量爬虫 这个要用到selenium和chromedriver,下载驱动用这个网址: https://sites.google.com/a/chromium.org/chromedriver/downloads 注意驱动版本和chrome版本对应 chrome版本查询: 打开chrome浏览器, chrome://version 爬取网站:https://shanghaicity.openservice.kankanews.com/public/tour/ 给了mysql和mongo两种数据库插入版本 ''' import requests from lxml import etree import random import time import pymongo from pymongo.errors import DuplicateKeyError import logging # 引入logging模块 import logging.handlers import json import re import os import threading import urllib.request from selenium import webdriver from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from scrapy.http.response.html import HtmlResponse from selenium.webdriver.chrome.options import Options import pymysql from pymysql import cursors ''' 样本: {'CODE': '2', 'NAME': '上海野生动物园', 'TIME': '2019-07-21 15:45:00', 'R_TIME': '2019\\/7\\/21 15:44:55', 'NUM': '7318', 'SSD': '舒适', 'DES': '', 'START_TIME': '08:00', 'END_TIME': '18:00', 'INFO': '上海野生动物园是集野生动物饲养、展览、繁育保护、科普教育与休闲娱乐为一体的主题公园。景区于1995年11月18日正式对外开放,地处上海浦东新区,占地153公顷(约2300亩),是首批国家5A级旅游景区。 园区居住着大熊猫、金丝猴、金毛羚牛、朱鹮、长颈鹿、斑马、羚羊、白犀牛、猎豹等来自国内外的珍稀野生动物200余种,上万余只。园区分为车入区和步行区两大参观区域。 步行区,让您在寓教于乐中进一步了解动物朋友。不仅可以观赏到大熊猫、非洲象、亚洲象、长颈鹿、黑猩猩、长臂猿、狐猴、火烈鸟、朱鹮等众多珍稀野生动物,更有诸多特色的动物行为展示和互动体验呈现。 车入区为动物散放养展示形式,保持着 “人在‘笼’中,动物自由”的展览模式,给动物更多的自由空间。使您身临其境的感受一群群斑马、羚羊、角马、犀牛等食草动物簇拥在一起悠闲觅食;又能领略猎豹、东北虎、非洲狮、熊、狼等大型猛兽“部落”展现野性雄姿。 另外,园内还设有5座功能各异的表演场馆。身怀绝技的俄罗斯专业团队携各路动物明星演艺“魔幻之旅”;猎豹、格力犬、蒙 联系电话:021-58036000', 'MAX_NUM': '60000', 'IMAGE': '图片111111_20160302080923201.png', 'TYPE': '正常', 'T_CODE': '5', 'INITIAL': 'SHYSDWY', 'RANK': '5A', 'COUNTY': '浦东新区', 'LOCATION_X': 121.723586, 'LOCATION_Y': 31.05928, 'SWITCH': 1, 'WEATHER_INFO': 1, 'WEATHER_DES': '多云', 'WEATHER_HIGH': '33', 'WEATHER_LOW': '26', 'WEATHER_DIRECTION': '东南风', 'WEATHER_POWER': '3-4级'} ''' # MongoDb 配置 LOCAL_MONGO_HOST = '127.0.0.1' LOCAL_MONGO_PORT = 27017 DB_NAME = 'Traffic' # mongo数据库的Host, collection设置 client = pymongo.MongoClient(LOCAL_MONGO_HOST, LOCAL_MONGO_PORT) collection = client[DB_NAME]["Attractions"] ''' # SQL 配置 dbparams = { 'host': 'localhost', 'port': 3306, 'user': 'root', 'password': '1234', # 'database': 'traffic', 'charset': 'utf8' } conn = pymysql.connect(**dbparams) cursor = conn.cursor() try: sql2 = "CREATE DATABASE IF NOT EXISTS traffic" # 执行创建数据库的sql cursor.execute(sql2) except: pass dbparams = { 'host': 'localhost', 'port': 3306, 'user': 'root', 'password': '1234', 'database': 'traffic', 'charset': 'utf8' } conn = pymysql.connect(**dbparams) cursor = conn.cursor() try: sql = """CREATE TABLE Attraction( id varchar(255), code varchar(255), name_ varchar(255), time_ varchar(255), real_time varchar(255), num varchar(255), max_num varchar(255), ssd varchar(255), start_time varchar(255), end_time varchar(255), rank varchar(255), county varchar(255), loc_x varchar(255), loc_y varchar(255), weather_info varchar(255), weather_des varchar(255), weather_high varchar(255), weather_low varchar(255), weather_dir varchar(255), weather_pow varchar(255))DEFAULT CHARSET=utf8""" cursor.execute(sql) except Exception as e: print(e) ''' # 随机请求头设置 USER_AGENTS = [ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36', 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/44.0.2403.155 Safari/537.36', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like 
Gecko) Chrome/41.0.2227.0 Safari/537.36', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36', 'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36', 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; Avant Browser; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)', 'Mozilla/5.0 (X11; Linux i686; rv:64.0) Gecko/20100101 Firefox/64.0', 'Mozilla/5.0 (X11; Linux i586; rv:63.0) Gecko/20100101 Firefox/63.0', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.10; rv:62.0) Gecko/20100101 Firefox/62.0' ] url = 'https://shanghaicity.openservice.kankanews.com/public/tour/filterinfo2' def attrct(): headers = {} headers['User-Agent'] = random.choice(USER_AGENTS) chrome_options = Options() headers = random.choice(USER_AGENTS) chrome_options.add_argument('--user-agent={}'.format(headers)) # 设置请求头的User-Agent chrome_options.add_argument('--blink-settings=imagesEnabled=false') # 不加载图片, 提升速度 chrome_options.add_argument('--headless') # 浏览器不提供可视化页面 chrome_options.add_argument('--disable-gpu') driver = webdriver.Chrome(chrome_options=chrome_options) driver.get(url) tree_node = etree.HTML(driver.page_source) # tree_node = etree.HTML(driver.page_source.encode('utf8').decode('unicode_escape')) # print(driver.page_source.encode('utf8').decode('unicode_escape')) # print(driver.page_source) # tree_node = etree.HTML(driver.page_source) for i in eval(tree_node.xpath("//pre//text()")[0]): code = i["CODE"] name = str(i["NAME"]) # print(name) time_ = i["TIME"] real_time = i["R_TIME"].replace("\\", "").replace("/", "-") num = i["NUM"] max_num = i["MAX_NUM"] ssd = i["SSD"] start_time = i["START_TIME"] end_time = i["END_TIME"] type_ = i["TYPE"] rank = i["RANK"] county = i["COUNTY"] loc_x = str(i["LOCATION_X"]) loc_y = str(i["LOCATION_Y"]) weather_info = str(i["WEATHER_INFO"]) weather_des = i["WEATHER_DES"] weather_high = i["WEATHER_HIGH"] weather_low = i["WEATHER_LOW"] weather_dir = i["WEATHER_DIRECTION"] weather_pow = i["WEATHER_POWER"] id = code + "-" + time_.replace(" ", '-').replace(":", '-') dict_attrct = {"_id": id, "code": code, "name": name, "time": time_, "real_time": real_time, "num": num, "max_num": max_num, "ssd": ssd, "start_time": start_time, "end_time": end_time, "type": type_, "rank": rank, "county": county, "loc_x": loc_x, "loc_y": loc_y, "weather_info": weather_info, "weather_des": weather_des, "weather_high": weather_high, "weather_low": weather_low, "weather_dir": weather_dir, "weather_pow": weather_pow} logger.info(str(dict_attrct)) try: collection.insert_one(dict_attrct) except DuplicateKeyError as e: pass driver.close() ''' try: cursor.execute("""INSERT INTO Attraction (id,code,name_,time_,real_time,num,max_num,ssd,start_time,end_time,rank,county,loc_x,loc_y,weather_info,weather_des,weather_high,weather_low,weather_dir,weather_pow) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",(id, code, name, time_, real_time, num, max_num, ssd, start_time, end_time, rank, county, loc_x, loc_y, weather_info, weather_des, weather_high, weather_low, weather_dir, weather_pow)) conn.commit() except Exception as e: print(e) ''' if __name__ == '__main__': logger = logging.getLogger() logger.setLevel(logging.INFO) # Log等级总开关 # 第二步,创建一个handler,用于写入日志文件 rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time())) log_path = os.path.join(os.getcwd(), 
'Logs') if not os.path.exists(log_path): os.makedirs(log_path) log_file = os.path.join(log_path, rq + 'Attractions.log') fh = logging.handlers.RotatingFileHandler(log_file, mode='a', maxBytes=1024, backupCount=5) # fh = logging.FileHandler(logfile, mode='w') fh.setLevel(logging.DEBUG) # 输出到file的log等级的开关 # st = logging.StreamHandler() # st.setLevel(logging.DEBUG) # 输出到file的log等级的开关 # 第三步,定义handler的输出格式 formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s") fh.setFormatter(formatter) # st.setFormatter(formatter) # 第四步,将logger添加到handler里面 logger.addHandler(fh) # logger.addHandler(st) t = str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) logger.info(t) attrct()
import smtplib
from email.mime.text import MIMEText


class Mail(object):
    """Thin wrapper around smtplib for sending a single plain-text message."""

    def __init__(self, smtpserver, username, password):
        super(Mail, self).__init__()
        self.server = smtplib.SMTP(smtpserver)
        self.username = username
        self.password = password

    def send(self, me, you, subject, message):
        self.server.login(self.username, self.password)
        msg = MIMEText(message)
        msg['Subject'] = subject
        msg['From'] = me
        msg['To'] = you
        self.server.sendmail(me, [you], msg.as_string())
        self.server.quit()

# mail = Mail(smtp, u, p)
# mail.send(Me, You, "hello run", "ok and test")
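
# A minimal usage sketch of the class above (host and credentials are
# placeholders, not real values). Note that many providers only accept LOGIN
# after STARTTLS, which this class never issues; for such servers
# smtplib.SMTP.starttls() would have to be called on `self.server` before
# logging in.
#
# mail = Mail("smtp.example.com", "user@example.com", "app-password")
# mail.send("user@example.com", "dest@example.com", "hello run", "ok and test")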
#!/usr/bin/env python
"""cron_parser.py: command line utility to check cron expressions and parse all possible outputs for security purposes"""

"""
method to convert cron piece to
1. check if it has wild cards
2. strip them if it does
3. operate via wild cards and determine all possible values for the term
4. save all values to a list

need to output cron_pieces on separate lines. cron_pieces are: minute, hour, day of month, month, day of week
"""

__author__ = "Ed Heaver"
__credits__ = ["Ed Heaver"]
__license__ = "GPL"
__version__ = "0.8"
__maintainer__ = "Ed Heaver"
__email__ = "[email protected]"
__status__ = "Development"

# import re and argv
import re
from sys import argv

# accept user input from cmdline with script (hard coded cron_ex for testing purposes)
# cron_ex = argv[0-6]
# cron_ex = "*/15 0 1,15 * 1-5 /usr/bin/find"

# minutes, hours, day of month, month, day of week lists
minutes = []
hours = []
day_of_month = []
month = []
day_of_week = []

# split cron into pieces
# cron_pieces = cron_ex.split(" ")


# function to strip non numeric chars and operate on numbers with wild card chars removed.
# currently have *, /, - and , implemented and can implement more
def cron_output(piece, time_period, values):
    if bool(re.match(r"^\*\/\d+$", piece)) == True:
        number = int(re.sub(r"\W+", '', piece))
        for i in range(0, time_period):
            if i % number == 0:
                values.append(i)
        return values
    elif bool(re.match(r"^\*$", piece)) == True:
        for i in range(1, time_period):
            values.append(i)
        return values
    elif bool(re.match(r"^\d+\-\d+$", piece)) == True:
        terms = piece.split('-')
        term1 = int(terms[0])
        term2 = int(terms[1]) + 1
        for i in range(term1, term2):
            values.append(i)
        return values
    elif bool(re.match(r"^\d+$", piece)) == True:
        number = int(re.sub(r"\W+", '', piece))
        values.append(number)
        return values
    elif bool(re.match(r"^\d+\,\d+$", piece)) == True:
        terms = piece.split(',')
        term1 = int(terms[0])
        term2 = int(terms[1])
        values.append(term1)
        values.append(term2)
        return values
    else:
        return values


# output cron pieces into grid of all possible outputs and error handling
# argv holds the script name plus the 5 time fields and the command, so a valid call has 7 entries
if len(argv) == 7:
    minutes = cron_output(argv[1], 60, minutes)
    hours = cron_output(argv[2], 24, hours)
    day_of_month = cron_output(argv[3], 31, day_of_month)
    month = cron_output(argv[4], 13, month)
    day_of_week = cron_output(argv[5], 7, day_of_week)
    print("This cron will run on the following... \n")
    print("Minute(s): " + str(minutes))
    print("Hour(s): " + str(hours))
    print("Day(s) of the Month: " + str(day_of_month))
    print("Month(s): " + str(month))
    print("Day(s) of the Week: " + str(day_of_week))
    print(argv[6])
else:
    print("This is not a valid cron. There needs to be 6 arguments.")
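
# Example invocation, using the sample expression from the comments above.
# The exact lists depend on the per-field ranges passed to cron_output; this
# is only an illustration of the expected shape of the output:
#
#   $ python cron_parser.py "*/15" "0" "1,15" "*" "1-5" /usr/bin/find
#   Minute(s): [0, 15, 30, 45]
#   Hour(s): [0]
#   Day(s) of the Month: [1, 15]
#   Month(s): [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
#   Day(s) of the Week: [1, 2, 3, 4, 5]
#   /usr/bin/find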
# GENERATED BY KOMAND SDK - DO NOT EDIT import insightconnect_plugin_runtime import json class Component: DESCRIPTION = "Create a new patch scan template" class Input: DESCRIPTION = "description" NAME = "name" PATCHGROUPIDS = "patchGroupIds" PATH = "path" THREADCOUNT = "threadCount" class Output: PATCH_SCAN_TEMPLATE = "patch_scan_template" class CreatePatchScanTemplateInput(insightconnect_plugin_runtime.Input): schema = json.loads(""" { "type": "object", "title": "Variables", "properties": { "description": { "type": "string", "title": "Description", "description": "Description that explains the purpose of this patch scan template", "order": 2 }, "name": { "type": "string", "title": "Name", "description": "Name of the patch scan template", "order": 1 }, "patchGroupIds": { "type": "array", "title": "Patch Group IDs", "description": "The IDs of the patch groups to use", "items": { "type": "integer" }, "order": 5 }, "path": { "type": "string", "title": "Path", "description": "Path to the location of the machine group within the Patch Scan Templates list in the navigation pane", "order": 3 }, "threadCount": { "type": "integer", "title": "Thread Count", "description": "Specifies maximum number of machines that can be simultaneously scanned during one patch scan", "order": 4 } }, "required": [ "name", "patchGroupIds" ] } """) def __init__(self): super(self.__class__, self).__init__(self.schema) class CreatePatchScanTemplateOutput(insightconnect_plugin_runtime.Output): schema = json.loads(""" { "type": "object", "title": "Variables", "properties": { "patch_scan_template": { "$ref": "#/definitions/patch_scan_template", "title": "Patch Scan Template", "description": "Detailed information about the patch scan template", "order": 1 } }, "required": [ "patch_scan_template" ], "definitions": { "patch_filter": { "type": "object", "title": "patch_filter", "properties": { "patchFilePath": { "type": "string", "title": "Patch File Path", "description": "The patch file path", "order": 1 }, "patchGroupFilterType": { "type": "string", "title": "Patch Group Filter Type", "description": "The patch's filter describes how this filter will be applied. 
The values can be Scan, Skip, or None", "order": 2 }, "patchGroupIds": { "type": "array", "title": "Patch Group IDs", "description": "The IDs of the patch groups to use", "items": { "type": "integer" }, "order": 3 }, "patchPropertyFilter": { "$ref": "#/definitions/patch_property_filter", "title": "Patch Property Filter", "description": "Patch property filter (security, non-security, critical, etc.)", "order": 4 }, "scanFor": { "type": "string", "title": "Scan For", "description": "Gets or sets the type of patches to scan for", "order": 5 }, "softwareDistribution": { "type": "boolean", "title": "Software Distribution", "description": "Is software distribution included in the scan", "order": 6 }, "vendorFamilyProductFilter": { "type": "object", "title": "Vendor Family Product Filter", "description": "Vendor and family product hierarchy", "order": 7 } }, "definitions": { "patch_property_filter": { "type": "object", "title": "patch_property_filter", "properties": { "customActions": { "type": "boolean", "title": "Custom Actions", "description": "Custom actions", "order": 1 }, "nonSecurityPatchSeverities": { "type": "string", "title": "Non Security Patch Severities", "description": "The non-security patch severities", "order": 2 }, "securityPatchSeverities": { "type": "string", "title": "Security Patch Severities", "description": "The security patch severities", "order": 3 }, "securityTools": { "type": "boolean", "title": "Security Tools", "description": "Security tools", "order": 4 } } } } }, "patch_property_filter": { "type": "object", "title": "patch_property_filter", "properties": { "customActions": { "type": "boolean", "title": "Custom Actions", "description": "Custom actions", "order": 1 }, "nonSecurityPatchSeverities": { "type": "string", "title": "Non Security Patch Severities", "description": "The non-security patch severities", "order": 2 }, "securityPatchSeverities": { "type": "string", "title": "Security Patch Severities", "description": "The security patch severities", "order": 3 }, "securityTools": { "type": "boolean", "title": "Security Tools", "description": "Security tools", "order": 4 } } }, "patch_scan_template": { "type": "object", "title": "patch_scan_template", "properties": { "creator": { "type": "string", "title": "Creator", "description": "The name of the person who created the template", "order": 1 }, "description": { "type": "string", "title": "Description", "description": "Provides a description that explains the purpose of this patch scan template", "order": 2 }, "id": { "type": "string", "title": "ID", "description": "Specifies the ID of the patch scan template", "order": 3 }, "isSystem": { "type": "boolean", "title": "Is System", "description": "Indicates if this is a system template", "order": 4 }, "links": { "type": "object", "title": "Links", "description": "Shows the related URLs for each patch scan template and for the usedby list", "order": 5 }, "name": { "type": "string", "title": "Name", "description": "Specifies the patch scan template name", "order": 6 }, "patchFilter": { "$ref": "#/definitions/patch_filter", "title": "Patch Filter", "description": "Specifies the mode", "order": 7 }, "path": { "type": "string", "title": "Path", "description": "The path that describes the location of the machine group within the Patch Scan Templates list in the navigation pane", "order": 8 }, "threadCount": { "type": "integer", "title": "Thread Count", "description": "Specifies maximum number of machines that can be simultaneously scanned during one patch scan", "order": 9 
} }, "definitions": { "patch_filter": { "type": "object", "title": "patch_filter", "properties": { "patchFilePath": { "type": "string", "title": "Patch File Path", "description": "The patch file path", "order": 1 }, "patchGroupFilterType": { "type": "string", "title": "Patch Group Filter Type", "description": "The patch's filter describes how this filter will be applied. The values can be Scan, Skip, or None", "order": 2 }, "patchGroupIds": { "type": "array", "title": "Patch Group IDs", "description": "The IDs of the patch groups to use", "items": { "type": "integer" }, "order": 3 }, "patchPropertyFilter": { "$ref": "#/definitions/patch_property_filter", "title": "Patch Property Filter", "description": "Patch property filter (security, non-security, critical, etc.)", "order": 4 }, "scanFor": { "type": "string", "title": "Scan For", "description": "Gets or sets the type of patches to scan for", "order": 5 }, "softwareDistribution": { "type": "boolean", "title": "Software Distribution", "description": "Is software distribution included in the scan", "order": 6 }, "vendorFamilyProductFilter": { "type": "object", "title": "Vendor Family Product Filter", "description": "Vendor and family product hierarchy", "order": 7 } }, "definitions": { "patch_property_filter": { "type": "object", "title": "patch_property_filter", "properties": { "customActions": { "type": "boolean", "title": "Custom Actions", "description": "Custom actions", "order": 1 }, "nonSecurityPatchSeverities": { "type": "string", "title": "Non Security Patch Severities", "description": "The non-security patch severities", "order": 2 }, "securityPatchSeverities": { "type": "string", "title": "Security Patch Severities", "description": "The security patch severities", "order": 3 }, "securityTools": { "type": "boolean", "title": "Security Tools", "description": "Security tools", "order": 4 } } } } }, "patch_property_filter": { "type": "object", "title": "patch_property_filter", "properties": { "customActions": { "type": "boolean", "title": "Custom Actions", "description": "Custom actions", "order": 1 }, "nonSecurityPatchSeverities": { "type": "string", "title": "Non Security Patch Severities", "description": "The non-security patch severities", "order": 2 }, "securityPatchSeverities": { "type": "string", "title": "Security Patch Severities", "description": "The security patch severities", "order": 3 }, "securityTools": { "type": "boolean", "title": "Security Tools", "description": "Security tools", "order": 4 } } } } } } } """) def __init__(self): super(self.__class__, self).__init__(self.schema)
from PyQt5.QtCore import Qt, pyqtSignal, QPropertyAnimation, QPoint, QAbstractAnimation, QParallelAnimationGroup from PyQt5.QtWidgets import QWidget, QPushButton, QGridLayout, QHBoxLayout class PyQtSwitch(QWidget): toggled = pyqtSignal(bool) def __init__(self): super().__init__() self.__initVal() self.__initUi() def __initVal(self): self.__circle_diameter = 20 self.__animationEnabledFlag = False self.__pointAnimation = '' self.__colorAnimation = '' def __initUi(self): self.__circle = QPushButton() self.__circle.setCheckable(True) self.__circle.toggled.connect(self.__toggled) self.__layForBtnAlign = QHBoxLayout() self.__layForBtnAlign.setAlignment(Qt.AlignLeft) self.__layForBtnAlign.addWidget(self.__circle) self.__layForBtnAlign.setContentsMargins(0, 0, 0, 0) innerWidgetForStyle = QWidget() innerWidgetForStyle.setLayout(self.__layForBtnAlign) lay = QGridLayout() lay.addWidget(innerWidgetForStyle) lay.setContentsMargins(0, 0, 0, 0) self.setLayout(lay) self.__setStyle() def __setStyle(self): self.__circle.setFixedSize(self.__circle_diameter, self.__circle_diameter) self.setStyleSheet( f'QWidget {{ border: {self.__circle_diameter // 20}px solid #AAAAAA; ' f'border-radius: {self.__circle_diameter // 2}px; }}') self.setFixedSize(self.__circle_diameter * 2, self.__circle_diameter) def setAnimation(self, f: bool): self.__animationEnabledFlag = f if self.__animationEnabledFlag: self.__colorAnimation = QPropertyAnimation(self, b'point') self.__colorAnimation.valueChanged.connect(self.__circle.move) self.__colorAnimation.setDuration(100) self.__colorAnimation.setStartValue(QPoint(0, 0)) self.__colorAnimation.setEndValue(QPoint(self.__circle_diameter, 0)) self.__pointAnimation = QPropertyAnimation(self, b'color') self.__pointAnimation.valueChanged.connect(self.__setColor) self.__pointAnimation.setDuration(100) self.__pointAnimation.setStartValue(255) self.__pointAnimation.setEndValue(200) self.__animationGroup = QParallelAnimationGroup() self.__animationGroup.addAnimation(self.__colorAnimation) self.__animationGroup.addAnimation(self.__pointAnimation) def mousePressEvent(self, e): self.__circle.toggle() return super().mousePressEvent(e) def __toggled(self, f): if self.__animationEnabledFlag: if f: self.__animationGroup.setDirection(QAbstractAnimation.Forward) self.__animationGroup.start() else: self.__animationGroup.setDirection(QAbstractAnimation.Backward) self.__animationGroup.start() else: if f: self.__circle.move(self.__circle_diameter, 0) self.__layForBtnAlign.setAlignment(Qt.AlignRight) self.__setColor(200) else: self.__circle.move(0, 0) self.__layForBtnAlign.setAlignment(Qt.AlignLeft) self.__setColor(255) self.toggled.emit(f) def __setColor(self, f: int): self.__circle.setStyleSheet(f'QPushButton {{ background-color: rgb({f}, {f}, 255); }}') def setCircleDiameter(self, diameter: int): self.__circle_diameter = diameter self.__setStyle() self.__colorAnimation.setEndValue(QPoint(self.__circle_diameter, 0))
from tkinter import Tk, Frame, Entry, Button, Checkbutton, StringVar from tkinter import END from .cashe_manager import ListCacheManager class MultiSelectPopup: def __init__(self, title, cache_name, starting_selected=None, cache_amount=100): self.tags = [] self.entries = [] self.root = Tk() self.root.title(title) self.root.minsize(width=400, height=1) self.options_frame = Frame(self.root) self.button_frame = Frame(self.root) self.cm = ListCacheManager(cache_name, cache_amount) options = self.cm.retrieve() if starting_selected is None: starting_selected = [] for val in starting_selected: if val not in options: options.append(val) self.vars = [] for val in options: self.__add_option(val, val in starting_selected) self.inp_field = Entry(self.button_frame) self.inp_field.config(width=100) self.inp_field.pack() self.confirm_button = Button(self.button_frame, text="Confirm", command=self.submit) self.confirm_button.pack() self.cancel_button = Button(self.button_frame, text="Cancel", command=lambda: self.root.destroy()) self.cancel_button.pack() self.options_frame.pack() self.button_frame.pack() self.inp_field.focus_set() self.inp = None self.inp_field.bind('<Return>', lambda event: self.__add_input()) self.root.bind('<Escape>', lambda event: self.root.destroy()) self.root.after(1, lambda: self.root.focus_force()) def __add_input(self): text = self.inp_field.get().strip() self.inp_field.delete(0, END) self.inp_field.insert(END, "") if text == "": return if text in self.cm.retrieve(): return self.__add_option(text) self.vars[-1].set(text) self.cm.insert(text) print("{0} inserted into cache".format(text)) def __add_option(self, text, selected=False): if selected: var = StringVar(value=text) else: var = StringVar(value="") button = Checkbutton(self.options_frame, text=text, var=var, onvalue=text, offvalue="") button.pack() self.vars.append(var) def submit(self): self.inp = [] for var in self.vars: value = var.get() if value: self.inp.append(value) self.root.destroy() def run(self) -> str: self.root.mainloop() return self.inp
# Databricks notebook source
# MAGIC %sql
# MAGIC create database hive_database

# COMMAND ----------

# MAGIC %sql
# MAGIC CREATE TABLE int_i18nRegions (id string, iso_country_code string, region_name string, extractDate date, load_date_time timestamp,
# MAGIC countryCode string, load_date date) USING PARQUET LOCATION 'dbfs:/mnt/natmsdnadlsdatabricks/integration/i18nRegions/incoming/';

# COMMAND ----------

# MAGIC %sql
# MAGIC CREATE TABLE int_videoCategories (id string, assignable boolean, title string, extractDate date, load_date_time timestamp,
# MAGIC countryCode string, load_date date) USING PARQUET LOCATION 'dbfs:/mnt/natmsdnadlsdatabricks/integration/videoCategories/incoming/';

# COMMAND ----------

# MAGIC %sql
# MAGIC drop table int_video;

# COMMAND ----------

# MAGIC %sql
# MAGIC CREATE TABLE int_video (id string,
# MAGIC title string,
# MAGIC channelId string,
# MAGIC channelTitle string,
# MAGIC categoryId string,
# MAGIC tags array<string>,
# MAGIC commentCount int,
# MAGIC dislikeCount int,
# MAGIC favoriteCount int,
# MAGIC likeCount int,
# MAGIC viewCount int,
# MAGIC ratingDisabled boolean,
# MAGIC commentDisabled boolean,
# MAGIC publishedAt timestamp,
# MAGIC extractDate date,
# MAGIC load_date_time timestamp,
# MAGIC countryCode string,
# MAGIC load_date date)
# MAGIC USING PARQUET LOCATION 'dbfs:/mnt/natmsdnadlsdatabricks/integration/video/incoming/';

# COMMAND ----------

spark.catalog.refreshTable("int_video")

# COMMAND ----------

# MAGIC %sql
# MAGIC select distinct load_date_time from int_video order by load_date_time desc;
""" Copyright (c) 2018-2022 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from .format_converter import FileBasedAnnotationConverter, ConverterReturn from ..representation import NoiseSuppressionAnnotation from ..utils import read_txt class NoiseSuppressionDatasetConverter(FileBasedAnnotationConverter): __provider__ = 'noise_suppression_dataset' def convert(self, check_content=False, **kwargs): pairs_list = read_txt(self.annotation_file) annotation = [] for line in pairs_list: clean_signal, noisy_signal = line.split(' ') annotation.append(NoiseSuppressionAnnotation(noisy_signal, clean_signal)) return ConverterReturn(annotation, None, None)
# https://github.com/Cold-Winter/Nattack import time import numpy as np import torch import torch.nn as nn from tqdm import tqdm import cv2 class Normalize(torch.nn.Module): def __init__(self): super(Normalize, self).__init__() self.mean = torch.Tensor([0.485, 0.456, 0.406]) self.std = torch.Tensor([0.229, 0.224, 0.225]) def forward(self, x): return (x - self.mean.type_as(x)[None,:,None,None]) / self.std.type_as(x)[None,:,None,None] def torch_arctanh(x, eps=1e-6): x *= (1. - eps) return (np.log((1 + x) / (1 - x))) * 0.5 def softmax(x): return np.divide(np.exp(x), np.sum(np.exp(x), -1, keepdims=True)) def nattack(model, test_loader, adv_model): normalize = Normalize() npop = 200 # population size sigma = 0.1 # noise standard deviation alpha = 0.02 # learning rate # alpha = 0.001 # learning rate boxmin = 0 boxmax = 1 boxplus = (boxmin + boxmax) / 2. boxmul = (boxmax - boxmin) / 2. epsi = 0.031 epsilon = 1e-30 totalImages = 0 succImages = 0 faillist = [] successlist = [] printlist = [] start_time = time.time() iterator = tqdm(test_loader) for i, (inputs, targets) in enumerate(iterator): success = False input_var = inputs.clone().cuda() inputs = inputs.squeeze(0).numpy().transpose(1, 2, 0) modify = np.random.randn(1, 3, 32, 32) * 0.001 with torch.no_grad(): probs = nn.Softmax(dim=1)(model(normalize(input_var))) _, indices = torch.max(probs, 1) if targets[0] != indices.data.cpu()[0]: continue totalImages += 1 for runstep in range(200): Nsample = np.random.randn(npop, 3, 32, 32) modify_try = modify.repeat(npop, 0) + sigma * Nsample temp = [] for x in modify_try: temp.append(cv2.resize(x.transpose(1,2,0), dsize=(288,288), interpolation=cv2.INTER_LINEAR).transpose(2,0,1)) modify_try = np.array(temp) newimg = torch_arctanh((inputs-boxplus) / boxmul).transpose(2, 0, 1) #print('newimg', newimg,flush=True) inputimg = np.tanh(newimg + modify_try) * boxmul + boxplus if runstep % 10 == 0: temp = [] for x in modify: temp.append(cv2.resize(x.transpose(1,2,0), dsize=(288,288), interpolation=cv2.INTER_LINEAR).transpose(2,0,1)) modify_test = np.array(temp) realinputimg = np.tanh(newimg+modify_test) * boxmul + boxplus realdist = realinputimg - (np.tanh(newimg) * boxmul + boxplus) realclipdist = np.clip(realdist, -epsi, epsi) # print('realclipdist :', realclipdist, flush=True) realclipinput = realclipdist + (np.tanh(newimg) * boxmul + boxplus) l2real = np.sum((realclipinput - (np.tanh(newimg) * boxmul + boxplus))**2)**0.5 #l2real = np.abs(realclipinput - inputs.numpy()) # print(inputs.shape) #outputsreal = model(realclipinput.transpose(0,2,3,1)).data.cpu().numpy() input_var = torch.from_numpy(realclipinput.astype('float32')).cuda() with torch.no_grad(): outputsreal = model(normalize(input_var)).data.cpu().numpy()[0] outputsreal = softmax(outputsreal) #print(outputsreal) # print('probs ', np.sort(outputsreal)[-1:-6:-1]) # print('target label ', np.argsort(outputsreal)[-1:-6:-1]) # print('negative_probs ', np.sort(outputsreal)[0:3:1]) if (np.argmax(outputsreal) != targets) and (np.abs(realclipdist).max() <= epsi): succImages += 1 success = True # print('clipimage succImages: '+str(succImages)+' totalImages: '+str(totalImages)) # print('lirealsucc: '+str(realclipdist.max())) successlist.append(i) printlist.append(runstep) # imsave(folder+classes[targets[0]]+'_'+str("%06d" % batch_idx)+'.jpg',inputs.transpose(1,2,0)) break dist = inputimg - (np.tanh(newimg) * boxmul + boxplus) clipdist = np.clip(dist, -epsi, epsi) clipinput = (clipdist + (np.tanh(newimg) * boxmul + boxplus)).reshape(npop,3,288,288) 
#.reshape(npop,3,32,32) target_onehot = np.zeros((1,1000)) target_onehot[0][targets]=1. clipinput = np.squeeze(clipinput) clipinput = np.asarray(clipinput, dtype='float32') input_var = torch.from_numpy(clipinput).cuda() #outputs = model(clipinput.transpose(0,2,3,1)).data.cpu().numpy() with torch.no_grad(): outputs = adv_model(normalize(input_var)).data.cpu().numpy() outputs = softmax(outputs) target_onehot = target_onehot.repeat(npop,0) real = np.log((target_onehot * outputs).sum(1)+epsilon) other = np.log(((1. - target_onehot) * outputs - target_onehot * 10000.).max(1)[0]+epsilon) loss1 = np.clip(real - other, 0.,1000) Reward = 0.5 * loss1 # Reward = l2dist Reward = -Reward A = (Reward - np.mean(Reward)) / (np.std(Reward)+1e-7) modify = modify + (alpha/(npop*sigma)) * ((np.dot(Nsample.reshape(npop,-1).T, A)).reshape(3,32,32)) #.reshape(3,32,32)) iterator.set_description('Nattack Progress:{}/{}'.format(succImages, totalImages)) end_time = time.time() if not success: faillist.append(i) # print('failed:', faillist) # else: # print('successed:', successlist) if totalImages == 1000: break # print(faillist) success_rate = succImages/float(totalImages) # print('run steps: ',printlist) # np.savez('runstep', printlist) # print('succ rate', success_rate) # print('time taken (min): {:.4f}'.format((end_time - start_time) / 60)) return success_rate # print('attack success rate: %.2f%% (over %d data points)' % (success_rate*100, args.end-args.start))
# Generated by Django 3.2.12 on 2022-03-19 12:55

from django.db import migrations, models


class Migration(migrations.Migration):
    """
    Migration file to make the title field of push notification translations required
    """

    dependencies = [
        ("cms", "0012_mediafile_file_size"),
    ]

    operations = [
        migrations.AlterField(
            model_name="pushnotificationtranslation",
            name="title",
            field=models.CharField(max_length=250, verbose_name="title"),
        ),
    ]
from collections import defaultdict, namedtuple from core.timeline import * from core.log import * import random AFFLICT_LIST = ['poison', 'paralysis', 'burn', 'blind', 'bog', 'stun', 'freeze', 'sleep', 'frostbite'] class Dot(object): """ Damage over time; e.g. poison """ def __init__(self, name, coef, duration, iv, dtype=None): self.name = name self.dtype = dtype self.active = 0 self.coef = coef self.iv = iv # Seconds between each damage tick self.duration = duration self.true_dmg_event = Event('true_dmg') self.true_dmg_event.dname = name self.true_dmg_event.dtype = dtype if dtype else name self.true_dmg_event.comment = '' self.tick_dmg = 0 self.quickshot_event = Event('dmg_formula') self.tick_timer = Timer(self.tick_proc) self.dotend_timer = Timer(self.dot_end_proc) def dot_end_proc(self, t): log('dot', self.name, 'end\t') self.active = 0 self.tick_timer.off() self.cb_end() def cb_end(self): pass def tick_proc(self, t): if self.active == 0: return t.timing += self.iv self.true_dmg_event.count = self.tick_dmg self.true_dmg_event.on() def __call__(self): return self.on() def get(self): return self.active def on(self): if self.active: log('dot', self.name, 'failed\t') return 0 self.active = 1 self.tick_timer.on(self.iv) self.dotend_timer.on(self.duration) self.quickshot_event.dmg_coef = self.coef self.quickshot_event.dname = self.name self.quickshot_event.dtype = self.dtype if self.dtype else self.name self.quickshot_event() self.tick_dmg = self.quickshot_event.dmg log('dot', self.name, 'start\t', '%f/%d' % (self.iv, self.duration)) return 1 def off(self): self.tick_timer.off() self.dotend_timer.off() log('dot', self.name, 'end by other reason') class AfflicUncapped(object): def __init__(self, name=None): self.name = name self.resist = 0 self.rate = 1 self.tolerance = 0.2 self.duration = 12 self.states = None self.stacks = [] self._get = 0.0 self.c_uptime = (0, 0) self.last_afflict = 0 Timer(self.uptime, repeat=1).on(1) def get_tolerance(self): if self.tolerance > 1: return float(self.tolerance) / 100.0 else: return self.tolerance def get_rate(self): if self.rate > 2: return float(self.rate) / 100.0 else: return self.rate def get_resist(self): if self.resist > 1: return float(self.resist) / 100.0 else: return self.resist def get(self): return self._get def update(self): nostack_p = 1.0 for stack_p in self.stacks: nostack_p *= 1.0 - stack_p self._get = 1.0 - nostack_p def stack_end_fun(self, p): def end_callback(t): self.stacks.remove(p) self.update() return end_callback def __call__(self, *args, **argv): return self.on(*args, **argv) def on(self): self.resist = self.get_resist() self.rate = self.get_rate() self.tolerance = self.get_tolerance() if self.states is None: self.states = defaultdict(lambda: 0.0) self.states[self.resist] = 1.0 states = defaultdict(lambda: 0.0) total_success_p = 0.0 for res, state_p in self.states.items(): if res >= self.rate or res >= 1: states[res] += state_p else: rate_after_res = min(1.0, self.rate - res) success_p = state_p * rate_after_res fail_p = state_p * (1.0 - rate_after_res) total_success_p += success_p states[res + self.tolerance] += success_p states[res] += fail_p self.states = states self.stacks.append(total_success_p) Timer(self.stack_end_fun(total_success_p), self.duration).on() self.update() return total_success_p def uptime(self, t): next_r = self.get() next_t = now() if next_r == 0: self.last_afflict = next_t prev_r, prev_t = self.c_uptime rate = prev_r + next_r*(next_t-prev_t) self.c_uptime = (rate, next_t) if next_t > 0 and rate > 0 and next_t 
% 60 == 0: log('{}_uptime'.format(self.name), '{:.2f}/{:.2f}'.format(rate, next_t), '{:.2%}'.format(rate/next_t)) class AfflicCapped(object): State = namedtuple("State", "timers resist") def __init__(self, name=None, duration=12): self.name = name self.resist = 0 self.rate = 1 self.tolerance = 0.2 self.default_duration = duration self.duration = duration self.stack_cap = 1 self.states = None self._get = 0.0 self.c_uptime = (0, 0) self.last_afflict = 0 Timer(self.uptime, repeat=1).on(1) def get_tolerance(self): if self.tolerance > 1: return float(self.tolerance) / 100.0 else: return self.tolerance def get_rate(self): if self.rate > 2: return float(self.rate) / 100.0 else: return self.rate def get_resist(self): if self.resist > 1: return float(self.resist) / 100.0 else: return self.resist def get(self): return self._get def update(self): total_p = 0.0 states = defaultdict(lambda: 0.0) for state, state_p in self.states.items(): reduced_state = self.State(frozenset([t for t in state.timers if t.timing > now()]), state.resist) states[reduced_state] += state_p if reduced_state.timers: total_p += state_p self.states = states self._get = total_p return total_p def stack_end(self, t): self.update() def __call__(self, *args, **argv): return self.on(*args, **argv) def on(self): self.resist = self.get_resist() self.rate = self.get_rate() self.tolerance = self.get_tolerance() timer = Timer(self.stack_end, self.duration).on() if self.states is None: self.states = defaultdict(lambda: 0.0) self.states[self.State(frozenset(), self.resist)] = 1.0 states = defaultdict(lambda: 0.0) total_p = 0.0 for start_state, start_state_p in self.states.items(): res = start_state.resist if res >= self.rate or res >= 1 or len(start_state.timers) >= self.stack_cap: states[start_state] += start_state_p else: rate_after_res = min(1, self.rate - res) succeed_timers = frozenset(list(start_state.timers) + [timer]) state_on_succeed = self.State(succeed_timers, min(1.0, res + self.tolerance)) overall_succeed_p = start_state_p * rate_after_res overall_fail_p = start_state_p * (1.0 - rate_after_res) total_p += overall_succeed_p states[state_on_succeed] += overall_succeed_p if overall_fail_p > 0: states[start_state] += overall_fail_p self.states = states self.update() return total_p def uptime(self, t): next_r = self.get() next_t = now() if next_r == 0: self.last_afflict = next_t prev_r, prev_t = self.c_uptime rate = prev_r + next_r*(next_t-prev_t) self.c_uptime = (rate, next_t) if next_t > 0 and rate > 0 and next_t % 60 == 0: log('{}_uptime'.format(self.name), '{:.2f}/{:.2f}'.format(rate, next_t), '{:.2%}'.format(rate/next_t)) class Afflic_dot(AfflicUncapped): def __init__(self, name=None, duration=12, iv=3.99): super().__init__(name) self.coef = 0.97 self.default_duration = duration self.duration = duration self.default_iv = iv self.iv = iv def on(self, name, rate, coef, duration=None, iv=None, dtype=None): self.rate = rate self.coef = coef self.dtype = dtype self.duration = duration or self.default_duration self.iv = iv or self.default_iv dot = Dot('o_%s_%s' % (name, self.name), coef, self.duration, self.iv, self.dtype) dot.on() r = super().on() dot.tick_dmg *= r return r class Afflic_cc(AfflicCapped): def __init__(self, name=None, duration=6.5): super().__init__(name, duration) self.stack_cap = 1 def on(self, name, rate, duration=None): self.rate = rate self.duration = duration or self.default_duration return super().on() def cb_end(self): pass class Afflic_scc(AfflicCapped): def __init__(self, name=None, duration=8): 
super().__init__(name, duration) self.stack_cap = 1 def on(self, name, rate, duration=None): self.rate = rate self.duration = duration or self.default_duration return super().on() def cb_end(self): pass class Afflic_bog(Afflic_scc): def on(self, name, rate, duration=None): p = super().on(name, rate, duration) if p: from core.advbase import Debuff Debuff('{}_bog'.format(name),-0.5*p,self.duration,1,'att','bog').on() return p class Afflics(object): def __init__(self): self.rinit() self.poison = Afflic_dot('poison', duration=15, iv=2.99) self.burn = Afflic_dot('burn', duration=12, iv=3.99) self.paralysis = Afflic_dot('paralysis', duration=13, iv=3.99) self.frostbite = Afflic_dot('frostbite', duration=21, iv=2.99) self.blind = Afflic_scc('blind', duration=8) self.bog = Afflic_bog('bog', duration=8) self.freeze = Afflic_cc('freeze', duration=4.5) self.stun = Afflic_cc('stun', duration=6.5) self.sleep = Afflic_cc('sleep', duration=6.5) self.poison.resist = 0 self.burn.resist = 0 self.paralysis.resist = 0 self.blind.resist = 80 self.bog.resist = 100 self.freeze.resist = 80 self.stun.resist = 80 self.sleep.resist = 80 self.frostbite.resist = 0 self.poison.tolerance = 5 self.burn.tolerance = 5 self.paralysis.tolerance = 5 self.blind.tolerance = 10 self.bog.tolerance = 20 self.freeze.tolerance = 20 self.stun.tolerance = 20 self.sleep.tolerance = 20 self.frostbite.tolerance = 5 def add(self, name, atype, rate, duration, coef=0, iv=0): if atype == 'burning': atype = 'burn' if atype == 'para': atype = 'paralysis' if atype in ['poison', 'burn', 'paralysis']: return self.add_dot(name, atype, rate, coef, duration, iv) elif atype in ['blind', 'freeze', 'stun', 'sleep', 'bog']: return self.add_cc(name, atype, rate, coef, duration, iv) def get(self, atype): if atype in ['poison', 'burn', 'paralysis']: stack = 0 for i in self.dot: if i[0] == atype and i[1].get(): stack += 1 return stack elif atype in ['blind', 'freeze', 'stun', 'sleep', 'bog']: if atype in self.cc: return self.cc[atype].get() def r(self): return random.random() / self.luck def refresh_dot(self): tmp = [] for i in self.dot: if i[1].get(): tmp.append(i) self.dot = tmp def refresh_cc(self): tmp = {} for i in self.cc: if self.cc[i].get(): tmp.append(i) self.cc = tmp def add_dot(self, name, atype, rate, coef, duration, iv): if not iv: errrrrr() if self.resist[atype] < 100: r = self.r() log('afflic', rate, self.resist[atype], r * 100) if rate < self.resist[atype]: return 0 if r * 100 < (rate - self.resist[atype]): log('afflic', 'succ', name, atype) self.refresh_dot() dot = Dot('o_' + name + '_' + atype, coef, duration, iv) dot.on() self.dot.append((atype, dot)) self.resist[atype] += 20 # 5 return 1 else: log('afflic', 'perfect_resist') return 0 def add_cc(self, name, atype, rate, coef, duration, iv): if self.resist[atype] < 100: r = self.r() log('afflic', rate, self.resist[atype], r * 100) if atype in self.cc: self.cc[atype].on() return 0 elif rate < self.resist[atype]: return 0 elif r * 100 < (rate - self.resist[atype]): log('afflic', 'succ', name, atype) self.refresh_cc() cc = Dot('o_' + name + '_' + atype, 0, duration, duration + 0.01) cc.on() self.cc[atype] = cc if atype == 'blind': self.resist[atype] += 20 # 10 else: # elif atype in ['freeze','stun','sleep','bog']: self.resist[atype] += 20 return 1 else: log('afflic', 'perfect_resist') return 0 def get_uptimes(self): uptimes = {} # for atype in ['poison', 'burn', 'paralysis', 'blind', 'freeze', 'stun', 'sleep', 'bog']: for atype in AFFLICT_LIST: aff = self.__dict__[atype] rate, t = 
aff.c_uptime # last = aff.last_afflict if rate > 0: # print('{}_uptime'.format(atype), '{:.2f}/{:.2f}'.format(rate, t), '{:.2%}'.format(rate/t)) # print('last_{}: {:.2f}s'.format(atype, last)) uptimes[atype] = rate/t return uptimes def rinit(self): self.resist = {} self.resist['poison'] = 0 self.resist['burn'] = 0 self.resist['freeze'] = 80 self.resist['paralysis'] = 80 self.resist['blind'] = 80 self.resist['stun'] = 80 self.resist['curse'] = 0 self.resist['bog'] = 80 self.resist['sleep'] = 80 self.dot = [] self.cc = {} self.luck = 1
#!/usr/bin/env python2.4

"""
Code to solve tests at
http://www.willmcgugan.com/blog/tech/2009/12/21/python-developer-programming-tests/
"""

words = [w.rstrip() for w in open('/usr/share/dict/words')]

an = {}
for w in words:
    key = "".join(sorted(w))
    #if key == "opst":
    #    print w
    #    print an.get(key,[])
    x = an.setdefault(key, [])
    x.append(w)


def ana(word):
    return an.get("".join(sorted(word)), [])


if __name__ == "__main__":
    print ana("pots")
    print '--'
    print ana("train")
    print '--'
    print ana('drive')
    print '--'
    print ana('python')
# -*- coding: utf-8 -*-
"""
Web API
~~~~

ref: web_api.yaml

:copyright: (c) 2017 by Baidu, Inc.
:license: Apache, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function

from flask import g

from .base_api import Resource
from ..service import DataService


class Datas(Resource):
    """
    ref: web_api.yaml
    """

    def get(self):
        """
        ref: web_api.yaml
        :return:
        """
        pattern = None
        if 'pattern' in g.args:
            pattern = g.args['pattern']
        datas = DataService.list(pattern)
        for data_no, data in enumerate(datas):
            datas[data_no] = {
                "id": data.id,
                "name": data.name,
                "uri": '/v1/data/%s' % data.name,
                "createTime": data.create_time * 1000,
                "updateTime": data.update_time * 1000,
                "labelRatio": data.label_ratio,
                "period": {
                    "length": data.period,
                    "ratio": data.period_ratio
                },
                "display": {
                    "start": data.start_time * 1000,
                    "end": min(data.start_time + 86400, data.end_time) * 1000
                },
                "time": {
                    "start": data.start_time * 1000,
                    "end": data.end_time * 1000
                }
            }
        return self.render(data=datas), 200, None
def LI(): return list(map(int, input().split()))


import sys
sys.setrecursionlimit(10 ** 9)

'''
Thoughts:
At first this looked like a graph problem that Dijkstra could solve (just a hunch),
but Dijkstra seems impossible here (there is no fixed starting vertex).
With N <= 10 a plain exhaustive search / BFS should do. I had also overlooked
that the time limit is 5 sec.
PyPy: 2341 ms
Python: TLE
Implementation took about 20 minutes, but it worked.

Editorial: https://twitter.com/e869120/status/1390074137192767489
I considered brute-forcing over permutations but stopped because it looked like a
TLE; I understood it should be fine complexity-wise, yet it somehow seemed hopeless.
Doing it plainly with permutations turned out to be fast enough.
PyPy: 418 ms
Python: TLE
'''

# https://atcoder.jp/contests/typical90/tasks/typical90_af
from itertools import permutations


def main(*args):
    N, A, M, XY = args
    NG = [set() for n in range(N)]
    for x, y in XY:
        NG[x-1].add(y-1)
        NG[y-1].add(x-1)
    INF = 2 ** 32
    ans = INF
    for p in permutations(range(N)):  # which runner covers which leg
        cost = A[p[0]][0]  # p[0]: the runner of the first leg
        for i in range(1, N):
            if p[i-1] in NG[p[i]]:  # if the two adjacent runners have quarreled
                break
            else:
                cost += A[p[i]][i]
        else:  # if the relay connected all the way to the last leg
            ans = min(ans, cost)
    print(ans if ans != INF else -1)


if __name__ == '__main__':
    N = int(input())
    A = [LI() for n in range(N)]
    M = int(input())
    main(N, A, M, [LI() for m in range(M)])
import jsonschema


class GeneratorBase(object):
    def __init__(self):
        self.schema = None

    def _get_schema(self):
        if self.schema is not None:
            return self.schema
        self.schema = self.get_schema()
        validator = jsonschema.validators.validator_for(self.schema)
        self.schema = validator(self.schema)
        return self.schema

    def internal_validate(self, source):
        schema = self._get_schema()
        try:
            schema.validate(source)
        except jsonschema.ValidationError:
            return False
        return True

    def internal_generate(self, source):
        if not self.internal_validate(source):
            return None
        return self.generate_pipeline(source)

    @classmethod
    def get_schema(cls):
        raise NotImplementedError()

    @classmethod
    def generate_pipeline(cls, source):
        raise NotImplementedError()
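
# A minimal sketch of a concrete generator, purely for illustration. The schema
# and the returned pipeline structure are invented for this example and are not
# part of the base class above.
class EchoGenerator(GeneratorBase):
    @classmethod
    def get_schema(cls):
        return {
            "type": "object",
            "properties": {"name": {"type": "string"}},
            "required": ["name"],
        }

    @classmethod
    def generate_pipeline(cls, source):
        return ["echo", source["name"]]


# EchoGenerator().internal_generate({"name": "demo"}) -> ["echo", "demo"]
# EchoGenerator().internal_generate({}) -> None (fails schema validation)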
# NeoPixel library strandtest example
# Author: Tony DiCola ([email protected])
#
# Direct port of the Arduino NeoPixel library strandtest example. Showcases
# various animations on a strip of NeoPixels.
import time
import math

import requests
import json

#hue var
hue1 = "http://192.168.0.24/api/2XVv90KBB8kItGJd55lc4agdxYLctwR42NwAlnhz/lights/4/state"

#hue cmds
hue_on = {"on": True, "bri": 254, "ct": 500}
hue_off = {"on": False}
hue_color = {"on": True, "Red": 254, "bri": 254, "xy": [0.3000, 0.3000]}


#change string code
#clamp a value into [min_val, max_val]
def constrain(val, min_val, max_val):
    return min(max_val, max(min_val, val))


#convert a hex color code ("RRGGBB" or "RRGGBBWW") into an [R, G, B, W] list
def codeToRgb(code):
    if len(code) == 8:
        R = [int(code[0:2], 16)]
        G = [int(code[2:4], 16)]
        B = [int(code[4:6], 16)]
        W = [int(code[6:8], 16)]
        return R + G + B + W
    elif len(code) == 6:
        R = [int(code[0:2], 16)]
        G = [int(code[2:4], 16)]
        B = [int(code[4:6], 16)]
        W = [0]
        return R + G + B + W
    else:
        print("invalid code!")
        return [0, 0, 0, 0]


#for hue: convert CIE xy + brightness into an [R, G, B] list
def XYtoRGB(x, y, brightness=1.0):
    z = 1.0 - x - y
    Y = brightness  # The given brightness value
    X = (Y / y) * x
    Z = (Y / y) * z
    r = (X * 1.656492) - (Y * 0.354851) - (Z * 0.255038)
    g = ((-X) * 0.707196) + (Y * 1.655397) + (Z * 0.036152)
    b = (X * 0.051713) - (Y * 0.121364) + (Z * 1.011530)
    r = ((r <= 0.0031308) and 12.92 * r or (1.0 + 0.055) * pow(r, (1.0 / 2.4)) - 0.055) * 254
    g = ((g <= 0.0031308) and 12.92 * g or (1.0 + 0.055) * pow(g, (1.0 / 2.4)) - 0.055) * 254
    b = ((b <= 0.0031308) and 12.92 * b or (1.0 + 0.055) * pow(b, (1.0 / 2.4)) - 0.055) * 254
    r = [constrain(int(round(r)), 0, 255)]
    g = [constrain(int(round(g)), 0, 255)]
    b = [constrain(int(round(b)), 0, 255)]
    RGB = r + g + b
    print(RGB)
    return RGB


#convert an RGB value into the CIE xy coordinates used by the Hue API
def RGBtoXY(r, g, b):
    red = float((r > 0.04045) and pow((r + 0.055) / (1.0 + 0.055), 2.4) or (r / 12.92))
    green = float((g > 0.04045) and pow((g + 0.055) / (1.0 + 0.055), 2.4) or (g / 12.92))
    blue = float((b > 0.04045) and pow((b + 0.055) / (1.0 + 0.055), 2.4) or (b / 12.92))
    X = red * 0.664511 + green * 0.154324 + blue * 0.162028
    Y = red * 0.283881 + green * 0.668433 + blue * 0.047685
    Z = red * 0.000088 + green * 0.072310 + blue * 0.986039
    x = [round((X / (X + Y + Z)), 4)]
    y = [round((Y / (X + Y + Z)), 4)]
    XY = x + y
    print("coordinate?")
    print(XY)
    return XY


#set the Hue lamp to the given RGB color and return the payload that was sent
def hueColor(r, g, b):
    color = RGBtoXY(r, g, b)
    hue_color = {"on": True, "Red": 254, "bri": 254, "xy": color}
    requests.put(hue1, json.dumps(hue_color), timeout=5)
    return hue_color


"""
Start with a temperature, in Kelvin, somewhere between 1000 and 40000. (Other values may work,
but I can't make any promises about the quality of the algorithm's estimates above 40000 K.)
Note also that the temperature and color variables need to be declared as floating-point.
"""


def TemptoRGB(temper):
    # Note: the reference algorithm divides the temperature by 100; the +100 offset
    # below is kept from the original code.
    temper = (temper + 100) / 100
    #Calculate Red
    if temper <= 66:
        red = [255]
    else:
        red = temper - 60
        red = 329.698727446 * (pow(red, -0.1332047592))
        red = [constrain(int(round(red)), 0, 255)]
    #Calculate Green
    if temper <= 66:
        green = temper
        green = 99.4708025861 * math.log(green) - 161.1195681661
        green = [constrain(int(round(green)), 0, 255)]
    else:
        green = temper - 60
        green = 288.1221695283 * (pow(green, -0.0755148492))
        green = [constrain(int(round(green)), 0, 255)]
    #Calculate Blue
    if temper >= 66:
        blue = [255]
    else:
        if temper <= 19:
            blue = [0]
        else:
            blue = temper - 10
            blue = 138.5177312231 * math.log(blue) - 305.0447927307
            blue = [constrain(int(round(blue)), 0, 255)]
    return red + green + blue


# Main program logic follows:
if __name__ == '__main__':
    #code = codeToRgb('ffcc00')
    #temper = 6000
    #mired = pow(10,6)/temper
    #code = TemptoRGB(temper)
    #hue_on = {"on":True, "bri":254, "ct":mired}
    #r = code[0]
    #g = code[1]
    #b = code[2]
    # hueColor() converts the RGB value to xy and performs the PUT request itself,
    # so no additional request needs to be sent here.
    hueColor(0, 0, 255)
from rest_framework import serializers from django_quill.fields import FieldQuill __all__ = ( "QuillFieldMixin", "QuillHtmlField", "QuillPlainField", ) class QuillFieldMixin: pass class QuillHtmlField(QuillFieldMixin, serializers.Field): def to_representation(self, value: FieldQuill): return value.quill.html class QuillPlainField(QuillFieldMixin, serializers.Field): def to_representation(self, value: FieldQuill): return value.quill.plain
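# Illustrative sketch (not part of the original module): how these fields would typically be
# wired into a serializer. The field name `content` is hypothetical and assumes the serialized
# object exposes a django-quill FieldQuill under that attribute.
class ArticleSerializer(serializers.Serializer):
    # read_only because these fields only implement to_representation()
    content_html = QuillHtmlField(source="content", read_only=True)
    content_text = QuillPlainField(source="content", read_only=True)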
# # Copyright (C) 2008, 2009 Loic Dachary <[email protected]> # Copyright (C) 2009 Johan Euphrosine <[email protected]> # # This software's license gives you freedom; you can copy, convey, # propagate, redistribute and/or modify this program under the terms of # the GNU Affero General Public License (AGPL) as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version of the AGPL published by the FSF. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero # General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program in a file in the toplevel directory called # "AGPLv3". If not, see <http://www.gnu.org/licenses/>. # from twisted.internet import reactor from pokernetwork.pokerrestclient import PokerProxyClientFactory local_reactor = reactor # # return a value if all actions were complete # def rest_filter(site, request, packet): if request.finished: #pragma: no cover # # the request has been answered by a filter earlier in the chain # return True #pragma: no cover service = site.resource.service #pragma: no cover uid = request.args.get('uid', [''])[0] #pragma: no cover if uid: #pragma: no cover resthost = site.memcache.get(uid) #pragma: no cover if not resthost: #pragma: no cover cursor = service.db.cursor() #pragma: no cover cursor.execute("SELECT host,port,path FROM resthost WHERE name LIKE 'explain%' ORDER BY RAND()") #pragma: no cover if cursor.rowcount > 0: #pragma: no cover resthost = cursor.fetchone() #pragma: no cover if resthost: #pragma: no cover (host, port, path) = [str(s) for s in resthost] #pragma: no cover parts = request.uri.split('?', 1) #pragma: no cover if len(parts) > 1: #pragma: no cover path += '?' + parts[1] #pragma: no cover request.content.seek(0, 0) #pragma: no cover header = request.getAllHeaders() #pragma: no cover data = request.content.read() #pragma: no cover clientFactory = PokerProxyClientFactory( #pragma: no cover request.method, path, request.clientproto, #pragma: no cover header, data, request, #pragma: no cover host + ':' + str(port) + path) #pragma: no cover local_reactor.connectTCP(host, int(port), clientFactory) #pragma: no cover return clientFactory.deferred #pragma: no cover return True #pragma: no cover
from transformers import pipeline
unmasker = pipeline('fill-mask', model='bert-base-uncased')
unmasker("Hello I'm a [MASK] model.")

# Output of the call above (top-5 predictions; exact scores may vary slightly between
# library and model versions):
[{'sequence': "[CLS] hello i'm a fashion model. [SEP]",
  'score': 0.1073106899857521,
  'token': 4827,
  'token_str': 'fashion'},
 {'sequence': "[CLS] hello i'm a role model. [SEP]",
  'score': 0.08774490654468536,
  'token': 2535,
  'token_str': 'role'},
 {'sequence': "[CLS] hello i'm a new model. [SEP]",
  'score': 0.05338378623127937,
  'token': 2047,
  'token_str': 'new'},
 {'sequence': "[CLS] hello i'm a super model. [SEP]",
  'score': 0.04667217284440994,
  'token': 3565,
  'token_str': 'super'},
 {'sequence': "[CLS] hello i'm a fine model. [SEP]",
  'score': 0.027095865458250046,
  'token': 2986,
  'token_str': 'fine'}]
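# Illustrative follow-up (not part of the original snippet): the fill-mask pipeline also
# accepts a top_k argument controlling how many candidates are returned; each prediction is
# a dict with the same keys shown above ('sequence', 'score', 'token', 'token_str').
predictions = unmasker("Hello I'm a [MASK] model.", top_k=2)
for p in predictions:
    print(p["token_str"], round(p["score"], 3))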
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities from . import outputs from ._inputs import * __all__ = ['LogsIndexArgs', 'LogsIndex'] @pulumi.input_type class LogsIndexArgs: def __init__(__self__, *, filters: pulumi.Input[Sequence[pulumi.Input['LogsIndexFilterArgs']]], name: pulumi.Input[str], daily_limit: Optional[pulumi.Input[int]] = None, disable_daily_limit: Optional[pulumi.Input[bool]] = None, exclusion_filters: Optional[pulumi.Input[Sequence[pulumi.Input['LogsIndexExclusionFilterArgs']]]] = None, retention_days: Optional[pulumi.Input[int]] = None): """ The set of arguments for constructing a LogsIndex resource. :param pulumi.Input[Sequence[pulumi.Input['LogsIndexFilterArgs']]] filters: Logs filter :param pulumi.Input[str] name: The name of the index. :param pulumi.Input[int] daily_limit: The number of log events you can send in this index per day before you are rate-limited. :param pulumi.Input[bool] disable_daily_limit: If true, sets the daily*limit value to null and the index is not limited on a daily basis (any specified daily*limit value in the request is ignored). If false or omitted, the index's current daily_limit is maintained. :param pulumi.Input[Sequence[pulumi.Input['LogsIndexExclusionFilterArgs']]] exclusion_filters: List of exclusion filters. :param pulumi.Input[int] retention_days: The number of days before logs are deleted from this index. """ pulumi.set(__self__, "filters", filters) pulumi.set(__self__, "name", name) if daily_limit is not None: pulumi.set(__self__, "daily_limit", daily_limit) if disable_daily_limit is not None: pulumi.set(__self__, "disable_daily_limit", disable_daily_limit) if exclusion_filters is not None: pulumi.set(__self__, "exclusion_filters", exclusion_filters) if retention_days is not None: pulumi.set(__self__, "retention_days", retention_days) @property @pulumi.getter def filters(self) -> pulumi.Input[Sequence[pulumi.Input['LogsIndexFilterArgs']]]: """ Logs filter """ return pulumi.get(self, "filters") @filters.setter def filters(self, value: pulumi.Input[Sequence[pulumi.Input['LogsIndexFilterArgs']]]): pulumi.set(self, "filters", value) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ The name of the index. """ return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter(name="dailyLimit") def daily_limit(self) -> Optional[pulumi.Input[int]]: """ The number of log events you can send in this index per day before you are rate-limited. """ return pulumi.get(self, "daily_limit") @daily_limit.setter def daily_limit(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "daily_limit", value) @property @pulumi.getter(name="disableDailyLimit") def disable_daily_limit(self) -> Optional[pulumi.Input[bool]]: """ If true, sets the daily*limit value to null and the index is not limited on a daily basis (any specified daily*limit value in the request is ignored). If false or omitted, the index's current daily_limit is maintained. 
""" return pulumi.get(self, "disable_daily_limit") @disable_daily_limit.setter def disable_daily_limit(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "disable_daily_limit", value) @property @pulumi.getter(name="exclusionFilters") def exclusion_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LogsIndexExclusionFilterArgs']]]]: """ List of exclusion filters. """ return pulumi.get(self, "exclusion_filters") @exclusion_filters.setter def exclusion_filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LogsIndexExclusionFilterArgs']]]]): pulumi.set(self, "exclusion_filters", value) @property @pulumi.getter(name="retentionDays") def retention_days(self) -> Optional[pulumi.Input[int]]: """ The number of days before logs are deleted from this index. """ return pulumi.get(self, "retention_days") @retention_days.setter def retention_days(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "retention_days", value) @pulumi.input_type class _LogsIndexState: def __init__(__self__, *, daily_limit: Optional[pulumi.Input[int]] = None, disable_daily_limit: Optional[pulumi.Input[bool]] = None, exclusion_filters: Optional[pulumi.Input[Sequence[pulumi.Input['LogsIndexExclusionFilterArgs']]]] = None, filters: Optional[pulumi.Input[Sequence[pulumi.Input['LogsIndexFilterArgs']]]] = None, name: Optional[pulumi.Input[str]] = None, retention_days: Optional[pulumi.Input[int]] = None): """ Input properties used for looking up and filtering LogsIndex resources. :param pulumi.Input[int] daily_limit: The number of log events you can send in this index per day before you are rate-limited. :param pulumi.Input[bool] disable_daily_limit: If true, sets the daily*limit value to null and the index is not limited on a daily basis (any specified daily*limit value in the request is ignored). If false or omitted, the index's current daily_limit is maintained. :param pulumi.Input[Sequence[pulumi.Input['LogsIndexExclusionFilterArgs']]] exclusion_filters: List of exclusion filters. :param pulumi.Input[Sequence[pulumi.Input['LogsIndexFilterArgs']]] filters: Logs filter :param pulumi.Input[str] name: The name of the index. :param pulumi.Input[int] retention_days: The number of days before logs are deleted from this index. """ if daily_limit is not None: pulumi.set(__self__, "daily_limit", daily_limit) if disable_daily_limit is not None: pulumi.set(__self__, "disable_daily_limit", disable_daily_limit) if exclusion_filters is not None: pulumi.set(__self__, "exclusion_filters", exclusion_filters) if filters is not None: pulumi.set(__self__, "filters", filters) if name is not None: pulumi.set(__self__, "name", name) if retention_days is not None: pulumi.set(__self__, "retention_days", retention_days) @property @pulumi.getter(name="dailyLimit") def daily_limit(self) -> Optional[pulumi.Input[int]]: """ The number of log events you can send in this index per day before you are rate-limited. """ return pulumi.get(self, "daily_limit") @daily_limit.setter def daily_limit(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "daily_limit", value) @property @pulumi.getter(name="disableDailyLimit") def disable_daily_limit(self) -> Optional[pulumi.Input[bool]]: """ If true, sets the daily*limit value to null and the index is not limited on a daily basis (any specified daily*limit value in the request is ignored). If false or omitted, the index's current daily_limit is maintained. 
""" return pulumi.get(self, "disable_daily_limit") @disable_daily_limit.setter def disable_daily_limit(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "disable_daily_limit", value) @property @pulumi.getter(name="exclusionFilters") def exclusion_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LogsIndexExclusionFilterArgs']]]]: """ List of exclusion filters. """ return pulumi.get(self, "exclusion_filters") @exclusion_filters.setter def exclusion_filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LogsIndexExclusionFilterArgs']]]]): pulumi.set(self, "exclusion_filters", value) @property @pulumi.getter def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LogsIndexFilterArgs']]]]: """ Logs filter """ return pulumi.get(self, "filters") @filters.setter def filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LogsIndexFilterArgs']]]]): pulumi.set(self, "filters", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the index. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="retentionDays") def retention_days(self) -> Optional[pulumi.Input[int]]: """ The number of days before logs are deleted from this index. """ return pulumi.get(self, "retention_days") @retention_days.setter def retention_days(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "retention_days", value) class LogsIndex(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, daily_limit: Optional[pulumi.Input[int]] = None, disable_daily_limit: Optional[pulumi.Input[bool]] = None, exclusion_filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsIndexExclusionFilterArgs']]]]] = None, filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsIndexFilterArgs']]]]] = None, name: Optional[pulumi.Input[str]] = None, retention_days: Optional[pulumi.Input[int]] = None, __props__=None): """ ## Import ```sh $ pulumi import datadog:index/logsIndex:LogsIndex name> <indexName> ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[int] daily_limit: The number of log events you can send in this index per day before you are rate-limited. :param pulumi.Input[bool] disable_daily_limit: If true, sets the daily*limit value to null and the index is not limited on a daily basis (any specified daily*limit value in the request is ignored). If false or omitted, the index's current daily_limit is maintained. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsIndexExclusionFilterArgs']]]] exclusion_filters: List of exclusion filters. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsIndexFilterArgs']]]] filters: Logs filter :param pulumi.Input[str] name: The name of the index. :param pulumi.Input[int] retention_days: The number of days before logs are deleted from this index. """ ... @overload def __init__(__self__, resource_name: str, args: LogsIndexArgs, opts: Optional[pulumi.ResourceOptions] = None): """ ## Import ```sh $ pulumi import datadog:index/logsIndex:LogsIndex name> <indexName> ``` :param str resource_name: The name of the resource. :param LogsIndexArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(LogsIndexArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, daily_limit: Optional[pulumi.Input[int]] = None, disable_daily_limit: Optional[pulumi.Input[bool]] = None, exclusion_filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsIndexExclusionFilterArgs']]]]] = None, filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsIndexFilterArgs']]]]] = None, name: Optional[pulumi.Input[str]] = None, retention_days: Optional[pulumi.Input[int]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = LogsIndexArgs.__new__(LogsIndexArgs) __props__.__dict__["daily_limit"] = daily_limit __props__.__dict__["disable_daily_limit"] = disable_daily_limit __props__.__dict__["exclusion_filters"] = exclusion_filters if filters is None and not opts.urn: raise TypeError("Missing required property 'filters'") __props__.__dict__["filters"] = filters if name is None and not opts.urn: raise TypeError("Missing required property 'name'") __props__.__dict__["name"] = name __props__.__dict__["retention_days"] = retention_days super(LogsIndex, __self__).__init__( 'datadog:index/logsIndex:LogsIndex', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, daily_limit: Optional[pulumi.Input[int]] = None, disable_daily_limit: Optional[pulumi.Input[bool]] = None, exclusion_filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsIndexExclusionFilterArgs']]]]] = None, filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsIndexFilterArgs']]]]] = None, name: Optional[pulumi.Input[str]] = None, retention_days: Optional[pulumi.Input[int]] = None) -> 'LogsIndex': """ Get an existing LogsIndex resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[int] daily_limit: The number of log events you can send in this index per day before you are rate-limited. :param pulumi.Input[bool] disable_daily_limit: If true, sets the daily*limit value to null and the index is not limited on a daily basis (any specified daily*limit value in the request is ignored). If false or omitted, the index's current daily_limit is maintained. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsIndexExclusionFilterArgs']]]] exclusion_filters: List of exclusion filters. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsIndexFilterArgs']]]] filters: Logs filter :param pulumi.Input[str] name: The name of the index. 
:param pulumi.Input[int] retention_days: The number of days before logs are deleted from this index. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _LogsIndexState.__new__(_LogsIndexState) __props__.__dict__["daily_limit"] = daily_limit __props__.__dict__["disable_daily_limit"] = disable_daily_limit __props__.__dict__["exclusion_filters"] = exclusion_filters __props__.__dict__["filters"] = filters __props__.__dict__["name"] = name __props__.__dict__["retention_days"] = retention_days return LogsIndex(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="dailyLimit") def daily_limit(self) -> pulumi.Output[Optional[int]]: """ The number of log events you can send in this index per day before you are rate-limited. """ return pulumi.get(self, "daily_limit") @property @pulumi.getter(name="disableDailyLimit") def disable_daily_limit(self) -> pulumi.Output[bool]: """ If true, sets the daily*limit value to null and the index is not limited on a daily basis (any specified daily*limit value in the request is ignored). If false or omitted, the index's current daily_limit is maintained. """ return pulumi.get(self, "disable_daily_limit") @property @pulumi.getter(name="exclusionFilters") def exclusion_filters(self) -> pulumi.Output[Optional[Sequence['outputs.LogsIndexExclusionFilter']]]: """ List of exclusion filters. """ return pulumi.get(self, "exclusion_filters") @property @pulumi.getter def filters(self) -> pulumi.Output[Sequence['outputs.LogsIndexFilter']]: """ Logs filter """ return pulumi.get(self, "filters") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the index. """ return pulumi.get(self, "name") @property @pulumi.getter(name="retentionDays") def retention_days(self) -> pulumi.Output[int]: """ The number of days before logs are deleted from this index. """ return pulumi.get(self, "retention_days")
from __future__ import annotations

import toolcli
import toolsql


def get_command_spec() -> toolcli.CommandSpec:
    return {
        'f': migrate_create_command,
        'help': 'create migrations',
        'args': [
            {'name': '--message'},
            {'name': '--noedit', 'action': 'store_true'},
            {'name': '--noautogenerate', 'action': 'store_true'},
        ],
        'special': {
            'inject': ['migrate_config'],
        },
    }


def migrate_create_command(
    migrate_config: toolsql.MigrateConfig,
    message: str,
    noedit: bool,
    noautogenerate: bool,
) -> None:
    # Create a new migration (optionally autogenerated), then open it for editing
    # unless --noedit was passed.
    autogenerate = not noautogenerate
    toolsql.create_migration(
        migrate_config=migrate_config,
        message=message,
        autogenerate=autogenerate,
    )
    edit = not noedit
    if edit:
        toolsql.edit_migrations(migrate_config=migrate_config)
from flask_restplus import Resource, reqparse
from flask.json import jsonify

from restplus import api as api
from database.models import Monografia
from database import db
from database.operations import save_to

from flask import request
from flask import make_response
from flask_jwt import jwt_required

from rdf.models import Monografia as MonografiaRDF
from simpot import graph

ns = api.namespace('monografia', description='Operations related to "Monografias"')


@api.representation('application/xml')
def xml(data, code, headers):
    resp = make_response(data, code)
    resp.headers.extend(headers)
    return resp


@ns.route('/<string:codigo>')
class MonografiaItem(Resource):

    parser = reqparse.RequestParser()
    for i in ["codigo", "discente", "orientador", "titulo", "codigo_curso", "ano", "siape_orientador"]:
        parser.add_argument(i, type=str, required=False, help='')

    def get(self, codigo):
        if Monografia.query.filter(Monografia.codigo == codigo).first():
            return Monografia.query.filter(Monografia.codigo == codigo).one().json()
        return {'Message': 'Monografia with the codigo {} is not found'.format(codigo)}, 404

    @api.response(201, 'Monografia successfully created.')
    @jwt_required()
    def post(self, codigo):
        if Monografia.query.filter(Monografia.codigo == codigo).first():
            return jsonify({'Message': 'Monografia with the codigo {} already exists'.format(codigo)})
        args = MonografiaItem.parser.parse_args()
        item = Monografia(args)
        save_to(item, db)
        return "ok", 201


get_arguments = reqparse.RequestParser()
get_arguments.add_argument('curso', type=int, required=False, help='Course code')
get_arguments.add_argument('discente', type=str, required=False, help='Name (or part of the name) of a student')
get_arguments.add_argument('titulo', type=str, required=False, help='Title of the monograph')
get_arguments.add_argument('siape_orientador', type=str, required=False, help='SIAPE number of the advisor')


@ns.route('/')
class MonografiaCollection(Resource):

    @api.expect(get_arguments, validate=True)
    def get(self):
        cod_curso = request.args.get("curso")
        discente = request.args.get("discente")
        titulo = request.args.get("titulo")
        siape_orientador = request.args.get("siape_orientador")
        query = Monografia.query
        if cod_curso:
            query = query.filter(Monografia.codigo_curso == cod_curso)
        if discente:
            query = query.filter(Monografia.discente.like("%" + discente + "%"))
        if titulo:
            query = query.filter(Monografia.titulo.like("%" + titulo + "%"))
        if siape_orientador:
            query = query.filter(Monografia.siape_orientador.like("%" + siape_orientador + "%"))

        # Content negotiation: serve RDF/XML when the client asks for it, JSON otherwise.
        if request.headers.get('Accept') == 'application/xml':
            dados_rdf = list(map(lambda mono: MonografiaRDF(mono.codigo, mono.titulo, mono.codigo_curso, mono.discente, mono.siape_orientador), query.order_by(Monografia.titulo).all()))
            grafo = graph(dados_rdf)
            return xml(grafo.serialize().decode(), 200, {'Content-Type': 'application/xml'})
        else:
            data = list(map(lambda x: x.json(), query.order_by(Monografia.titulo).all()))
            return jsonify({'data': data, 'length': len(data)})
import subprocess import pandas as pd import tempfile import os __all__ = ['runRscript'] def runRscript(Rcmd, inDf=None, outputFiles=0, removeTempFiles=None): """Runs an R cmd with option to provide a DataFrame as input and file as output. Params ------ Rcmd : str String containing the R-script to run. inDf : pd.DataFrame or list of pd.DataFrame's Data to be passed to the R script via a CSV file. Object should be referenced in the script as "INPUTDF" or "INPUTDF0" etc. if list outputFiles : int Number of output CSV files available for writing by the R-script. The contents of the file are returned as a pd.DataFrame. File name should be referenced as "OUTPUTFNX" in the R-script removeTempFiles : True, False or None For debugging. If True then the temporary script and data files will always be removed. If None then they will be removed if there is not an error. If False they will not be removed. Returns ------- stdout : str Output of the R-script at the terminal (including stderr) output : pd.DataFrame or list of pd.DataFrames Optionally, the contents of CSV file(s) written by the R-script as a pd.DataFrame""" """Write data to a tempfile if required""" if not inDf is None: if not type(inDf) is list: inputH, inputFn = tempfile.mkstemp(suffix='.csv', prefix='tmp-Rinput-', text=True) readCmd = 'INPUTDF <- read.csv("%s")\n' % inputFn Rcmd = readCmd + Rcmd os.close(inputH) inDf.to_csv(inputFn) else: inputFilenames = [] for i, idf in enumerate(inDf): inputH, inputFn = tempfile.mkstemp(suffix='.csv', prefix='tmp-Rinput%d-' % i, text=True) readCmd = 'INPUTDF%d <- read.csv("%s")\n' % (i, inputFn) Rcmd = readCmd + Rcmd os.close(inputH) idf.to_csv(inputFn) inputFilenames.append(inputFn) """Set up an output file if required""" outFn = [] for outi in range(outputFiles): outputH, outputFn = tempfile.mkstemp(suffix='.txt', prefix='tmp-Routput-', text=True) outCmd = 'OUTPUTFN%d <- "%s"\n' % (outi, outputFn) Rcmd = outCmd + Rcmd outFn.append(outputFn) os.close(outputH) """Write script to tempfile""" scriptH, scriptFn = tempfile.mkstemp(suffix='.R', prefix='tmp-Rscript-', text=True) with open(scriptFn, 'w') as fh: fh.write(Rcmd) os.close(scriptH) """Run the R script and collect output""" try: cmdList = ['Rscript', '--vanilla', scriptFn] res = subprocess.check_output(cmdList, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: res = bytes('STDOUT:\n%s\nSTDERR:\n%s' % (e.stdout, e.stderr), 'utf-8') print('R process returned an error') if removeTempFiles is None: print('Leaving tempfiles for debugging.') print(' '.join(cmdList)) if not inDf is None: print(inputFn) for outputFn in outFn: print(outputFn) removeTempFiles = False """Read the ouptfile if required""" outDf = [] for outputFn in outFn: try: tmp = pd.read_csv(outputFn) outDf.append(tmp) except: print('Cannot read output CSV: reading as text (%s)' % outputFn) with open(outputFn, 'r') as fh: tmp = fh.read() if len(tmp) == 0: print('Output file is empty! 
(%s)' % outputFn) tmp = None outDf.append(tmp) # outDf = [pd.read_csv(outputFn) for outputFn in outFn] if len(outDf) == 0: outDf = None elif len(outDf) == 1: outDf = outDf[0] """Cleanup the temporary files""" if removeTempFiles is None or removeTempFiles: os.remove(scriptFn) if not inDf is None: if not type(inDf) is list: os.remove(inputFn) else: for inputFn in inputFilenames: os.remove(inputFn) else: print('Leaving tempfiles for debugging.') print(' '.join(cmdList)) if not inDf is None: print(inputFn) for outputFn in outFn: print(outputFn) if outputFiles == 0: return res.decode('utf-8') else: return res.decode('utf-8'), outDf def _test_simple(): Rcmd = """ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14) trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69) group <- gl(2, 10, 20, labels = c("Ctl","Trt")) weight <- c(ctl, trt) lm.D9 <- lm(weight ~ group) lm.D90 <- lm(weight ~ group - 1) # omitting intercept anova(lm.D9) summary(lm.D90)""" res = runRscript(Rcmd) print(res) def _test_io(): ctrl = [4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14] trt = [4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69] inDf = pd.DataFrame({'weight':ctrl + trt, 'group': ['Ctl']*len(ctrl) + ['Trt']*len(trt)}) Rcmd = """print(head(INPUTDF)) lm.D9 <- lm(weight ~ group, data=INPUTDF) lm.D90 <- lm(weight ~ group - 1, data=INPUTDF) # omitting intercept anova(lm.D9) summary(lm.D90) write.csv(data.frame(summary(lm.D90)$coefficients), OUTPUTFN) """ res, outputFile = runRscript(Rcmd, inDf=inDf, outputFiles=1) print(res) print(outputFile)
# split off into its own module for aliasing without circrefs from cgi import escape from DocumentTemplate.ustr import ustr def html_quote(v, name='(Unknown name)', md={}): return escape(ustr(v), 1)
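# Illustrative usage sketch (not part of the original module): html_quote escapes &, <, >
# and double quotes (the second argument to cgi.escape turns on quote escaping). Note that
# cgi.escape is only available on older Python versions; html.escape is the modern equivalent.
if __name__ == '__main__':
    # quick sanity check of the escaping behaviour
    assert html_quote('<a href="x">&</a>') == '&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;'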
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from urllib.request import Request, urlopen
from urllib.parse import quote

from bs4 import BeautifulSoup


def replace_all(text):
    # strip leftover HTML entities and markup fragments from the API response
    rep = {
        "&lt": "",
        "&amp": "",
        "&apos": "",
        "&quot": "",
        ";": "",
        "<b>": "",
        "</b>": "",
        "&gt": "",
        "‘": "",
        "’": "",
        u"\xa0": u"",
        "“": "",
        "”": "",
    }
    for i, j in rep.items():
        text = text.replace(i, j)
    return text


def main():
    req_url = u'https://openapi.naver.com/v1/search/news.xml?query=' + quote('XXXXXX') + '&display=100&start=1&sort=date'
    request = Request(req_url)
    request.add_header('X-Naver-Client-Id', 'xxxxxxxxxxxxxxxxxxxx')
    request.add_header('X-Naver-Client-Secret', 'xxxxxxxxxx')
    response = urlopen(request)
    rescode = response.getcode()
    if rescode == 200:
        response_body = response.read()
        data = response_body.decode('utf-8')
        soup = BeautifulSoup(data, 'html.parser')
        msg = [0, 0, 0, 0]
        for i, s in enumerate(soup.find_all(['link', 'title', 'description', 'pubdate'])):
            if i < 5:
                continue
            if i % 4 == 1:  # desc
                msg.append(replace_all(s.text))
            elif i % 4 == 2:  # date, e.g. Fri, 14 Apr 2017
                msg.append(replace_all(s.text))
                # assembled message; the original script does not use it any further
                write_msg = '%s\n%s\n%s' % (msg[2], msg[1], msg[0])
                del msg[:]
            elif i % 4 == 3:  # title
                msg.append(replace_all(s.text))
            elif i % 4 == 0:  # url
                msg.append(replace_all(s.text))
    else:
        print('[NAVER news] Error Code: %d' % rescode)


if __name__ == '__main__':
    main()