max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
socketio/asgi.py | zwerg44/python-socketio | 0 | 12788351 | import engineio
class ASGIApp(engineio.ASGIApp):
"""ASGI application middleware for Socket.IO.
This middleware dispatches traffic to a Socket.IO application. It can
also serve a list of static files to the client, or forward unrelated
HTTP traffic to another ASGI application.
:param socketio_server: The Socket.IO server. Must be an instance of the
``socketio.AsyncServer`` class.
:param static_files: A dictionary where the keys are URLs that should be
served as static files. For each URL, the value is
a dictionary with ``content_type`` and ``filename``
keys. This option is intended to be used for serving
client files during development.
:param other_asgi_app: A separate ASGI app that receives all other traffic.
:param socketio_path: The endpoint where the Socket.IO application should
be installed. The default value is appropriate for
most cases.
Example usage::
import socketio
import uvicorn
        sio = socketio.AsyncServer()
        app = socketio.ASGIApp(sio, static_files={
            '/': {'content_type': 'text/html', 'filename': 'index.html'},
            '/index.html': {'content_type': 'text/html',
                            'filename': 'index.html'},
        })
        uvicorn.run(app, host='127.0.0.1', port=5000)
"""
def __init__(self, socketio_server, other_asgi_app=None,
static_files=None, socketio_path='socket.io'):
super().__init__(socketio_server, other_asgi_app,
static_files=static_files,
engineio_path=socketio_path)
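# Hedged usage sketch (not part of the original file): combining Socket.IO with
# another ASGI application so that non-Socket.IO traffic is forwarded to it.
# The FastAPI app, variable names and port below are illustrative assumptions.
#
#   import socketio
#   import uvicorn
#   from fastapi import FastAPI
#
#   other_app = FastAPI()
#   sio = socketio.AsyncServer(async_mode='asgi')
#   app = ASGIApp(sio, other_asgi_app=other_app, socketio_path='socket.io')
#   uvicorn.run(app, host='127.0.0.1', port=5000)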
| 2.8125 | 3 |
test/utils.py | f-dangel/backobs | 1 | 12788352 | import random
import numpy
import torch
from backobs.integration import extend as backobs_extend
from backobs.integration import (
extend_with_access_unreduced_loss as backobs_extend_with_access_unreduced_loss,
)
def set_deepobs_seed(seed=0):
"""Set all seeds used by DeepOBS."""
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
def set_up_problem(
tproblem_cls,
batch_size,
force_no_l2_reg=True,
seed=None,
extend=False,
unreduced_loss=False,
):
"""Create problem with neural network, and set to train mode."""
    if seed is not None:
        set_deepobs_seed(seed)
if force_no_l2_reg:
tproblem = tproblem_cls(batch_size, l2_reg=0.0)
else:
tproblem = tproblem_cls(batch_size)
tproblem.set_up()
tproblem.train_init_op()
if unreduced_loss and not extend:
raise ValueError("To use unreduced_loss, enable the extend option.")
if extend:
if unreduced_loss:
backobs_extend_with_access_unreduced_loss(tproblem)
else:
tproblem = backobs_extend(tproblem)
return tproblem
def get_reduction_factor(loss, unreduced_loss):
"""Return the factor used to reduce the individual losses."""
mean_loss = unreduced_loss.flatten().mean()
sum_loss = unreduced_loss.flatten().sum()
if torch.allclose(mean_loss, sum_loss):
raise RuntimeError(
"Cannot determine reduction factor. ",
"Results from 'mean' and 'sum' reduction are identical. ",
f"'mean': {mean_loss}, 'sum': {sum_loss}",
)
if torch.allclose(loss, mean_loss):
factor = 1.0 / unreduced_loss.numel()
elif torch.allclose(loss, sum_loss):
factor = 1.0
else:
raise RuntimeError(
"Reductions 'mean' or 'sum' do not match with loss. ",
f"'mean': {mean_loss}, 'sum': {sum_loss}, loss: {loss}",
)
return factor
atol = 1e-5
rtol = 1e-5
def report_nonclose_values(x, y):
x_numpy = x.data.cpu().numpy().flatten()
y_numpy = y.data.cpu().numpy().flatten()
close = numpy.isclose(x_numpy, y_numpy, atol=atol, rtol=rtol)
where_not_close = numpy.argwhere(numpy.logical_not(close))
for idx in where_not_close:
x, y = x_numpy[idx], y_numpy[idx]
print("{} versus {}. Ratio of {}".format(x, y, y / x))
def check_sizes_and_values(*plists, atol=atol, rtol=rtol):
check_sizes(*plists)
list1, list2 = plists
check_values(list1, list2, atol=atol, rtol=rtol)
def check_sizes(*plists):
for i in range(len(plists) - 1):
assert len(plists[i]) == len(plists[i + 1])
for params in zip(*plists):
for i in range(len(params) - 1):
assert params[i].size() == params[i + 1].size()
def check_values(list1, list2, atol=atol, rtol=rtol):
for i, (g1, g2) in enumerate(zip(list1, list2)):
print(i)
print(g1.size())
report_nonclose_values(g1, g2)
assert torch.allclose(g1, g2, atol=atol, rtol=rtol)
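# Hedged usage sketch (not part of the original file): typical flow for these
# helpers. `tproblem_cls`, `loss`, `unreduced_losses` and the parameter lists
# are placeholders, not names provided by DeepOBS/BackOBS.
#
#   tproblem = set_up_problem(tproblem_cls, batch_size=8, seed=0,
#                             extend=True, unreduced_loss=True)
#   # factor is 1/N for 'mean' reduction and 1.0 for 'sum' reduction
#   factor = get_reduction_factor(loss, unreduced_losses)
#   check_sizes_and_values(params_autograd, params_backpack)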
| 2.4375 | 2 |
usmart_sdk/usmart.py | UrbanTide/usmart-sdk-py | 0 | 12788353 | <reponame>UrbanTide/usmart-sdk-py<filename>usmart_sdk/usmart.py<gh_stars>0
"""
USMART SDK
"""
import requests
class USMART:
auth = None
def __init__(self, auth=None):
if auth is not None:
if "keyId" not in auth:
raise Exception("Auth requires keyId")
if "keySecret" not in auth:
raise Exception("Auth requires keySecret")
self.auth = auth
def request(self, organisation, resource, revision=None, query=None):
url = self.buildURL(organisation, resource, revision)
queryString = self.buildQuery(query)
url += "?" + queryString
headers = None
if self.auth:
headers = {
"api-key-id": self.auth.keyId,
"api-key-secret": self.auth.keySecret
}
return requests.get(
url,
headers=headers
)
def buildQuery(self, query=None):
limit = 10
offset = 0
if query and "limit" in query:
limit = query["limit"]
if query and "offset" in query:
offset = query["offset"]
queries = []
queries.append("limit(" + str(limit) + "," + str(offset) + ")")
if query and "equals" in query:
queries = queries + self.buildEqualQueries(query["equals"])
return "&".join(queries)
def buildEqualQueries(self, equals):
results = []
for equalQuery in equals:
results.append(
"" + equalQuery["key"] + "=" + equalQuery["value"]
)
return results
def buildURL(self, organisation, resource, revision=None):
revisionString = revision + "/" if revision else "latest/"
return "https://api.usmart.io/org/" + organisation + "/" + resource + "/" +\
revisionString + "urql";
| 2.546875 | 3 |
sparsePlane/sparseplane/data/planercnn_transforms.py | jinlinyi/SparsePlanes | 69 | 12788354 | import copy
import numpy as np
import os
import torch
import pickle
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
PolygonMasks,
polygons_to_bitmask,
)
import pycocotools.mask as mask_util
from PIL import Image
import torchvision.transforms as transforms
from . import GaussianBlur
__all__ = ["PlaneRCNNMapper"]
def convert_PIL_to_numpy(image, format):
"""
Convert PIL image to numpy array of target format.
Args:
image (PIL.Image): a PIL image
format (str): the format of output image
Returns:
(np.ndarray): also see `read_image`
"""
if format is not None:
# PIL only supports RGB, so convert to RGB and flip channels over below
conversion_format = format
if format in ["BGR", "YUV-BT.601"]:
conversion_format = "RGB"
image = image.convert(conversion_format)
image = np.asarray(image)
# PIL squeezes out the channel dimension for "L", so make it HWC
if format == "L":
image = np.expand_dims(image, -1)
# handle formats not supported by PIL
elif format == "BGR":
# flip channels if needed
image = image[:, :, ::-1]
elif format == "YUV-BT.601":
image = image / 255.0
image = np.dot(image, np.array(_M_RGB2YUV).T)
return image
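# Hedged usage sketch (not part of the original file): converting a PIL image
# into the array layouts handled above. The file path is illustrative only.
#
#   from PIL import Image
#   pil_img = Image.open("example.jpg")
#   bgr = convert_PIL_to_numpy(pil_img, "BGR")   # HWC uint8, channels reversed
#   gray = convert_PIL_to_numpy(pil_img, "L")    # HW1 single-channel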
def annotations_to_instances(
annos, image_size, mask_format="polygon", max_num_planes=20
):
"""
Create an :class:`Instances` object used by the models,
from instance annotations in the dataset dict.
Args:
annos (list[dict]): a list of annotations, one per instance.
image_size (tuple): height, width
Returns:
        Instances: It will contain fields "gt_boxes", "gt_classes",
"gt_masks", "gt_keypoints", if they can be obtained from `annos`.
"""
boxes = [
BoxMode.convert(obj["bbox"], BoxMode(obj["bbox_mode"]), BoxMode.XYXY_ABS)
for obj in annos
]
target = Instances(image_size)
boxes = target.gt_boxes = Boxes(boxes)
boxes.clip(image_size)
classes = [obj["category_id"] for obj in annos]
classes = torch.tensor(classes, dtype=torch.int64)
target.gt_classes = classes
if len(annos) and "segmentation" in annos[0]:
segms = [obj["segmentation"] for obj in annos]
if mask_format == "polygon":
masks = PolygonMasks(segms)
else:
assert mask_format == "bitmask", mask_format
masks = []
for segm in segms:
if isinstance(segm, list):
# polygon
masks.append(polygons_to_bitmask(segm, *image_size))
elif isinstance(segm, dict):
# COCO RLE
masks.append(mask_util.decode(segm))
elif isinstance(segm, np.ndarray):
assert (
segm.ndim == 2
), "Expect segmentation of 2 dimensions, got {}.".format(segm.ndim)
# mask array
masks.append(segm)
else:
raise ValueError(
"Cannot convert segmentation of type '{}' to BitMasks!"
"Supported types are: polygons as list[list[float] or ndarray],"
" COCO-style RLE as a dict, or a full-image segmentation mask "
"as a 2D ndarray.".format(type(segm))
)
# torch.from_numpy does not support array with negative stride.
masks = BitMasks(
torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
)
target.gt_masks = masks
if len(annos) and "plane" in annos[0]:
plane = [torch.tensor(obj["plane"]) for obj in annos]
plane_idx = [torch.tensor([i]) for i in range(len(plane))]
target.gt_planes = torch.stack(plane, dim=0)
target.gt_plane_idx = torch.stack(plane_idx, dim=0)
return target
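# Hedged usage sketch (not part of the original file): a minimal annotation
# list in the format annotations_to_instances expects. All values below are
# illustrative, not taken from the dataset.
#
#   annos = [{
#       "bbox": [10.0, 20.0, 50.0, 80.0],
#       "bbox_mode": BoxMode.XYXY_ABS,
#       "category_id": 0,
#       "segmentation": np.zeros((480, 640), dtype=np.uint8),  # full-image mask
#       "plane": [0.0, 0.0, 1.0],
#   }]
#   instances = annotations_to_instances(annos, (480, 640), mask_format="bitmask")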
class PlaneRCNNMapper:
"""
A callable which takes a dict produced by the detection dataset, and applies transformations,
    including image resizing and flipping. The transformation parameters are parsed from the cfg file
    and depend on the is_train condition.
Note that for our existing models, mean/std normalization is done by the model instead of here.
"""
def __init__(self, cfg, is_train=True, dataset_names=None):
self.cfg = cfg
# fmt: off
self.img_format = cfg.INPUT.FORMAT
self.depth_on = cfg.MODEL.DEPTH_ON
self.camera_on = cfg.MODEL.CAMERA_ON
self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
self._eval_gt_box = cfg.TEST.EVAL_GT_BOX
self._augmentation = cfg.DATALOADER.AUGMENTATION
# fmt: on
if self.load_proposals:
raise ValueError("Loading proposals not yet supported")
self.is_train = is_train
assert dataset_names is not None
if self.camera_on:
kmeans_trans_path = cfg.MODEL.CAMERA_HEAD.KMEANS_TRANS_PATH
kmeans_rots_path = cfg.MODEL.CAMERA_HEAD.KMEANS_ROTS_PATH
assert os.path.exists(kmeans_trans_path)
assert os.path.exists(kmeans_rots_path)
with open(kmeans_trans_path, "rb") as f:
self.kmeans_trans = pickle.load(f)
with open(kmeans_rots_path, "rb") as f:
self.kmeans_rots = pickle.load(f)
if self._augmentation:
color_jitter = transforms.ColorJitter(0.8, 0.8, 0.8, 0.2)
augmentation = [
transforms.RandomApply([color_jitter], p=0.2),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5),
transforms.ToTensor(),
]
self.img_transform = transforms.Compose(augmentation)
def __call__(self, dataset_dict):
"""
Transform the dataset_dict according to the configured transformations.
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a new dict that's going to be processed by the model.
It currently does the following:
1. Read the image from "file_name"
2. Transform the image and annotations
3. Prepare the annotations to :class:`Instances`
"""
dataset_dict = copy.deepcopy(dataset_dict)
for i in range(2):
image = utils.read_image(
dataset_dict[str(i)]["file_name"], format=self.img_format
)
utils.check_image_size(dataset_dict[str(i)], image)
if self.is_train and self._augmentation:
image = Image.fromarray(image)
dataset_dict[str(i)]["image"] = self.img_transform(image) * 255.0
image_shape = dataset_dict[str(i)]["image"].shape[1:]
else:
image_shape = image.shape[:2]
dataset_dict[str(i)]["image"] = torch.as_tensor(
image.transpose(2, 0, 1).astype("float32")
)
# Can use uint8 if it turns out to be slow some day
if self.depth_on:
if "depth_head" in self.cfg.MODEL.FREEZE:
dataset_dict[str(i)]["depth"] = torch.as_tensor(
np.zeros((480, 640)).astype("float32")
)
else:
# load depth map
house, img_id = dataset_dict[str(i)]["image_id"].split("_", 1)
depth_path = os.path.join(
"/Pool1/users/jinlinyi/dataset/mp3d_rpnet_v4_sep20/observations",
house,
img_id + ".pkl",
)
with open(depth_path, "rb") as f:
obs = pickle.load(f)
# This assertion is to check dataset is clean
# assert((obs['color_sensor'][:,:,:3][:,:,::-1].transpose(2, 0, 1)-dataset_dict[str(i)]["image"].numpy()).sum()==0)
depth = obs["depth_sensor"]
dataset_dict[str(i)]["depth"] = torch.as_tensor(
depth.astype("float32")
)
if self.camera_on:
relative_pose = dataset_dict["rel_pose"]
x, y, z = relative_pose["position"]
w, xi, yi, zi = relative_pose["rotation"]
dataset_dict["rel_pose"]["tran_cls"] = torch.LongTensor(
self.xyz2class(x, y, z)
)
dataset_dict["rel_pose"]["rot_cls"] = torch.LongTensor(
self.quat2class(w, xi, yi, zi)
)
if not self.is_train and not self._eval_gt_box:
return dataset_dict
if not self._eval_gt_box:
for i in range(2):
if "annotations" in dataset_dict[str(i)]:
annos = [
self.transform_annotations(obj)
for obj in dataset_dict[str(i)].pop("annotations")
if obj.get("iscrowd", 0) == 0
]
# Should not be empty during training
instances = annotations_to_instances(annos, image_shape)
dataset_dict[str(i)]["instances"] = instances[
instances.gt_boxes.nonempty()
]
else:
for i in range(2):
if "annotations" in dataset_dict[str(i)]:
annos = [
self.transform_annotations(obj)
for obj in dataset_dict[str(i)]["annotations"]
if obj.get("iscrowd", 0) == 0
]
# Should not be empty during training
instances = annotations_to_instances(annos, image_shape)
dataset_dict[str(i)]["instances"] = instances[
instances.gt_boxes.nonempty()
]
return dataset_dict
def transform_annotations(self, annotation, transforms=None, image_size=None):
"""
Apply image transformations to the annotations.
After this method, the box mode will be set to XYXY_ABS.
"""
return annotation
def xyz2class(self, x, y, z):
return self.kmeans_trans.predict([[x, y, z]])
def quat2class(self, w, xi, yi, zi):
return self.kmeans_rots.predict([[w, xi, yi, zi]])
def class2xyz(self, cls):
assert (cls >= 0).all() and (cls < self.kmeans_trans.n_clusters).all()
return self.kmeans_trans.cluster_centers_[cls]
def class2quat(self, cls):
assert (cls >= 0).all() and (cls < self.kmeans_rots.n_clusters).all()
return self.kmeans_rots.cluster_centers_[cls]
| 2.46875 | 2 |
tests/test_models.py | ivoire/KissCache | 0 | 12788355 | <filename>tests/test_models.py<gh_stars>0
# -*- coding: utf-8 -*-
# vim: set ts=4
#
# Copyright 2019 Linaro Limited
#
# Author: <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
import pytest
import time
from kiss_cache.models import Resource
def test_resource_parse_ttl():
assert Resource.parse_ttl("1d") == 60 * 60 * 24
assert Resource.parse_ttl("18d") == 60 * 60 * 24 * 18
assert Resource.parse_ttl("5h") == 60 * 60 * 5
assert Resource.parse_ttl("34m") == 60 * 34
assert Resource.parse_ttl("500s") == 500
assert Resource.parse_ttl("42s") == 42
with pytest.raises(NotImplementedError, match="Unknown TTL value"):
Resource.parse_ttl("42t")
with pytest.raises(Exception, match="The TTL should be positive"):
Resource.parse_ttl("-1s")
def test_resource_path(db):
res = Resource.objects.create(url="https://example.com/kernel")
assert (
res.path == "76/66828e5a43fe3e8c06c2e62ad216cc354c91da92f093d6d8a7c3dc9d1baa82"
)
def test_resource_total_size(db):
assert Resource.total_size() == 0
Resource.objects.create(url="http://example.com", content_length=4212)
Resource.objects.create(url="http://example.org", content_length=5379)
Resource.objects.create(url="http://example.net", content_length=2)
assert Resource.total_size() == 4212 + 5379 + 2
def test_resource_is_over_quota(db, settings):
settings.RESOURCE_QUOTA = 12
assert Resource.is_over_quota() is False
Resource.objects.create(url="http://example.com", content_length=4212)
Resource.objects.create(url="http://example.org", content_length=5379)
Resource.objects.create(url="http://example.net", content_length=2)
assert Resource.is_over_quota() is True
settings.RESOURCE_QUOTA = 4212 + 5379 + 2 - 1
assert Resource.is_over_quota() is True
settings.RESOURCE_QUOTA = 4212 + 5379 + 2 + 1
assert Resource.is_over_quota() is False
settings.RESOURCE_QUOTA = 0
assert Resource.is_over_quota() is False
def test_resource_progress(db, settings, tmpdir):
settings.DOWNLOAD_PATH = str(tmpdir)
res = Resource.objects.create(url="https://example.com/kernel")
assert res.progress() == "??"
res.content_length = 56
assert res.progress() == 0
(tmpdir / "76").mkdir()
(
tmpdir / "76/66828e5a43fe3e8c06c2e62ad216cc354c91da92f093d6d8a7c3dc9d1baa82"
).write_text("hello", encoding="utf-8")
assert res.progress() == 8
def test_resource_stream(db, monkeypatch, settings, tmpdir):
monkeypatch.setattr(time, "sleep", lambda d: d)
settings.DOWNLOAD_PATH = str(tmpdir)
res = Resource.objects.create(url="https://example.com/kernel")
(tmpdir / "76").mkdir()
with (
tmpdir / "76/66828e5a43fe3e8c06c2e62ad216cc354c91da92f093d6d8a7c3dc9d1baa82"
).open("wb") as f_out:
f_out.write(b"hello")
f_out.flush()
it = res.stream()
assert next(it) == b"hello"
f_out.write(b" world")
f_out.flush()
assert next(it) == b" world"
res.status_code = 200
res.state = Resource.STATE_FINISHED
res.save()
f_out.write(b"!")
f_out.flush()
assert next(it) == b"!"
with pytest.raises(StopIteration):
next(it)
def test_resource_stream_errors(db, monkeypatch, settings, tmpdir):
monkeypatch.setattr(time, "sleep", lambda d: d)
settings.DOWNLOAD_PATH = str(tmpdir)
res = Resource.objects.create(url="https://example.com/kernel", content_length=11)
(tmpdir / "76").mkdir()
with (
tmpdir / "76/66828e5a43fe3e8c06c2e62ad216cc354c91da92f093d6d8a7c3dc9d1baa82"
).open("wb") as f_out:
f_out.write(b"hello")
f_out.flush()
it = res.stream()
assert next(it) == b"hello"
f_out.write(b" world")
f_out.flush()
assert next(it) == b" world"
res.status_code = 403
res.state = Resource.STATE_FINISHED
res.save()
# The length is right: no exception will be raised
with pytest.raises(StopIteration):
next(it)
def test_resource_stream_errors_2(db, monkeypatch, settings, tmpdir):
monkeypatch.setattr(time, "sleep", lambda d: d)
settings.DOWNLOAD_PATH = str(tmpdir)
res = Resource.objects.create(url="https://example.com/kernel", content_length=13)
(tmpdir / "76").mkdir()
with (
tmpdir / "76/66828e5a43fe3e8c06c2e62ad216cc354c91da92f093d6d8a7c3dc9d1baa82"
).open("wb") as f_out:
f_out.write(b"hello")
f_out.flush()
it = res.stream()
assert next(it) == b"hello"
f_out.write(b" world")
f_out.flush()
assert next(it) == b" world"
res.status_code = 403
res.state = Resource.STATE_FINISHED
res.save()
# The length is wrong: an exception should be raised
with pytest.raises(Exception, match="Resource length streamed is wrong: 11 vs 13"):
next(it)
def test_resource_stream_errors_3(db, monkeypatch, settings, tmpdir):
monkeypatch.setattr(time, "sleep", lambda d: d)
settings.DOWNLOAD_PATH = str(tmpdir)
res = Resource.objects.create(url="https://example.com/kernel")
(tmpdir / "76").mkdir()
with (
tmpdir / "76/66828e5a43fe3e8c06c2e62ad216cc354c91da92f093d6d8a7c3dc9d1baa82"
).open("wb") as f_out:
f_out.write(b"hello")
f_out.flush()
it = res.stream()
assert next(it) == b"hello"
f_out.write(b" world")
f_out.flush()
assert next(it) == b" world"
res.delete()
# the length is unknown: an exception should be raised
with pytest.raises(Exception, match="Resource was deleted and length is unknown"):
next(it)
def test_resource_stream_errors_4(db, monkeypatch, settings, tmpdir):
monkeypatch.setattr(time, "sleep", lambda d: d)
settings.DOWNLOAD_PATH = str(tmpdir)
res = Resource.objects.create(url="https://example.com/kernel", content_length=13)
(tmpdir / "76").mkdir()
with (
tmpdir / "76/66828e5a43fe3e8c06c2e62ad216cc354c91da92f093d6d8a7c3dc9d1baa82"
).open("wb") as f_out:
f_out.write(b"hello")
f_out.flush()
it = res.stream()
assert next(it) == b"hello"
f_out.write(b" world")
f_out.flush()
assert next(it) == b" world"
res.delete()
# the length is unknown: an exception should be raised
with pytest.raises(
Exception, match="Resource was deleted and streamed length is wrong: 11 vs 13"
):
next(it)
def test_resource_stream_errors_5(db, monkeypatch, settings, tmpdir):
monkeypatch.setattr(time, "sleep", lambda d: d)
settings.DOWNLOAD_PATH = str(tmpdir)
res = Resource.objects.create(url="https://example.com/kernel", content_length=11)
(tmpdir / "76").mkdir()
with (
tmpdir / "76/66828e5a43fe3e8c06c2e62ad216cc354c91da92f093d6d8a7c3dc9d1baa82"
).open("wb") as f_out:
f_out.write(b"hello")
f_out.flush()
it = res.stream()
assert next(it) == b"hello"
f_out.write(b" world")
f_out.flush()
assert next(it) == b" world"
res.delete()
# the length is good: no exception should be raised
with pytest.raises(StopIteration):
next(it)
| 2.234375 | 2 |
screenpy/actions/see.py | perrygoy/screenpy | 39 | 12788356 | <reponame>perrygoy/screenpy<filename>screenpy/actions/see.py<gh_stars>10-100
"""
Make an assertion using a Question and a Resolution.
"""
from typing import Any, Union
from hamcrest import assert_that
from screenpy import Actor
from screenpy.pacing import beat
from screenpy.protocols import Answerable
from screenpy.resolutions import BaseResolution
from screenpy.speech_tools import get_additive_description
class See:
"""See if a value or the answer to a Question matches the expected answer.
This is a very important Action in ScreenPy. It is the way to perform
test assertions. See the |Question| and |Resolution| documentation.
Examples::
the_actor.should(
See(TheText.of_the(WELCOME_MESSAGE), ReadsExactly("Welcome!")),
See.the(Number.of(BALLOONS), IsEqualTo(3)),
See.the(list_of_items, ContainsTheItem("juice extractor")),
)
"""
@staticmethod
def the(question: Union[Answerable, Any], resolution: BaseResolution) -> "See":
"""Supply the |Question| and |Resolution| to assert."""
return See(question, resolution)
def describe(self) -> str:
"""Describe the Action in present tense."""
return f"See if {self.question_to_log} is {self.resolution_to_log}."
@beat("{} sees if {question_to_log} is {resolution_to_log}.")
def perform_as(self, the_actor: Actor) -> None:
"""Direct the Actor to make an observation."""
if hasattr(self.question, "answered_by"):
value = self.question.answered_by(the_actor)
else:
# must be a value instead of a question!
value = self.question
assert_that(value, self.resolution)
def __init__(
self, question: Union[Answerable, Any], resolution: BaseResolution
) -> None:
self.question = question
self.question_to_log = get_additive_description(question)
self.resolution = resolution
self.resolution_to_log = resolution.get_line()
| 2.890625 | 3 |
src/files_sort_out/files_sort_out.py | AndriiOshtuk/files_sort_out | 0 | 12788357 | <gh_stars>0
import shelve
import shutil
import time
from pathlib import Path
import click
db_filename = "files_sort_out.db"
@click.group()
def cli() -> None:
"""File Sort a tool to organize images on a path.
To get started, run collect:
$ files_sort_out collect
To show collected image folders:
$ files_sort_out show
To remove(exclude) directories from list run:
$ files_sort_out exclude <path>
Then copy files to a new location:
$ files_sort_out copy <path>
Or move files to a new location:
$ files_sort_out move <path>
To find files duplicates run:
$ files_sort_out duplicate <path>
"""
pass
@cli.command()
@click.argument("root", type=click.Path(resolve_path=True),)
@click.option(
"-p/-n",
"--print/--no-print",
default=True,
help="Print all collected folders",
show_default=True,
)
def collect(root: str, print: bool) -> None:
"""Collect folders where images make up 80% of files.
ROOT is directory to search in
"""
path = Path(root)
image_dirs = []
start_time = time.time()
with click.progressbar(path.glob("**"), label="Searching:") as bar:
for d in bar:
if d.is_dir():
total_files = 0
image_files = 0
for f in d.glob("*"):
if f.is_file():
total_files += 1
if f.suffix in (".jpeg", ".jpg", ".bmp", ".png", ".gif", ".tiff"):
image_files += 1
if total_files and image_files/total_files >= 0.8:
image_dirs.append(d)
elapsed_time = time.time() - start_time
click.echo(f"Found {len(image_dirs)} directories in {elapsed_time:.2f}s:")
with shelve.open(db_filename) as db:
db["image_dirs"] = image_dirs
db["root_dir"] = path
if print:
for d in image_dirs:
click.echo(str(d))
@cli.command()
@click.argument("dest", default=False, type=click.STRING)
def copy(dest: str) -> None:
"""Copy all or selected folders to DEST"""
with shelve.open(db_filename, flag='r') as db:
image_dirs = db['image_dirs']
if not image_dirs:
click.echo("No image folders are found.\nRun collect command first")
return
start_time = time.time()
for n, d in enumerate(image_dirs):
to_dir = Path(dest) / f"0{n}_{d.name}"
to_dir.mkdir(parents=True)
for file in d.iterdir():
if file.is_file():
to_file = to_dir / file.name
shutil.copy(file, to_file)
elapsed_time = time.time() - start_time
click.echo(f"Copied {n+1}directories. Time:{elapsed_time:.2f}s.")
@cli.command()
def list() -> None:
pass
@cli.command()
def show() -> None:
pass
@cli.command()
def exclude() -> None:
pass
@cli.command()
def move() -> None:
pass
@cli.command()
def duplicates() -> None:
pass
if __name__ == "__main__":
cli() | 3.03125 | 3 |
MatOp.py | ElliotHYLee/MyPyTorchAPI | 0 | 12788358 | <gh_stars>0
import torch
import torch.nn as nn
import numpy as np
class BatchScalar33MatMul(nn.Module):
def __init__(self):
super().__init__()
def forward(self, scalar, mat):
s = scalar.unsqueeze(2)
s = s.expand_as(mat)
return s*mat
class GetIdentity(nn.Module):
def __init__(self):
super().__init__()
def forward(self, bn):
I = torch.eye(3, dtype=torch.float)
if torch.cuda.is_available():
I = I.cuda()
I = I.reshape((1, 3, 3))
I = I.repeat(bn, 1, 1)
return I
class Batch33MatVec3Mul(nn.Module):
    def __init__(self):
super().__init__()
def forward(self, mat, vec):
vec = vec.unsqueeze(2)
result = torch.matmul(mat, vec)
return result.squeeze(2)
class GetSkew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, dw):
bn = dw.shape[0]
skew = torch.zeros((bn, 3, 3), dtype=torch.float)
if torch.cuda.is_available():
skew = skew.cuda()
skew[:, 0, 1] = -dw[:,2]
skew[:, 0, 2] = dw[:,1]
skew[:, 1, 2] = -dw[:,0]
skew[:, 1, 0] = dw[:, 2]
skew[:, 2, 0] = -dw[:, 1]
skew[:, 2, 1] = dw[:, 0]
return skew
class GetCovMatFromChol(nn.Module):
def __init__(self):
super().__init__()
def forward(self, chol_cov):
bn = chol_cov.shape[0]
L = torch.zeros(bn, 3, 3, dtype=torch.float)
LT = torch.zeros(bn, 3, 3, dtype=torch.float)
if torch.cuda.is_available():
L = L.cuda()
LT = LT.cuda()
index = 0
for j in range(0, 3):
for i in range(0, j + 1):
L[:, j, i] = chol_cov[:, index]
LT[:, i, j] = chol_cov[:, index]
index += 1
Q = torch.matmul(L, LT)
return Q
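# Hedged usage sketch (not part of the original file): turning 6 lower-triangular
# Cholesky entries per sample into a 3x3 covariance matrix Q = L @ L.T.
#
#   chol = torch.randn(4, 6)                       # batch of 4 samples
#   chol = chol.cuda() if torch.cuda.is_available() else chol
#   Q = GetCovMatFromChol()(chol)                  # shape (4, 3, 3), symmetric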
class GetCovMatFromChol_Sequence(nn.Module):
def __init__(self, seq_len):
super().__init__()
self.seq_len = seq_len
def forward(self, chol_cov):
bn = chol_cov.shape[0]
L = torch.zeros(bn, self.seq_len, 3, 3, dtype=torch.float)
LT = torch.zeros(bn, self.seq_len, 3, 3, dtype=torch.float)
if torch.cuda.is_available():
L = L.cuda()
LT = LT.cuda()
index = 0
for j in range(0, 3):
for i in range(0, j + 1):
L[:, :, j, i] = chol_cov[:, :, index]
LT[:, :, i, j] = chol_cov[:, :, index]
index += 1
Q = torch.matmul(L, LT)
return Q
if __name__ == '__main__':
mat1 = np.array([[[1, 2, 3], [4, 1, 6], [7, 8, 1]],
[[1, 12, 13], [14, 1, 16], [17, 18, 1]]], dtype=np.float32)
mat2 = -np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[11, 12, 13], [14, 15, 16], [17, 18, 19]]], dtype=np.float32)
mat1 = torch.from_numpy(mat1).cuda()
mat2 = torch.from_numpy(mat2).cuda()
print(mat1)
print(mat1.shape)
# print(torch.transpose(mat1, dim0=2, dim1=1))
    invMat1 = torch.inverse(mat1)
print(invMat1) | 2.28125 | 2 |
test/unit/hsts_support_test.py | grauwoelfchen/pyramid_secure_response | 2 | 12788359 | <reponame>grauwoelfchen/pyramid_secure_response
import pytest
from pyramid_secure_response.hsts_support import tween
@pytest.fixture(autouse=True)
def setup():
import logging
from pyramid_secure_response.hsts_support import logger
logger.setLevel(logging.ERROR)
@pytest.mark.parametrize('max_age,include_subdomains,preload,header', [
('3600', True, True, 'max-age=3600; includeSubDomains; preload'),
('1800', True, False, 'max-age=1800; includeSubDomains'),
('900', False, False, 'max-age=900'),
])
def test_build_hsts_header(max_age, include_subdomains, preload, header):
from collections import namedtuple
from pyramid_secure_response.hsts_support import build_hsts_header
Config = namedtuple('hsts_support', ( # pylint: disable=invalid-name
'max_age',
'include_subdomains',
'preload',
))
hsts_support = Config(max_age, include_subdomains, preload)
assert header == build_hsts_header(hsts_support)
def test_hsts_support_tween_with_disabled(mocker, dummy_request):
mocker.patch('pyramid_secure_response.hsts_support.apply_path_filter',
return_value=True)
mocker.patch('pyramid_secure_response.hsts_support.build_criteria',
return_value=[])
from pyramid.response import Response
from pyramid_secure_response.hsts_support import (
apply_path_filter,
build_criteria,
)
dummy_request.registry.settings = {
'pyramid_secure_response.hsts_support.enabled': 'False'
}
handler_stub = mocker.stub(name='handler_stub')
handler_stub.return_value = Response(status=200)
hsts_support_tween = tween(handler_stub, dummy_request.registry)
res = hsts_support_tween(dummy_request)
# pylint: disable=no-member
assert 1 == handler_stub.call_count
assert 0 == apply_path_filter.call_count
assert 0 == build_criteria.call_count
assert 'Strict-Transport-Security' not in res.headers
def test_hsts_support_tween_with_ignored_path(mocker, dummy_request):
mocker.patch('pyramid_secure_response.hsts_support.apply_path_filter',
return_value=True)
mocker.patch('pyramid_secure_response.hsts_support.build_criteria',
return_value=[])
from pyramid.response import Response
from pyramid_secure_response.hsts_support import (
apply_path_filter,
build_criteria,
)
dummy_request.path = '/humans.txt'
dummy_request.registry.settings = {
'pyramid_secure_response.hsts_support.enabled': 'True',
'pyramid_secure_response.hsts_support.ignore_paths': '\n/humans.txt\n'
}
handler_stub = mocker.stub(name='handler_stub')
handler_stub.return_value = Response(status=200)
hsts_support_tween = tween(handler_stub, dummy_request.registry)
res = hsts_support_tween(dummy_request)
# pylint: disable=no-member
assert 1 == handler_stub.call_count
assert 1 == apply_path_filter.call_count
apply_path_filter.assert_called_once_with(
dummy_request, ('/humans.txt',))
assert 0 == build_criteria.call_count
assert 'Strict-Transport-Security' not in res.headers
def test_hsts_tween_with_none_ssl_request(mocker, dummy_request):
from pyramid_secure_response import hsts_support
mocker.spy(hsts_support, 'apply_path_filter')
mocker.spy(hsts_support, 'build_criteria')
from pyramid.response import Response
from pyramid_secure_response.hsts_support import (
apply_path_filter,
build_criteria,
)
from pyramid_secure_response.util import get_config
dummy_request.url = 'http://example.org/'
dummy_request.registry.settings = {
'pyramid_secure_response.hsts_support.enabled': 'True',
'pyramid_secure_response.hsts_support.max_age': '31536000',
'pyramid_secure_response.hsts_support.include_subdomains': 'True',
'pyramid_secure_response.hsts_support.preload': 'True',
'pyramid_secure_response.hsts_support.proto_header': '',
'pyramid_secure_response.hsts_support.ignore_paths': '\n',
}
handler_stub = mocker.stub(name='handler_stub')
handler_stub.return_value = Response(status=200)
hsts_support_tween = tween(handler_stub, dummy_request.registry)
res = hsts_support_tween(dummy_request)
# pylint: disable=no-member
assert 1 == handler_stub.call_count
assert 1 == apply_path_filter.call_count
apply_path_filter.assert_called_once_with(dummy_request, tuple())
assert 1 == build_criteria.call_count
config = get_config(dummy_request.registry)
build_criteria.assert_called_once_with(
dummy_request, proto_header=config.hsts_support.proto_header)
assert 'Strict-Transport-Security' not in res.headers
def test_hsts_tween_with_ssl_request_plus_none_ssl_extra_header(
mocker, dummy_request):
from pyramid_secure_response import hsts_support
mocker.spy(hsts_support, 'apply_path_filter')
mocker.spy(hsts_support, 'build_criteria')
from pyramid.response import Response
from pyramid_secure_response.hsts_support import (
apply_path_filter,
build_criteria,
)
from pyramid_secure_response.util import get_config
dummy_request.url = 'https://example.org/'
dummy_request.headers['X-Forwarded-Proto'] = 'http'
dummy_request.registry.settings = {
'pyramid_secure_response.hsts_support.enabled': 'True',
'pyramid_secure_response.hsts_support.max_age': '3600',
'pyramid_secure_response.hsts_support.include_subdomains': 'True',
'pyramid_secure_response.hsts_support.preload': 'True',
'pyramid_secure_response.hsts_support.proto_header':
'X-Forwarded-Proto',
'pyramid_secure_response.hsts_support.ignore_paths': '\n',
}
handler_stub = mocker.stub(name='handler_stub')
handler_stub.return_value = Response(status=200)
hsts_support_tween = tween(handler_stub, dummy_request.registry)
res = hsts_support_tween(dummy_request)
# pylint: disable=no-member
assert 1 == handler_stub.call_count
assert 1 == apply_path_filter.call_count
apply_path_filter.assert_called_once_with(dummy_request, tuple())
assert 1 == build_criteria.call_count
config = get_config(dummy_request.registry)
build_criteria.assert_called_once_with(
dummy_request, proto_header=config.hsts_support.proto_header)
assert 'Strict-Transport-Security' not in res.headers
def test_hsts_tween_with_ssl_request(mocker, dummy_request):
from pyramid_secure_response import hsts_support
mocker.spy(hsts_support, 'apply_path_filter')
mocker.spy(hsts_support, 'build_criteria')
from pyramid.response import Response
from pyramid_secure_response.hsts_support import (
apply_path_filter,
build_criteria,
)
from pyramid_secure_response.util import get_config
dummy_request.url = 'https://example.org/'
dummy_request.registry.settings = {
'pyramid_secure_response.hsts_support.enabled': 'True',
'pyramid_secure_response.hsts_support.max_age': '300', # 5 minutes.
'pyramid_secure_response.hsts_support.include_subdomains': 'True',
'pyramid_secure_response.hsts_support.preload': 'True',
'pyramid_secure_response.hsts_support.proto_header': '',
'pyramid_secure_response.hsts_support.ignore_paths': '\n',
}
handler_stub = mocker.stub(name='handler_stub')
handler_stub.return_value = Response(status=200)
hsts_support_tween = tween(handler_stub, dummy_request.registry)
res = hsts_support_tween(dummy_request)
# pylint: disable=no-member
assert 1 == handler_stub.call_count
assert 1 == apply_path_filter.call_count
apply_path_filter.assert_called_once_with(dummy_request, tuple())
assert 1 == build_criteria.call_count
config = get_config(dummy_request.registry)
build_criteria.assert_called_once_with(
dummy_request, proto_header=config.hsts_support.proto_header)
assert 'Strict-Transport-Security' in res.headers
assert 'max-age=300; includeSubDomains; preload' == \
res.headers['Strict-Transport-Security']
def test_hsts_tween_with_ssl_request_plus_extra_header_check(
mocker, dummy_request):
from pyramid_secure_response import hsts_support
mocker.spy(hsts_support, 'apply_path_filter')
mocker.spy(hsts_support, 'build_criteria')
from pyramid.response import Response
from pyramid_secure_response.hsts_support import (
apply_path_filter,
build_criteria,
)
from pyramid_secure_response.util import get_config
dummy_request.url = 'https://example.org/'
dummy_request.headers['X-Forwarded-Proto'] = 'https'
dummy_request.registry.settings = {
'pyramid_secure_response.hsts_support.enabled': 'True',
'pyramid_secure_response.hsts_support.max_age': '604800', # 1 week
'pyramid_secure_response.hsts_support.include_subdomains': 'True',
'pyramid_secure_response.hsts_support.preload': 'True',
'pyramid_secure_response.hsts_support.proto_header':
'X-Forwarded-Proto',
'pyramid_secure_response.hsts_support.ignore_paths': '\n',
}
handler_stub = mocker.stub(name='handler_stub')
handler_stub.return_value = Response(status=200)
hsts_support_tween = tween(handler_stub, dummy_request.registry)
res = hsts_support_tween(dummy_request)
# pylint: disable=no-member
assert 1 == handler_stub.call_count
assert 1 == apply_path_filter.call_count
apply_path_filter.assert_called_once_with(dummy_request, tuple())
assert 1 == build_criteria.call_count
config = get_config(dummy_request.registry)
build_criteria.assert_called_once_with(
dummy_request, proto_header=config.hsts_support.proto_header)
assert 'Strict-Transport-Security' in res.headers
assert 'max-age=604800; includeSubDomains; preload' == \
res.headers['Strict-Transport-Security']
| 1.945313 | 2 |
web/urls.py | odinje/yactff | 1 | 12788360 | <gh_stars>1-10
from django.urls import re_path, path
from django.contrib.auth import views as auth_views
from web import views
from django.conf import settings
from web.forms import LoginForm
LoginForm.signup_open = settings.SIGNUP_OPEN
urlpatterns = [
path("", views.page, name="index"),
path("challenges/", views.challenges, name="challenges"),
path("challenge/<int:id>", views.challenge, name="challenge"),
path("admin/challenge/action/add", views.challenge_add, name="challenge_add"),
path("admin/submission/action/remove/<int:id>", views.submission_remove, name="submission_remove"),
path("scoreboard/", views.scoreboard, name="scoreboard"),
path("scoreboard.json/", views.scoreboard_json, name="scoreboard_json"),
re_path(r"^page/(?P<path>\w+)/$", views.page, name="page"),
path("admin/page/action/add/", views.page_add, name="page_add"),
path("admin/page/action/remove/<int:id>", views.page_remove, name="page_remove"),
path("admin/game/pause", views.pause_game, name="pause_game"),
path("admin/game/export", views.export_game_csv, name="export_game_csv"),
path("login/", auth_views.login, {"template_name": "web/login.html", "authentication_form": LoginForm}, name='login'),
path("signup/", views.signup, name='signup'),
re_path(r"^user/activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$", views.user_activate, name="user_activate"),
path("user/logout", auth_views.logout, {'next_page': '/'}, name='logout'),
path("user/profile", views.user_profile, name="user_profile"),
path("user/password/reset", views.user_password_reset, name="user_password_reset"),
re_path(r"^user/password/reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$", views.user_password_reset_confirm, name="user_password_reset_confirm"),
path("admin/user/all", views.user_all, name="user_all"),
path("admin/user/<int:id>", views.user_show, name="user_show"),
path("team/profile", views.team_profile, name="team_profile"),
path("team/<int:id>", views.public_team_profile, name="public_team_profile"),
path("team/join", views.team_join, name="team_join"),
path("team/create", views.team_create, name="team_create")
]
#http://garmoncheg.blogspot.no/2012/07/django-resetting-passwords-with.html
| 1.84375 | 2 |
desktop/core/ext-py/future-0.16.0/docs/futureext.py | kokosing/hue | 908 | 12788361 | # -*- coding: utf-8 -*-
"""
Python-Future Documentation Extensions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for automatically documenting filters and tests.
Based on the Jinja2 documentation extensions.
:copyright: Copyright 2008 by <NAME>.
:license: BSD.
"""
import collections
import os
import re
import inspect
from itertools import islice
from types import BuiltinFunctionType
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx.ext.autodoc import prepare_docstring
from sphinx.application import TemplateBridge
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
def parse_rst(state, content_offset, doc):
node = nodes.section()
# hack around title style bookkeeping
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
state.memo.title_styles = []
state.memo.section_level = 0
state.nested_parse(doc, content_offset, node, match_titles=1)
state.memo.title_styles = surrounding_title_styles
state.memo.section_level = surrounding_section_level
return node.children
class FutureStyle(Style):
title = 'Future Style'
default_style = ""
styles = {
Comment: 'italic #0B6A94', # was: #0066ff',
Comment.Preproc: 'noitalic #B11414',
Comment.Special: 'italic #505050',
Keyword: 'bold #D15E27',
Keyword.Type: '#D15E27',
Operator.Word: 'bold #B80000',
Name.Builtin: '#333333',
Name.Function: '#333333',
Name.Class: 'bold #333333',
Name.Namespace: 'bold #333333',
Name.Entity: 'bold #363636',
Name.Attribute: '#686868',
Name.Tag: 'bold #686868',
Name.Decorator: '#686868',
String: '#AA891C',
Number: '#444444',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: '#F00 bg:#FAA'
}
def setup(app):
pass
# uncomment for inline toc. links are broken unfortunately
##app.connect('doctree-resolved', inject_toc)
| 2.140625 | 2 |
src/code/data_analysis.py | ekholabs/pandas_tutorial | 0 | 12788362 | <reponame>ekholabs/pandas_tutorial
"""
Organisation: ekholabs
Author: <EMAIL>
"""
import matplotlib.pyplot as plt
from utils import dataset as ds
'''
Let's start by loading our 'gapminder' dataset.
'''
DF = ds.load_gapminder()
'''
Let's now do some simple statistical analysis on the data.
'''
def mean_life_expectancy(plot = False):
average = DF.groupby('year')['lifeExp'].mean()
print("Average of life expectancy:", '\n', average, '\n')
if plot:
average.plot()
plt.show()
def life_exp_continent_percapt():
average = DF.groupby(['year', 'continent'])[['lifeExp', 'gdpPercap']].mean()
print("Average of life expectancy per continent and GDP percapta:", '\n', average, '\n')
def countries_per_continent():
nr_countries = DF.groupby('continent')['country'].nunique()
print('# of countries per continent:', '\n', nr_countries, '\n')
if __name__ == '__main__':
mean_life_expectancy()
life_exp_continent_percapt()
countries_per_continent() | 3.515625 | 4 |
footmark/ram/ram.py | konstantin-kornienko/footmark | 18 | 12788363 | """
Represents Alibaba Cloud RAM resources (users, groups, roles, policies and login profiles)
"""
from footmark.ram.ramobject import TaggedRAMObject
class User(TaggedRAMObject):
def __init__(self, connection=None):
super(User, self).__init__(connection)
def __repr__(self):
return 'Ram:%s' % self.id
def __getattr__(self, name):
if name == 'name':
return self.user_name
if name == 'mobile_phone':
return self.phone
def __setattr__(self, name, value):
if name == 'user_name':
self.name = value
if name == 'user_id':
self.id = value
if name == 'mobile_phone':
self.phone = value
if name == 'user':
for k, v in value.items():
super(TaggedRAMObject, self).__setattr__(k, v)
super(TaggedRAMObject, self).__setattr__(name, value)
def read(self):
ram = {}
for name, value in list(self.__dict__.items()):
if name in ["connection", "region_id", "region", "request_id"]:
continue
if name == 'user':
for k, v in value.items():
ram[k] = v
setattr(self, k, v)
continue
ram[name] = value
return ram
def delete(self):
return self.connection.delete_user(UserName=self.name)
def update(self, **kwargs):
params = {}
if kwargs.get('new_user_name') and kwargs.get('new_user_name') != self.name:
params['new_user_name'] = kwargs.get('new_user_name')
if kwargs.get('mobile_phone') and kwargs.get('mobile_phone') != self.mobile_phone:
params['new_mobile_phone'] = kwargs.get('mobile_phone')
if kwargs.get('display_name') and kwargs.get('display_name') != self.display_name:
params['new_display_name'] = kwargs.get('display_name')
if kwargs.get('email') and kwargs.get('email') != self.email:
params['new_email'] = kwargs.get('email')
if kwargs.get('comments') and kwargs.get('comments') != self.comments:
params['new_comments'] = kwargs.get('comments')
if params:
params['user_name'] = self.name
params['new_user_name'] = kwargs.get('new_user_name') if kwargs.get('new_user_name') else params['user_name']
return self.connection.update_user(**params)
return None
class Profile(TaggedRAMObject):
def __init__(self, connection=None):
super(Profile, self).__init__(connection)
def __repr__(self):
return 'Profile:%s' % self.id
def __getattr__(self, name):
if name == 'name':
return self.user_name
def __setattr__(self, name, value):
if name == 'user_name':
self.name = value
super(TaggedRAMObject, self).__setattr__(name, value)
def get(self):
return self.connection.get_login_profile(user_name=self.user_name)
def read(self):
ram = {}
for name, value in list(self.__dict__.items()):
if name in ["connection", "region_id", "region", "request_id"]:
continue
if name == 'login_profile':
for key, value in value.items():
ram[key] = value
continue
ram[name] = value
return ram
def delete(self):
return self.connection.delete_login_profile(user_name=self.name)
def update(self, **kwargs):
if kwargs.get('password_reset_required') != self.login_profile['password_reset_required'] or kwargs.get('mfa_bind_required') != self.login_profile['mfabind_required'] or kwargs.get('new_password'):
if kwargs.get('new_password'):
kwargs['password'] = kwargs.get('new_password')
return self.connection.update_login_profile(**kwargs)
return False
class Access(TaggedRAMObject):
def __init__(self, connection=None):
super(Access, self).__init__(connection)
def __repr__(self):
return 'Access:%s' % self.id
def __getattr__(self, name):
pass
def __setattr__(self, name, value):
super(TaggedRAMObject, self).__setattr__(name, value)
def read(self):
access_key = {}
for name, value in list(self.__dict__.items()):
if name in ["connection", "region_id", "region", "request_id"]:
continue
if name == 'access_key':
for key, value in value.items():
access_key[key] = value
continue
access_key[name] = value
return access_key
class Group(TaggedRAMObject):
def __init__(self, connection=None):
super(Group, self).__init__(connection)
def __repr__(self):
return 'Group:%s' % self.id
def __getattr__(self, name):
pass
def __setattr__(self, name, value):
if name == 'group_name':
self.name = value
super(TaggedRAMObject, self).__setattr__(name, value)
def read(self):
group = {}
for name, value in list(self.__dict__.items()):
if name in ["connection", "region_id", "region", "request_id"]:
continue
group[name] = value
return group
def delete(self):
return self.connection.delete_group(group_name=self.name)
def update(self, comments=None, new_group_name=None):
params = {}
if comments and comments != self.comments:
params['new_comments'] = comments
if new_group_name and new_group_name != self.name:
params['new_group_name'] = new_group_name
if params:
params['group_name'] = self.name
return self.connection.update_group(**params)
return False
def add_user(self, user_name=None):
users = self.connection.list_users_for_group(group_name=self.name)
flag = False
for user in users:
if user.user_name == user_name:
flag = True
if not flag:
return self.connection.add_user_to_group(user_name=user_name, group_name=self.name)
return False
def remove_user(self, user_name=None):
users = self.connection.list_users_for_group(group_name=self.name)
flag = False
for user in users:
if user.user_name == user_name:
flag = True
if flag:
return self.connection.remove_user_from_group(user_name=user_name, group_name=self.name)
return False
class Role(TaggedRAMObject):
def __init__(self, connection=None):
super(Role, self).__init__(connection)
def __repr__(self):
return 'Role:%s' % self.id
def __getattr__(self, name):
if name == 'name':
return self.role_name
def __setattr__(self, name, value):
if name == 'role_name':
self.name = value
super(TaggedRAMObject, self).__setattr__(name, value)
def read(self):
role = {}
for name, value in list(self.__dict__.items()):
if name in ["connection", "region_id", "region", "request_id"]:
continue
if name == 'role':
for k, v in value.items():
role[k] = v
continue
role[name] = value
return role
def get(self):
return self.connection.get_role(role_name=self.name)
def delete(self):
return self.connection.delete_role(role_name=self.name)
def update_policy(self, policy=None):
params = {}
role_policy = self.connection.get_role(role_name=self.name).read()['assume_role_policy_document']
role_policy = role_policy.replace('\n', '').replace(' ', '')
policy = policy.replace('\n', '').replace(' ', '')
if policy and policy != role_policy:
params['new_assume_role_policy_document'] = policy
if params:
params['role_name'] = self.name
return self.connection.update_role(**params)
return False
class Policy(TaggedRAMObject):
def __init__(self, connection=None):
super(Policy, self).__init__(connection)
def __repr__(self):
return 'Policy:%s' % self.id
def __getattr__(self, name):
if name == 'name':
return self.policy_name
def __setattr__(self, name, value):
if name == 'policy_name':
self.name = value
super(TaggedRAMObject, self).__setattr__(name, value)
def read(self):
policy = {}
for name, value in list(self.__dict__.items()):
if name in ["connection", "region_id", "region", "request_id"]:
continue
if name == 'policy':
for k, v in value.items():
policy[k] = v
continue
if name == 'policy_name':
policy['name'] = value
# if name == 'policies':
# for k, v in value.items():
# policy[k] = v
# continue
policy[name] = value
return policy
# def get(self):
# return self.connection.get_role(role_name=self.name)
def delete(self):
return self.connection.delete_policy(policy_name=self.name)
def attach_policy_to_user(self, user_name=None, policy_type=None):
policy_user = self.connection.list_policies_for_user(user_name=user_name)
params = {'policy_name': self.name}
attach = True
for policy in policy_user:
if policy.name == self.name:
attach = False
if attach:
params['user_name'] = user_name
params['policy_type'] = policy_type
return self.connection.attach_policy_to_user(**params)
return False
def attach_policy_to_group(self, group_name=None, policy_type=None):
policy_group = self.connection.list_policies_for_group(group_name=group_name)
params = {'policy_name': self.name}
attach = True
for policy in policy_group:
if policy.name == self.name:
attach = False
if attach:
params['group_name'] = group_name
params['policy_type'] = policy_type
return self.connection.attach_policy_to_group(**params)
return False
def attach_policy_to_role(self, role_name=None, policy_type=None):
policy_role = self.connection.list_policies_for_role(role_name=role_name)
params = {'policy_name': self.name}
attach = True
for policy in policy_role:
if policy.name == self.name:
attach = False
if attach:
params['role_name'] = role_name
params['policy_type'] = policy_type
return self.connection.attach_policy_to_role(**params)
return False
def detach_policy_from_user(self, user_name=None, policy_type=None):
policy_user = self.connection.list_policies_for_user(user_name=user_name)
params = {'policy_name': self.name}
detach = False
for policy in policy_user:
if policy.name == self.name:
detach = True
if detach:
params['user_name'] = user_name
params['policy_type'] = policy_type
return self.connection.detach_policy_from_user(**params)
return False
def detach_policy_from_group(self, group_name=None, policy_type=None):
policy_group = self.connection.list_policies_for_group(group_name=group_name)
params = {'policy_name': self.name}
detach = False
for policy in policy_group:
if policy.name == self.name:
detach = True
if detach:
params['group_name'] = group_name
params['policy_type'] = policy_type
return self.connection.detach_policy_from_group(**params)
return False
def detach_policy_from_role(self, role_name=None, policy_type=None):
policy_role = self.connection.list_policies_for_role(role_name=role_name)
params = {'policy_name': self.name}
detach = False
for policy in policy_role:
if policy.name == self.name:
detach = True
if detach:
params['role_name'] = role_name
params['policy_type'] = policy_type
return self.connection.detach_policy_from_role(**params)
return False
| 2.4375 | 2 |
core/structures/structures.py | Boooru/BooruViu | 1 | 12788364 | from time import time
class CallController:
def __init__(self, max_call_interval):
self._max_call_interval = max_call_interval
self._last_call = time()
def __call__(self, function):
def wrapped(*args, **kwargs):
now = time()
if now - self._last_call > self._max_call_interval:
self._last_call = now
                return function(*args, **kwargs)
return wrapped
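# Hedged usage sketch (not part of the original file): throttling a callback so
# it only executes when more than max_call_interval seconds have passed since
# construction or the last executed call; calls arriving sooner are skipped.
#
#   @CallController(max_call_interval=1.0)
#   def on_event(msg):
#       print(msg)
#
#   on_event("first")    # skipped if within 1s of constructing the controller
#   on_event("second")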
class Wrap:
__func: callable = None
__a = None
__b = None
def __init__(self, func, a, b):
self.__func = func
self.__a = a
self.__b = b
print("Building wrap: " + str(func))
def get(self):
return self.__func(self.__a, self.__b)
| 3.46875 | 3 |
tachi_local/bakaupdates/utils.py | MisaghM/Tachi-Local-Details | 1 | 12788365 | import re
import urllib.parse
from typing import Union
_scheme_regex = re.compile(r"^(?:https?)?$", flags=re.IGNORECASE | re.ASCII)
_netloc_regex = re.compile(r"^(?:www\.)?mangaupdates\.com$", flags=re.IGNORECASE | re.ASCII)
_query_regex = re.compile(r"id=(\d+)(?:&|$)", flags=re.IGNORECASE | re.ASCII)
def get_id_from_url(url: str) -> str:
if "//" not in url:
url = "//" + url
url_res = urllib.parse.urlparse(url)
if not _scheme_regex.match(url_res.scheme):
raise ValueError("URL is not valid.")
if not _netloc_regex.match(url_res.netloc):
raise ValueError("URL is not a valid mangaupdates.com link.")
if url_res.path != "/series.html":
raise ValueError("URL is not a manga series page.")
id_query = _query_regex.search(url_res.query)
if id_query is None:
raise ValueError("ID not found in the URL.")
return id_query.group(1)
def get_url_by_id(id_: Union[str, int]) -> str:
return "https://www.mangaupdates.com/series.html?id=" + str(id_)
| 2.9375 | 3 |
timing_tests/time_vertex_search.py | vinay-swamy/TALON | 0 | 12788366 | <reponame>vinay-swamy/TALON
import timeit
import sqlite3
import sys
sys.path.append("..")
import talonQ as talon
def run_vertex_queries(location_dict, n):
""" Run vertex query n times"""
for i in range(0,n):
talon.search_for_vertex_at_pos("chr1", 1, location_dict)
def run_timetest(location_dict, n):
    fn = lambda: run_vertex_queries(location_dict, n)
    out = "Time for %d queries: %f seconds"
    print(out % (n, timeit.timeit(fn, number=1)))
def main():
#conn = sqlite3.connect("../qtests/scratch/toy.db")
conn = sqlite3.connect("../../Temp_TALON_database_experiments/unmodified_full_gencode_v24_12-20-18.db")
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
location_dict = talon.make_location_dict("hg38", cursor)
location_dict_size = int(sys.getsizeof(location_dict))/1000000
print("Size of location dict: %f MB" % location_dict_size)
print("------------------------------------------")
    run_timetest(location_dict, 1)
    run_timetest(location_dict, 10)
    run_timetest(location_dict, 100)
    run_timetest(location_dict, 1000)
    run_timetest(location_dict, 10000)
    run_timetest(location_dict, 100000)
conn.close()
if __name__ == '__main__':
main()
| 2.78125 | 3 |
xml_stream/data_types.py | dfilipp/xml_stream | 8 | 12788367 | """Module containing the data types needed for the module to work"""
from typing import Iterable, List, Dict, Union, Any
from xml.etree.ElementTree import Element
from ._utils import get_unique_and_repeated_sub_elements, \
get_xml_element_attributes_as_dict, add_common_sub_elements
def group_elements_by_tag(elements: List[Element]) -> Dict[str, List[Element]]:
"""Returns a dictionary with elements of the same tag grouped together in lists"""
unique_tags = set([sub_element.tag for sub_element in elements])
return {
tag: XmlListElement(filter((lambda x: x.tag == tag), elements))
for tag in unique_tags
}
def _convert_to_dict_or_str(elements_map: Dict[str, Element]) -> Dict[str, Union[str, Dict[Any, Any]]]:
"""Combines a dictionary of xml elements into a dictionary of dicts or str"""
return {
key: XmlDictElement(value) if value or value.items() else value.text
for key, value in elements_map.items()
}
class XmlListElement(list):
"""An XML List element"""
def __init__(self, items: Iterable):
super().__init__()
for item in items:
# items without SubElements return False as __nonzero__method is not defined on Element
if item:
unique_elements_map, repeated_elements = get_unique_and_repeated_sub_elements(item)
if len(repeated_elements) == 0:
# append a dict
self.append(XmlDictElement(item))
else:
# append a list
repeated_elements = add_common_sub_elements(
elements=repeated_elements, common_sub_elements=unique_elements_map.values())
self.append(XmlListElement(repeated_elements))
elif item.text:
# append a text/number
text = item.text.strip()
if text:
self.append(text)
class XmlDictElement(dict):
"""An XML dict element"""
def __init__(self, xml_element: Element):
super().__init__()
self.update(get_xml_element_attributes_as_dict(xml_element))
unique_root_elements_map, repeated_root_elements = get_unique_and_repeated_sub_elements(xml_element)
if len(repeated_root_elements) > 0:
repeated_root_elements = add_common_sub_elements(
elements=repeated_root_elements, common_sub_elements=unique_root_elements_map.values())
grouped_elements = group_elements_by_tag(elements=repeated_root_elements)
self.update(grouped_elements)
else:
for item in xml_element:
item_attributes_dict = get_xml_element_attributes_as_dict(item)
# if the item has sub elements
if item:
unique_elements_map, repeated_elements = get_unique_and_repeated_sub_elements(item)
if len(repeated_elements) == 0:
value = XmlDictElement(item)
else:
unique_elements_dict = _convert_to_dict_or_str(unique_elements_map)
value = {**unique_elements_dict, **group_elements_by_tag(elements=repeated_elements)}
value.update(item_attributes_dict)
# if item has attributes but no sub elements
elif len(item_attributes_dict) > 0:
if item.text:
item_attributes_dict['_value'] = item.text
value = item_attributes_dict
# if item has no attributes and no sub elements
else:
value = item.text
self.update({item.tag: value})
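

# A hedged usage sketch (not part of the original module): it assumes the helpers
# imported from ._utils behave as their names suggest, and only illustrates the
# intended mapping from an XML element to plain dict/str values.
def _example_to_dict() -> dict:
    from xml.etree.ElementTree import fromstring

    element = fromstring(
        "<book id='1'><title>Example</title><author>Someone</author></book>"
    )
    # attributes become keys; leaf child elements map to their text
    return XmlDictElement(element)  # e.g. {'id': '1', 'title': 'Example', 'author': 'Someone'}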
| 3.265625 | 3 |
aidapy/meta/__init__.py | drdavis/aidapy | 0 | 12788368 | from .meta import get_proc_gen
from .meta import get_dsids
from .meta import proc_gen_from_file
from .meta import sort_files_from_txt
from .meta import _dsid_table
from .meta import _systematic_trees
from .meta import _systematic_ud_prefixes
from .meta import _systematic_weights
from .meta import _systematic_singles
from .meta import _systematic_btag_weights
__all__ = [
'get_proc_gen',
'get_dsids',
'proc_gen_from_file',
'sort_files_from_txt'
]
| 1.09375 | 1 |
tests/unit/features/test_schemas.py | Flagsmith/flagsmith-engine | 4 | 12788369 | import pytest
from flag_engine.features.schemas import (
FeatureStateSchema,
MultivariateFeatureOptionSchema,
MultivariateFeatureStateValueSchema,
)
from flag_engine.utils.exceptions import InvalidPercentageAllocation
def test_can_load_multivariate_feature_option_dict_without_id_field():
MultivariateFeatureOptionSchema().load({"value": 1})
def test_can_load_multivariate_feature_state_value_without_id_field():
MultivariateFeatureStateValueSchema().load(
{
"multivariate_feature_option": {"value": 1},
"percentage_allocation": 10,
}
)
def test_dumping_fs_schema_raises_invalid_percentage_allocation_for_invalid_allocation():
# Given
data = {
"multivariate_feature_state_values": [
{"multivariate_feature_option": 12, "percentage_allocation": 100},
{"multivariate_feature_option": 9, "percentage_allocation": 80},
],
"feature_state_value": "value",
}
# Then
with pytest.raises(InvalidPercentageAllocation):
FeatureStateSchema().dump(data)
def test_dumping_fs_schema_works_for_valid_allocation():
# Given
data = {
"multivariate_feature_state_values": [
{"multivariate_feature_option": 12, "percentage_allocation": 20},
{"multivariate_feature_option": 9, "percentage_allocation": 80},
],
"feature_state_value": "value",
}
# Then
FeatureStateSchema().dump(data)
| 2.125 | 2 |
src/models/base.py | thavlik/machine-learning-portfolio | 1 | 12788370 | import torch
from torch import nn, Tensor
from torch.nn import functional as F
from abc import abstractmethod
from typing import List, Callable, Union, Any, TypeVar, Tuple
from .util import reparameterize
class BaseVAE(nn.Module):
def __init__(self,
name: str,
latent_dim: int) -> None:
super(BaseVAE, self).__init__()
self.name = name
self.latent_dim = latent_dim
@abstractmethod
def encode(self, input: Tensor) -> List[Tensor]:
raise NotImplementedError
@abstractmethod
def decode(self, input: Tensor, **kwargs) -> Any:
raise NotImplementedError
def get_sandwich_layers(self) -> List[nn.Module]:
raise NotImplementedError
@abstractmethod
def get_encoder(self) -> List[nn.Module]:
raise NotImplementedError
def forward(self, x: Tensor, **kwargs) -> List[Tensor]:
mu, log_var = self.encode(x)
z = reparameterize(mu, log_var)
y = self.decode(z, **kwargs)
return [y, x, mu, log_var, z]
def sample(self,
num_samples: int,
current_device: int, **kwargs) -> Tensor:
"""
Samples from the latent space and return the corresponding
image space map.
:param num_samples: (Int) Number of samples
:param current_device: (Int) Device to run the model
:return: (Tensor)
"""
z = torch.randn(num_samples,
self.latent_dim)
z = z.to(current_device)
samples = self.decode(z)
return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x)[0]
def loss_function(self,
recons: Tensor,
input: Tensor,
mu: Tensor,
log_var: Tensor,
z: Tensor,
objective: str = 'default',
beta: float = 1.0,
gamma: float = 1.0,
target_capacity: float = 25.0,
**kwargs) -> dict:
recons_loss = F.mse_loss(recons, input)
result = {'loss': recons_loss,
'Reconstruction_Loss': recons_loss}
kld_loss = torch.mean(-0.5 * torch.sum(1 +
log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)
result['KLD_Loss'] = kld_loss
if objective == 'default':
# O.G. beta loss term applied directly to KLD
result['loss'] += beta * kld_loss
elif objective == 'controlled_capacity':
# Use controlled capacity increase from
# https://arxiv.org/pdf/1804.03599.pdf
capacity_loss = torch.abs(kld_loss - target_capacity)
result['Capacity_Loss'] = capacity_loss
result['loss'] += gamma * capacity_loss
else:
raise ValueError(f'unknown objective "{objective}"')
return result
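

# A hypothetical minimal subclass (not one of the portfolio models): it only
# sketches how encode/decode/get_encoder are expected to plug into forward();
# the layer sizes and the flattened-input assumption are invented for the example.
class _ToyVAE(BaseVAE):
    def __init__(self, latent_dim: int = 8, in_features: int = 784) -> None:
        super().__init__(name='toy_vae', latent_dim=latent_dim)
        self.fc_mu = nn.Linear(in_features, latent_dim)
        self.fc_var = nn.Linear(in_features, latent_dim)
        self.fc_out = nn.Linear(latent_dim, in_features)

    def encode(self, input: Tensor) -> List[Tensor]:
        return [self.fc_mu(input), self.fc_var(input)]

    def decode(self, input: Tensor, **kwargs) -> Tensor:
        return self.fc_out(input)

    def get_encoder(self) -> List[nn.Module]:
        return [self.fc_mu, self.fc_var]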
| 2.4375 | 2 |
Project Code/windows_connector.py | nsusas3/SU19CSE299S08G02NSU | 0 | 12788371 | import socket
host = "192.168.0.103"
port_windows = 7899
while True:
    try:
        socket_windows = socket.socket()
        socket_windows.connect((host, port_windows))
        temp = socket_windows.recv(1024).decode()
        socket_windows.close()
        if temp:
            print(temp)
    except Exception:
        continue
| 2.734375 | 3 |
app/main/atendendo.py | ATSTI/Flask-SocketIO-Chat | 0 | 12788372 | <filename>app/main/atendendo.py
#!/usr/bin/env python3
from flask import Flask, render_template
import sqlite3
from sistema_suporte import Conexao
app = Flask(__name__)
@app.route('/atendendo')
def lista_chamados(name=None):
c = Conexao()
chamadas = c.lista_chamadas
#movies = [dict(id=row[0], movie_name=row[1]) for row in cur.fetchall()]
#return render_template('test.html', chats=chamadas)
    return render_template('atendendo.html', chats=chamadas)
"""
def connect_db():
return sqlite3.connect('example.db')
def init_db():
conn = connect_db()
c = conn.cursor()
try:
c.execute('create table movies (id int, name text, category text)')
c.execute('insert into movies (id, name, category) values (?, ?, ?)', (1, 'Alien', 'sci-fi'))
c.execute('insert into movies (id, name, category) values (?, ?, ?)', (2, 'Aliens', 'sci-fi'))
c.execute('insert into movies (id, name, category) values (?, ?, ?)', (3, 'Prometheus', 'sci-fi'))
except sqlite3.OperationalError as e:
assert 'table movies already exists' in str(e)
conn.commit()
conn.close()
def main():
init_db()
"""
| 3.046875 | 3 |
cogs/commands/mod/modactions.py | DiscordGIR/GIRRewrite | 0 | 12788373 | from datetime import datetime, timedelta, timezone
import discord
import humanize
from apscheduler.jobstores.base import ConflictingIdError
from data.model import Case
from data.services import guild_service, user_service
from discord import app_commands
from discord.ext import commands
from discord.utils import escape_markdown, escape_mentions
from utils import GIRContext, cfg, transform_context
from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly
from utils.mod import (add_ban_case, add_kick_case, notify_user,
prepare_editreason_log, prepare_liftwarn_log,
prepare_mute_log, prepare_removepoints_log,
prepare_unban_log, prepare_unmute_log,
submit_public_log, warn)
from utils.views import warn_autocomplete
from utils.views.confirm import SecondStaffConfirm
class ModActions(commands.Cog):
def __init__(self, bot):
self.bot = bot
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="warn a user")
@app_commands.describe(user="User to warn")
@app_commands.describe(points="Points to warn the user with")
@app_commands.describe(reason="Reason for warning")
@transform_context
async def warn(self, ctx: GIRContext, user: ModsAndAboveMemberOrUser, points: app_commands.Range[int, 1, 600], reason: str):
if points < 1: # can't warn for negative/0 points
raise commands.BadArgument(message="Points can't be lower than 1.")
await warn(ctx, target_member=user, mod=ctx.author, points=points, reason=reason)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Kick a user")
@app_commands.describe(member="User to kick")
@app_commands.describe(reason="Reason for kicking")
@transform_context
async def kick(self, ctx: GIRContext, member: ModsAndAboveMember, reason: str) -> None:
reason = escape_markdown(reason)
reason = escape_mentions(reason)
db_guild = guild_service.get_guild()
log = add_kick_case(target_member=member, mod=ctx.author, reason=reason, db_guild=db_guild)
await notify_user(member, f"You were kicked from {ctx.guild.name}", log)
await member.kick(reason=reason)
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, db_guild, member, log)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Kick a user")
@app_commands.describe(member="User to kick")
@transform_context
async def roblox(self, ctx: GIRContext, member: ModsAndAboveMember) -> None:
reason = "This Discord server is for iOS jailbreaking, not Roblox. Please join https://discord.gg/jailbreak instead, thank you!"
db_guild = guild_service.get_guild()
log = add_kick_case(target_member=member, mod=ctx.author, reason=reason, db_guild=db_guild)
await notify_user(member, f"You were kicked from {ctx.guild.name}", log)
await member.kick(reason=reason)
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, db_guild, member, log)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Mute a user")
@app_commands.describe(member="User to mute")
@app_commands.describe(duration="Duration of the mute (i.e 10m, 1h, 1d...)")
@app_commands.describe(reason="Reason for muting")
@transform_context
async def mute(self, ctx: GIRContext, member: ModsAndAboveMember, duration: Duration, reason: str = "No reason.") -> None:
reason = escape_markdown(reason)
reason = escape_mentions(reason)
now = datetime.now(tz=timezone.utc)
delta = duration
if delta is None:
raise commands.BadArgument("Please input a valid duration!")
if member.is_timed_out():
raise commands.BadArgument("This user is already muted.")
time = now + timedelta(seconds=delta)
if time > now + timedelta(days=14):
raise commands.BadArgument("Mutes can't be longer than 14 days!")
db_guild = guild_service.get_guild()
case = Case(
_id=db_guild.case_id,
_type="MUTE",
date=now,
mod_id=ctx.author.id,
mod_tag=str(ctx.author),
reason=reason,
)
case.until = time
case.punishment = humanize.naturaldelta(
time - now, minimum_unit="seconds")
try:
await member.timeout(time, reason=reason)
ctx.tasks.schedule_untimeout(member.id, time)
except ConflictingIdError:
raise commands.BadArgument(
"The database thinks this user is already muted.")
guild_service.inc_caseid()
user_service.add_case(member.id, case)
log = prepare_mute_log(ctx.author, member, case)
await ctx.respond_or_edit(embed=log, delete_after=10)
log.remove_author()
log.set_thumbnail(url=member.display_avatar)
dmed = await notify_user(member, f"You have been muted in {ctx.guild.name}", log)
await submit_public_log(ctx, db_guild, member, log, dmed)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Unmute a user")
@app_commands.describe(member="User to unmute")
@app_commands.describe(reason="Reason for unmuting")
@transform_context
async def unmute(self, ctx: GIRContext, member: ModsAndAboveMember, reason: str) -> None:
db_guild = guild_service.get_guild()
if not member.is_timed_out():
raise commands.BadArgument("This user is not muted.")
await member.edit(timed_out_until=None)
try:
ctx.tasks.cancel_unmute(member.id)
except Exception:
pass
case = Case(
_id=db_guild.case_id,
_type="UNMUTE",
mod_id=ctx.author.id,
mod_tag=str(ctx.author),
reason=reason,
)
guild_service.inc_caseid()
user_service.add_case(member.id, case)
log = prepare_unmute_log(ctx.author, member, case)
await ctx.respond_or_edit(embed=log, delete_after=10)
dmed = await notify_user(member, f"You have been unmuted in {ctx.guild.name}", log)
await submit_public_log(ctx, db_guild, member, log, dmed)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Ban a user")
@app_commands.describe(user="User to ban")
@app_commands.describe(reason="Reason for banning")
@transform_context
async def ban(self, ctx: GIRContext, user: ModsAndAboveMemberOrUser, reason: str):
reason = escape_markdown(reason)
reason = escape_mentions(reason)
db_guild = guild_service.get_guild()
member_is_external = isinstance(user, discord.User)
# if the ID given is of a user who isn't in the guild, try to fetch the profile
if member_is_external:
if self.bot.ban_cache.is_banned(user.id):
raise commands.BadArgument("That user is already banned!")
self.bot.ban_cache.ban(user.id)
log = await add_ban_case(user, ctx.author, reason, db_guild)
if not member_is_external:
if cfg.ban_appeal_url is None:
await notify_user(user, f"You have been banned from {ctx.guild.name}", log)
else:
await notify_user(user, f"You have been banned from {ctx.guild.name}\n\nIf you would like to appeal your ban, please fill out this form: <{cfg.ban_appeal_url}>", log)
await user.ban(reason=reason)
else:
# hackban for user not currently in guild
await ctx.guild.ban(discord.Object(id=user.id))
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, db_guild, user, log)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Ban a user anonymously")
@app_commands.describe(user="User to ban")
@app_commands.describe(reason="Reason for banning")
@transform_context
async def staffban(self, ctx: GIRContext, user: ModsAndAboveMemberOrUser, reason: str):
reason = escape_markdown(reason)
reason = escape_mentions(reason)
db_guild = guild_service.get_guild()
member_is_external = isinstance(user, discord.User)
# if the ID given is of a user who isn't in the guild, try to fetch the profile
if member_is_external:
if self.bot.ban_cache.is_banned(user.id):
raise commands.BadArgument("That user is already banned!")
confirm_embed = discord.Embed(description=f"{ctx.author.mention} wants to staff ban {user.mention} with reason `{reason}`. Another Moderator needs to click Yes to submit this ban.\n\nClicking Yes means this was discussed amongst the staff team and will hide the banning Moderator. This should not be used often.", color=discord.Color.blurple())
view = SecondStaffConfirm(ctx, ctx.author)
await ctx.respond_or_edit(view=view, embed=confirm_embed)
await view.wait()
if not view.value:
await ctx.send_warning(f"Cancelled staff banning {user.mention}.")
return
self.bot.ban_cache.ban(user.id)
log = await add_ban_case(user, ctx.author, reason, db_guild)
log.set_field_at(1, name="Mod", value=f"{ctx.guild.name} Staff")
if not member_is_external:
if cfg.ban_appeal_url is None:
await notify_user(user, f"You have been banned from {ctx.guild.name}", log)
else:
await notify_user(user, f"You have been banned from {ctx.guild.name}\n\nIf you would like to appeal your ban, please fill out this form: <{cfg.ban_appeal_url}>", log)
await user.ban(reason=reason)
else:
# hackban for user not currently in guild
await ctx.guild.ban(discord.Object(id=user.id))
await ctx.interaction.message.delete()
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, db_guild, user, log)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Unban a user")
@app_commands.describe(user="User to unban")
@app_commands.describe(reason="Reason for unbanning")
@transform_context
async def unban(self, ctx: GIRContext, user: UserOnly, reason: str) -> None:
if ctx.guild.get_member(user.id) is not None:
raise commands.BadArgument(
"You can't unban someone already in the server!")
reason = escape_markdown(reason)
reason = escape_mentions(reason)
if not self.bot.ban_cache.is_banned(user.id):
raise commands.BadArgument("That user isn't banned!")
try:
await ctx.guild.unban(discord.Object(id=user.id), reason=reason)
except discord.NotFound:
raise commands.BadArgument(f"{user} is not banned.")
self.bot.ban_cache.unban(user.id)
db_guild = guild_service.get_guild()
case = Case(
_id=db_guild.case_id,
_type="UNBAN",
mod_id=ctx.author.id,
mod_tag=str(ctx.author),
reason=reason,
)
guild_service.inc_caseid()
user_service.add_case(user.id, case)
log = prepare_unban_log(ctx.author, user, case)
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, db_guild, user, log)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Purge channel messages")
@app_commands.describe(amount="Number of messages to purge")
@transform_context
async def purge(self, ctx: GIRContext, amount: app_commands.Range[int, 1, 100]) -> None:
if amount <= 0:
raise commands.BadArgument(
"Number of messages to purge must be greater than 0")
elif amount >= 100:
amount = 100
msgs = [message async for message in ctx.channel.history(limit=amount)]
await ctx.channel.purge(limit=amount)
await ctx.send_success(f'Purged {len(msgs)} messages.', delete_after=10)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Marks a warn and lifted and removes points")
@app_commands.describe(member="Member to lift warn of")
@app_commands.describe(case_id="Case ID of the warn to lift")
@app_commands.autocomplete(case_id=warn_autocomplete)
@app_commands.describe(reason="Reason for lifting the warn")
@transform_context
async def liftwarn(self, ctx: GIRContext, member: ModsAndAboveMember, case_id: str, reason: str) -> None:
cases = user_service.get_cases(member.id)
case = cases.cases.filter(_id=case_id).first()
reason = escape_markdown(reason)
reason = escape_mentions(reason)
# sanity checks
if case is None:
raise commands.BadArgument(
message=f"{member} has no case with ID {case_id}")
elif case._type != "WARN":
raise commands.BadArgument(
message=f"{member}'s case with ID {case_id} is not a warn case.")
elif case.lifted:
raise commands.BadArgument(
message=f"Case with ID {case_id} already lifted.")
u = user_service.get_user(id=member.id)
if u.warn_points - int(case.punishment) < 0:
raise commands.BadArgument(
message=f"Can't lift Case #{case_id} because it would make {member.mention}'s points negative.")
# passed sanity checks, so update the case in DB
case.lifted = True
case.lifted_reason = reason
case.lifted_by_tag = str(ctx.author)
case.lifted_by_id = ctx.author.id
case.lifted_date = datetime.now()
cases.save()
# remove the warn points from the user in DB
user_service.inc_points(member.id, -1 * int(case.punishment))
dmed = True
# prepare log embed, send to #public-mod-logs, user, channel where invoked
log = prepare_liftwarn_log(ctx.author, member, case)
dmed = await notify_user(member, f"Your warn has been lifted in {ctx.guild}.", log)
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, guild_service.get_guild(), member, log, dmed)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Edit case reason")
@app_commands.describe(member="Member to edit case of")
@app_commands.describe(case_id="Case ID of the case to edit")
@app_commands.autocomplete(case_id=warn_autocomplete)
@app_commands.describe(new_reason="New reason for the case")
@transform_context
async def editreason(self, ctx: GIRContext, member: ModsAndAboveMemberOrUser, case_id: str, new_reason: str) -> None:
# retrieve user's case with given ID
cases = user_service.get_cases(member.id)
case = cases.cases.filter(_id=case_id).first()
new_reason = escape_markdown(new_reason)
new_reason = escape_mentions(new_reason)
# sanity checks
if case is None:
raise commands.BadArgument(
message=f"{member} has no case with ID {case_id}")
old_reason = case.reason
case.reason = new_reason
case.date = datetime.now()
cases.save()
dmed = True
log = prepare_editreason_log(ctx.author, member, case, old_reason)
dmed = await notify_user(member, f"Your case was updated in {ctx.guild.name}.", log)
public_chan = ctx.guild.get_channel(
guild_service.get_guild().channel_public)
found = False
async for message in public_chan.history(limit=200):
if message.author.id != ctx.me.id:
continue
if len(message.embeds) == 0:
continue
embed = message.embeds[0]
if embed.footer.text is None:
continue
if len(embed.footer.text.split(" ")) < 2:
continue
if f"#{case_id}" == embed.footer.text.split(" ")[1]:
for i, field in enumerate(embed.fields):
if field.name == "Reason":
embed.set_field_at(
i, name="Reason", value=new_reason)
await message.edit(embed=embed)
found = True
if found:
await ctx.respond_or_edit(f"We updated the case and edited the embed in {public_chan.mention}.", embed=log, delete_after=10)
else:
await ctx.respond_or_edit(f"We updated the case but weren't able to find a corresponding message in {public_chan.mention}!", embed=log, delete_after=10)
log.remove_author()
log.set_thumbnail(url=member.display_avatar)
await public_chan.send(member.mention if not dmed else "", embed=log)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Edit case reason")
@app_commands.describe(member="Member to remove points from")
@app_commands.describe(points="Amount of points to remove")
@app_commands.describe(reason="Reason for removing points")
@transform_context
async def removepoints(self, ctx: GIRContext, member: ModsAndAboveMember, points: app_commands.Range[int, 1, 600], reason: str) -> None:
reason = escape_markdown(reason)
reason = escape_mentions(reason)
if points < 1:
raise commands.BadArgument("Points can't be lower than 1.")
u = user_service.get_user(id=member.id)
if u.warn_points - points < 0:
raise commands.BadArgument(
message=f"Can't remove {points} points because it would make {member.mention}'s points negative.")
# passed sanity checks, so update the case in DB
# remove the warn points from the user in DB
user_service.inc_points(member.id, -1 * points)
db_guild = guild_service.get_guild()
case = Case(
_id=db_guild.case_id,
_type="REMOVEPOINTS",
mod_id=ctx.author.id,
mod_tag=str(ctx.author),
punishment=str(points),
reason=reason,
)
# increment DB's max case ID for next case
guild_service.inc_caseid()
# add case to db
user_service.add_case(member.id, case)
# prepare log embed, send to #public-mod-logs, user, channel where invoked
log = prepare_removepoints_log(ctx.author, member, case)
dmed = await notify_user(member, f"Your points were removed in {ctx.guild.name}.", log)
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, db_guild, member, log, dmed)
async def setup(bot):
await bot.add_cog(ModActions(bot))
| 2.109375 | 2 |
src/__init__.py | cynth-s/Information_retrieval | 191 | 12788374 | <reponame>cynth-s/Information_retrieval
__author__ = '<NAME>'
__all__ = ['invdx', 'parse', 'query', 'rank']
| 1.15625 | 1 |
ccat/controller/strategy/TODO_sample_strategy.py | bhaveshrawal/Python | 3 | 12788375 | '''
------------------------------------------------------------------------
MOMENTUM.PY
------------------------------------------------------------------------
If the Extreme, Overtraded, or Wix indicators issue long or short signals,
trigger the executor.
'''
'''
------------------------------------------------------------------------
IMPORTS
------------------------------------------------------------------------
'''
# Standard library imports
pass
# Third party imports
import pandas as pd
import numpy as np
# Local application imports
from ccat import wix
from ccat import overtraded
from ccat import extreme
# from ccat import height
# from ccat import ema
# from ccat import df_x_df
'''
------------------------------------------------------------------------
CLASSES
------------------------------------------------------------------------
'''
class Momentum:
def __init__(self,
df_bucket:pd.DataFrame,
len_ma_top_wix:int,
len_ma_bottom_wix:int,
len_ma_top_Extreme:int,
len_ma_bottom_Extreme:int,
len_rsi:int,
overbought:int,
oversold:int,
peak:int,
trough:int,
col:str = 'price_close'):
# Shared
self.df_bucket = df_bucket
# Wix
self.len_ma_top_wix = len_ma_top_wix
self.len_ma_bottom_wix = len_ma_bottom_wix
# Extreme
self.len_ma_top_Extreme = len_ma_top_Extreme
self.len_ma_bottom_Extreme = len_ma_bottom_Extreme
# Overtraded
self.len_rsi = len_rsi
self.overbought = overbought
self.oversold = oversold
self.peak = peak
self.trough = trough
self.col = col
def wixes(self):
'''Get Wix signal'''
w = wix.Wix(
df_bucket = self.df_bucket,
len_ma_top = self.len_ma_top_wix,
len_ma_bottom = self.len_ma_bottom_wix)
df_wix = w.get()
return df_wix
def extreme(self):
'''Get Extreme signal
'''
e = extreme.Extreme(
df_bucket = self.df_bucket,
len_ma_top = self.len_ma_top_Extreme,
len_ma_bottom = self.len_ma_bottom_Extreme)
df_extreme = e.get()
return df_extreme
def overtraded(self):
'''Get Overtraded signal
'''
o = overtraded.Overtraded(
df_bucket = self.df_bucket,
len_rsi = self.len_rsi,
overbought = self.overbought,
oversold = self.oversold,
peak = self.peak,
trough = self.trough,
col = self.col)
df_overtraded = o.get()
return df_overtraded
def merge(self):
        ''' Merge the Wix, Overtraded and Extreme signals into the df_out dataframe
'''
# Initialize df_out dataframe
self.df_out = pd.DataFrame()
# Read the individual signals used in the strategy
df_w = self.wixes()
df_o = self.overtraded()
df_e = self.extreme()
# Merge the three dataframes
# self.df_out = pd.merge(df_w, df_o, on='id')
# Merge the three dataframes
self.df_out = pd.merge(
pd.merge(
df_w,
df_o,
on='id'),
df_e,on='id')
cols = [
'signal_wix',
'signal_overtraded',
'signal_extreme']
# Compiled signal
self.df_out['signal'] = self.df_out[cols].sum(axis=1)
def signals(self):
'''Triggers the chain of methods and returns the df_out
dataframe
'''
self.merge()
return self.df_out
'''
------------------------------------------------------------------------
__MAIN__
------------------------------------------------------------------------
'''
if __name__ == '__main__':
from ccat import config as cnf
from ccat import bucket
# Create a momentum strategy for the 1d BTCUSD candles on Bitmex
# Settings
market_id = 1 # Bitmex
timeframe_id = 6 # 1d
time_end = cnf.now()
count = 500
len_ma_top_wix = 40
len_ma_bottom_wix = 40
len_ma_top_Extreme = 40
len_ma_bottom_Extreme = 40
len_rsi = 40
overbought = 60
oversold = 40
peak = 92
trough = 32
col = 'price_close'
# Get a bucket object from Bucket
b = bucket.Bucket(market_id=market_id, timeframe_id=timeframe_id)
# Update the table
b.update()
# Get a dataframe with all the data for the market and timeframe
df_bucket = b.read_until(count = count, time_end = time_end)
m = Momentum(
df_bucket = df_bucket,
len_ma_top_wix=len_ma_top_wix,
len_ma_bottom_wix=len_ma_bottom_wix,
len_ma_top_Extreme=len_ma_top_Extreme,
len_ma_bottom_Extreme=len_ma_bottom_Extreme,
len_rsi=len_rsi,
overbought=overbought,
oversold=oversold,
peak=peak,
trough=trough,
col=col)
df_signal = m.signals()
df_s = df_signal[['id', 'signal']]
df_b = df_bucket[['id', 'time_close', 'price_close']]
# print(df_s)
df_out = pd.merge(df_b, df_s, on='id')
print(df_out)
| 1.804688 | 2 |
arrayqueues/shared_arrays.py | portugueslab/arrayqueues | 27 | 12788376 | from datetime import datetime
from multiprocessing import Array
from queue import Empty, Full
import numpy as np

# try:
from arrayqueues.portable_queue import PortableQueue  # as Queue
# except AttributeError:
#     from multiprocessing import Queue
class ArrayView:
def __init__(self, array, max_bytes, dtype, el_shape, i_item=0):
self.dtype = dtype
self.el_shape = el_shape
self.nbytes_el = self.dtype.itemsize * np.product(self.el_shape)
self.n_items = int(np.floor(max_bytes / self.nbytes_el))
self.total_shape = (self.n_items,) + self.el_shape
self.i_item = i_item
self.view = np.frombuffer(array, dtype, np.product(self.total_shape)).reshape(
self.total_shape
)
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(self, other.__class__):
return self.el_shape == other.el_shape and self.dtype == other.dtype
return False
def push(self, element):
self.view[self.i_item, ...] = element
i_inserted = self.i_item
self.i_item = (self.i_item + 1) % self.n_items
# a tuple is returned to maximise performance
return self.dtype, self.el_shape, i_inserted
def pop(self, i_item):
return self.view[i_item, ...]
def fits(self, item):
if isinstance(item, np.ndarray):
return item.dtype == self.dtype and item.shape == self.el_shape
return (
item[0] == self.dtype
and item[1] == self.el_shape
and item[2] < self.n_items
)
class ArrayQueue:
"""A drop-in replacement for the multiprocessing queue, usable
only for numpy arrays, which removes the need for pickling and
should provide higher speeds and lower memory usage
"""
def __init__(self, max_mbytes=10):
self.maxbytes = int(max_mbytes * 1000000)
self.array = Array("c", self.maxbytes)
self.view = None
self.queue = PortableQueue()
self.read_queue = PortableQueue()
self.last_item = 0
def check_full(self):
while True:
try:
self.last_item = self.read_queue.get(timeout=0.00001)
except Empty:
break
if self.view.i_item == self.last_item:
raise Full(
"Queue of length {} full when trying to insert {},"
" last item read was {}".format(
self.view.n_items, self.view.i_item, self.last_item
)
)
def put(self, element):
if self.view is None or not self.view.fits(element):
self.view = ArrayView(
self.array.get_obj(), self.maxbytes, element.dtype, element.shape
)
self.last_item = 0
else:
self.check_full()
qitem = self.view.push(element)
self.queue.put(qitem)
def get(self, **kwargs):
aritem = self.queue.get(**kwargs)
if self.view is None or not self.view.fits(aritem):
self.view = ArrayView(self.array.get_obj(), self.maxbytes, *aritem)
self.read_queue.put(aritem[2])
return self.view.pop(aritem[2])
def clear(self):
"""Empties the queue without the need to read all the existing
elements
:return: nothing
"""
self.view = None
while True:
try:
_ = self.queue.get_nowait()
except Empty:
break
while True:
try:
_ = self.read_queue.get_nowait()
except Empty:
break
self.last_item = 0
def empty(self):
return self.queue.empty()
def qsize(self):
return self.queue.qsize()
class TimestampedArrayQueue(ArrayQueue):
"""A small extension to support timestamps saved alongside arrays"""
def put(self, element, timestamp=None):
if self.view is None or not self.view.fits(element):
self.view = ArrayView(
self.array.get_obj(), self.maxbytes, element.dtype, element.shape
)
else:
self.check_full()
qitem = self.view.push(element)
if timestamp is None:
timestamp = datetime.now()
self.queue.put((timestamp, qitem))
def get(self, **kwargs):
timestamp, aritem = self.queue.get(**kwargs)
if self.view is None or not self.view.fits(aritem):
self.view = ArrayView(self.array.get_obj(), self.maxbytes, *aritem)
self.read_queue.put(aritem[2])
return timestamp, self.view.pop(aritem[2])
class IndexedArrayQueue(ArrayQueue):
"""A small extension to support timestamps saved alongside arrays"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.counter = 0
def put(self, element, timestamp=None):
if self.view is None or not self.view.fits(element):
self.view = ArrayView(
self.array.get_obj(), self.maxbytes, element.dtype, element.shape
)
else:
self.check_full()
qitem = self.view.push(element)
if timestamp is None:
timestamp = datetime.now()
self.queue.put((timestamp, self.counter, qitem))
self.counter += 1
def get(self, **kwargs):
timestamp, index, aritem = self.queue.get(**kwargs)
if self.view is None or not self.view.fits(aritem):
self.view = ArrayView(self.array.get_obj(), self.maxbytes, *aritem)
self.read_queue.put(aritem[2])
return timestamp, index, self.view.pop(aritem[2])
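

# A hedged usage sketch (not part of the library): a child process pushes numpy
# frames through the shared-memory queue while the parent reads them back; the
# frame shape and queue size below are arbitrary example values.
def _example_producer(queue):
    for i in range(3):
        queue.put(np.full((4, 4), i, dtype=np.uint8))


if __name__ == "__main__":
    from multiprocessing import Process

    example_queue = ArrayQueue(max_mbytes=1)
    producer = Process(target=_example_producer, args=(example_queue,))
    producer.start()
    for _ in range(3):
        print(example_queue.get(timeout=5).mean())
    producer.join()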
| 2.8125 | 3 |
python/edl/utils/error_utils.py | WEARE0/edl | 90 | 12788377 | <reponame>WEARE0/edl<filename>python/edl/utils/error_utils.py<gh_stars>10-100
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import time
from edl.utils import exceptions
from edl.utils.log_utils import logger
def handle_errors_until_timeout(f):
def handler(*args, **kwargs):
begin = time.time()
timeout = kwargs["timeout"]
while True:
try:
return f(*args, **kwargs)
except exceptions.EdlDataEndError:
raise exceptions.EdlDataEndError
except exceptions.EdlException as e:
if time.time() - begin >= timeout:
logger.warning("{} execute timeout:{}".format(f.__name__, timeout))
raise e
time.sleep(3)
continue
return functools.wraps(f)(handler)
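

# A hedged usage sketch (not from the EDL codebase): the decorated function must
# be called with a ``timeout`` keyword argument, and EdlException subclasses are
# retried every 3 seconds until that timeout expires. ``_example_store_call`` is
# a hypothetical name used only for illustration.
@handle_errors_until_timeout
def _example_store_call(value, timeout=None):
    # stands in for a call that may transiently raise exceptions.EdlException
    return value


# e.g. _example_store_call("ready", timeout=30)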
| 2.265625 | 2 |
Hard/460.py | Hellofafar/Leetcode | 6 | 12788378 | # ------------------------------
# 460. LFU Cache
#
# Description:
# Design and implement a data structure for Least Frequently Used (LFU) cache. It should support the following operations: get and put.
# get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
# put(key, value) - Set or insert the value if the key is not already present. When the cache reaches its capacity, it should invalidate the least frequently used item before inserting a new item. For the purpose of this problem, when there is a tie (i.e., two or more keys that have the same frequency), the least recently used key would be evicted.
#
# Follow up:
# Could you do both operations in O(1) time complexity?
#
# Example:
# LFUCache cache = new LFUCache( 2 /* capacity */ );
#
# cache.put(1, 1);
# cache.put(2, 2);
# cache.get(1); // returns 1
# cache.put(3, 3); // evicts key 2
# cache.get(2); // returns -1 (not found)
# cache.get(3); // returns 3.
# cache.put(4, 4); // evicts key 1.
# cache.get(1); // returns -1 (not found)
# cache.get(3); // returns 3
# cache.get(4); // returns 4
#
# Version: 1.0
# 11/02/18 by Jianfa
# ------------------------------
from collections import OrderedDict
class LFUCache:
def __init__(self, capacity):
"""
:type capacity: int
"""
self.capacity = capacity
self.valueDict = {} # value of key
self.countDict = {} # count of key
        self.frequencyDict = {} # {frequency: OrderedDict of keys} - OrderedDict keeps insertion order, which records recently-used order within each frequency
self.frequencyDict[1] = OrderedDict()
self.min = -1 # least frequency so far
def get(self, key):
"""
:type key: int
:rtype: int
"""
if key not in self.valueDict:
return -1
count = self.countDict[key]
self.countDict[key] = count + 1
del self.frequencyDict[count][key] # remove key in previous frequencyDict[count]
if count == self.min and len(self.frequencyDict[count]) == 0: # If least frequency needs to add 1
self.min += 1
if count+1 not in self.frequencyDict:
self.frequencyDict[count+1] = OrderedDict()
self.frequencyDict[count+1][key] = 1 # {fre, {key:1}} add {key:1} to frequencyDict
return self.valueDict[key]
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: void
"""
if self.capacity <= 0:
return
if key in self.valueDict:
self.valueDict[key] = value
self.get(key) # Add frequency
return
if len(self.valueDict) >= self.capacity: # It's over capacity
leastFreq = self.frequencyDict[self.min].popitem(last=False)
self.valueDict.pop(leastFreq[0])
self.valueDict[key] = value # key is not in valueDict, so add it
self.countDict[key] = 1 # update countDict with {key:1}
        self.min = 1 # least frequency becomes 1 again
self.frequencyDict[self.min][key] = 1
# Your LFUCache object will be instantiated and called as such:
# obj = LFUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
# Used for testing
if __name__ == "__main__":
    cache = LFUCache(2)
    cache.put(1, 1)
    print(cache.get(1))  # expect 1
# ------------------------------
# Summary:
# Follow idea from https://leetcode.com/problems/lfu-cache/discuss/94521/JAVA-O(1)-very-easy-solution-using-3-HashMaps-and-LinkedHashSet
# Used a data structure from collections.OrderedDict | 3.90625 | 4 |
weather/domain/objects.py | rerupp/weather | 1 | 12788379 | import sys
from calendar import monthrange
from contextlib import contextmanager
from csv import DictReader, DictWriter
from datetime import date, datetime, timedelta, MINYEAR
from enum import Enum
from gzip import GzipFile
from importlib.resources import open_binary as open_binary_package
from io import TextIOWrapper
from pathlib import Path
from re import compile as re_compile, IGNORECASE
from typing import Callable, Generator, IO, List, NamedTuple, Union
from urllib.parse import urlencode, urljoin
import requests
from weather.configuration import get_setting, get_logger
# bring the data package into scope
from weather.domain import data
log = get_logger(__name__)
DataPath = Union[str, Path]
DictionaryWriter = Callable[[dict], None]
class CsvDictWriter:
def __init__(self, fields: List[str]):
if not fields or len(fields) == 0:
raise ValueError("Dictionary fields are required...")
self._fields = fields.copy()
@property
def fields(self):
return self._fields.copy()
@contextmanager
def file_writer(self, data_path: DataPath) -> DictionaryWriter:
data_path = Path(data_path) if isinstance(data_path, str) else data_path
if not data_path.exists():
mode = "w"
elif not data_path.is_file():
raise ValueError("CSV filename exists and is not writable...")
else:
mode = "a"
with data_path.open(mode) as fp:
dict_writer = self._get_dict_writer(fp, mode == "w")
yield lambda d: dict_writer.writerow(d)
@contextmanager
def stdout(self) -> DictionaryWriter:
dict_writer = self._get_dict_writer(sys.stdout, True)
yield lambda content: dict_writer.writerow(content)
def _get_dict_writer(self, fp: IO, include_headers: bool = False) -> DictWriter:
dict_writer = DictWriter(fp, fieldnames=self._fields, extrasaction='ignore')
if include_headers:
dict_writer.writeheader()
return dict_writer
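

# Hedged illustration (not part of the original module): the intended CsvDictWriter
# flow is to fix the field names up front and then stream dictionaries through the
# context-managed writer callable; the field names and row below are example values.
def _example_csv_export(data_path: DataPath) -> None:
    writer = CsvDictWriter(["name", "tz"])
    with writer.file_writer(data_path) as write_row:
        write_row({"name": "Portland, OR", "tz": "America/Los_Angeles"})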
class DateRange(NamedTuple('_DateRange', low=date, high=date)):
def __new__(cls, low: date, high: date = None):
if not low:
error = "{}: a low date is required.".format(cls.__name__)
raise ValueError(error)
if not high:
high = low
elif high < low:
error = "{}: high date ({}) cannot be less than low date ({}).".format(cls.__name__, high, low)
raise ValueError(error)
# It looks like there is an open issue with PyCharm, PY-39755, that falsely reports
# "unexpected arguments" when calling the super class.
# noinspection PyArgumentList
return super().__new__(cls, low, high)
def __str__(self):
return "{}(low={},high={})".format(self.__class__.__name__, self.low, self.high)
def __eq__(self, other) -> bool:
if isinstance(other, DateRange):
return self.low == other.low and self.high == other.high
        return NotImplemented
def __contains__(self, other) -> bool:
if isinstance(other, DateRange):
return self.low <= other.low and self.high >= other.high
def total_days(self) -> int:
return (self.high - self.low).days
def get_dates(self) -> Generator[date, None, None]:
if self.low == self.high:
yield self.low
else:
one_day = timedelta(days=1)
ts = self.low
while ts <= self.high:
yield ts
ts += one_day
def spans_years(self) -> bool:
return self.low.year < self.high.year
def as_neutral_date_range(self) -> 'DateRange':
def neutral_day(_date) -> int:
if 2 != _date.month:
is_leap_day = False
else:
is_leap_day = (29 == _date.day)
# MINYEAR and the following year are not leap years
return 28 if is_leap_day else _date.day
low = date(MINYEAR, self.low.month, neutral_day(self.low))
high = date(MINYEAR + 1 if self.spans_years() else MINYEAR, self.high.month, neutral_day(self.high))
return DateRange(low, high)
def with_month_offset(self, low_months: int, high_month: int) -> 'DateRange':
pass
def with_low_month_offset(self, months: int) -> 'DateRange':
pass
def with_high_month_offset(self, months: int) -> 'DateRange':
pass
@staticmethod
def _days_in_month(year: int, month: int):
return monthrange(year, month)[1]
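

# Hedged illustration (not part of the original module): basic DateRange usage;
# the dates are arbitrary example values.
def _example_date_range() -> None:
    date_range = DateRange(date(2020, 12, 30), date(2021, 1, 2))
    assert date_range.spans_years() and date_range.total_days() == 3
    assert next(date_range.get_dates()) == date(2020, 12, 30)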
class Location(NamedTuple):
name: str
alias: str
longitude: str
latitude: str
tz: str
    def __eq__(self, other):
        """In weather data the location is identified by name and alias which allows this to work."""
        if isinstance(other, Location):
            return (self.name, self.alias) == (other.name, other.alias)
        return NotImplemented
def __hash__(self):
"""Since equality is base on name and alias this will work for a hash identifier."""
return hash((self.name, self.alias))
def __ne__(self, other):
"""Be explicit as to what not equal to means."""
return not self.__eq__(other)
def __repr__(self) -> str:
return "(name='{}', alias={}, longitude={}, latitude={}, tz={})" \
.format(self.name, self.alias, self.longitude, self.latitude, self.tz)
def is_name(self, name: str, case_sensitive=False) -> bool:
return name == self.name if case_sensitive else name.casefold() == self.name.casefold()
def is_alias(self, alias: str, case_sensitive=False) -> bool:
return alias == self.alias if case_sensitive else alias.casefold() == self.alias.casefold()
def is_considered(self, value: str) -> bool:
return self.is_name(value) or self.is_alias(value)
class Field(Enum):
NAME = "name"
LONGITUDE = "longitude"
LATITUDE = "latitude"
ALIAS = "alias"
TZ = "tz"
def to_dict(self) -> dict:
return {
Location.Field.NAME.value: self.name,
Location.Field.ALIAS.value: self.alias.casefold() if self.alias else self.alias,
Location.Field.LONGITUDE.value: self.longitude,
Location.Field.LATITUDE.value: self.latitude,
Location.Field.TZ.value: self.tz
}
@staticmethod
def from_dict(dictionary: dict) -> 'Location':
def get_field(field_: Location.Field) -> str:
data_ = dictionary.get(field_.value)
if not data_:
raise ValueError("The location {} is required.".format(field_.value))
return str(data_)
return Location(name=get_field(Location.Field.NAME),
alias=get_field(Location.Field.ALIAS).casefold(),
longitude=get_field(Location.Field.LONGITUDE),
latitude=get_field(Location.Field.LATITUDE),
tz=get_field(Location.Field.TZ))
class CityDB:
class Record(NamedTuple):
name: str
state: str
longitude: str
latitude: str
tz: str
zips: str
@staticmethod
def from_dict(db_row: dict) -> 'CityDB.Record':
return CityDB.Record(name=db_row["city"],
state=db_row["state"],
longitude=db_row["long"],
latitude=db_row["lat"],
tz=db_row["tz"],
zips=db_row["zips"])
def to_location(self) -> Location:
return Location(name="{}, {}".format(self.name, self.state),
alias="{} {}".format(self.name, self.state).replace(" ", "_").casefold(),
longitude=self.longitude,
latitude=self.latitude,
tz=self.tz)
def __init__(self):
self._city_db: List[CityDB.Record] = []
# PyCharm is having issues figuring out the import api
# noinspection PyTypeChecker
with open_binary_package(data, 'cities_db.csv.gz') as pkg_file:
with GzipFile(mode="rb", fileobj=pkg_file) as gzip_file:
for row in DictReader(TextIOWrapper(gzip_file, encoding="UTF-8")):
self._city_db.append(CityDB.Record.from_dict(row))
def find(self, city: str = None, state: str = None, zip_code: str = None) -> List['CityDB.Record']:
city_finder = re_compile(city.replace('*', '.*'), IGNORECASE) if city else None
state_finder = re_compile(state, IGNORECASE) if state else None
zip_code_finder = re_compile(zip_code.replace('*', '.*')) if zip_code else None
matches = []
for record in self._city_db:
if city_finder and not city_finder.match(record.name):
continue
if state_finder and not state_finder.match(record.state):
continue
            if zip_code_finder and not any(zip_code_finder.match(zip_) for zip_ in record.zips.split()):
                continue
            matches.append(record)
return matches
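

# Hedged illustration (not part of the original module): CityDB.find accepts simple
# '*' wildcards for city and zip code; the query below uses example values only.
def _example_city_lookup() -> List[Location]:
    city_db = CityDB()
    return [record.to_location() for record in city_db.find(city="Portland", state="OR")]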
class WeatherProviderAPI:
RECORDED = "recorded"
ERROR = "error"
API_CALLS_MADE = "api_calls_made"
API_USAGE_LIMIT = 900
API_REQUESTS_MADE_TODAY_HEADER = "X-Forecast-API-Calls"
def __init__(self, key: str = None):
self._key = key if key else get_setting("domain", "history_api_key")
self._url = urljoin("https://api.darksky.net/forecast/", self._key) + "/"
self._api_calls_made = 0
@property
def url(self) -> str:
return self._url
@property
def key(self) -> str:
return self._key
def recorded(self, location: Location, when: datetime) -> dict:
"""
        The returned dictionary always contains either the RECORDED or the
        ERROR key, plus API_CALLS_MADE with the number of API calls made so far.
"""
def mk_error(reason: str) -> dict:
return {
WeatherProviderAPI.ERROR: reason,
WeatherProviderAPI.API_CALLS_MADE: self._api_calls_made
}
if self._api_calls_made > self.API_USAGE_LIMIT:
return mk_error("You've made too many API requests to Dark Sky today...")
url = urljoin(self.url, "{},{},{}".format(location.latitude, location.longitude, when.isoformat()))
log.debug("url: %s", url)
try:
response = requests.get(url, urlencode({"exclude": "currently,flags"}))
if response.ok:
api_calls = response.headers.get(self.API_REQUESTS_MADE_TODAY_HEADER.lower())
log.debug("api calls: %s", api_calls)
if not api_calls:
log.error("Yikes... Didn't find {} header!!!".format(self.API_REQUESTS_MADE_TODAY_HEADER))
self._api_calls_made = self.API_USAGE_LIMIT + 1
else:
self._api_calls_made = int(api_calls)
return {
WeatherProviderAPI.RECORDED: response.json(),
WeatherProviderAPI.API_CALLS_MADE: self._api_calls_made
}
else:
return mk_error("HTTP {}: {}".format(response.status_code, response.reason))
except Exception as error:
return mk_error(str(error))
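

# Hedged illustration (not part of the original module): how a caller might interpret
# the dictionary returned by recorded(); the date here is an arbitrary example value.
def _example_recorded_lookup(api: WeatherProviderAPI, location: Location) -> dict:
    result = api.recorded(location, datetime(2020, 1, 1))
    if WeatherProviderAPI.ERROR in result:
        log.error("history fetch failed: %s", result[WeatherProviderAPI.ERROR])
    return result.get(WeatherProviderAPI.RECORDED, {})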
class FullHistory(NamedTuple):
date: date
daily: dict
hourly: List[dict]
| 2.390625 | 2 |
jsb/plugs/core/user.py | NURDspace/jsonbot | 1 | 12788380 | # jsb/plugs/core/user.py
#
#
""" users related commands. """
## jsb imports
from jsb.utils.generic import getwho
from jsb.utils.exception import handle_exception
from jsb.utils.name import stripname
from jsb.lib.users import users
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
## basic imports
import logging
## user-whoami command
def handle_whoami(bot, ievent):
""" no arguments - get your username. """
ievent.reply('%s' % bot.users.getname(ievent.auth))
cmnds.add('user-whoami', handle_whoami, ['OPER', 'USER', 'GUEST'])
examples.add('user-whoami', 'get your username', 'user-whoami')
## user-meet command
def handle_meet(bot, ievent):
""" arguments: <nick> - introduce a new user to the bot. """
try: nick = ievent.args[0]
except IndexError:
ievent.missing('<nick>')
return
if bot.users.exist(nick):
ievent.reply('there is already a user with username %s' % nick)
return
userhost = getwho(bot, nick)
logging.warn("users - meet - userhost is %s" % userhost)
if not userhost:
ievent.reply("can't find userhost of %s" % nick)
return
username = bot.users.getname(userhost)
if username:
ievent.reply('we already have a user with userhost %s (%s)' % (userhost, username))
return
result = 0
name = stripname(nick.lower())
result = bot.users.add(name, [userhost, ], ['USER', 'GUEST'])
if result: ievent.reply('%s - %s - (%s) added to user database' % (nick, userhost, name))
else: ievent.reply('add failed')
cmnds.add('user-meet', handle_meet, ['OPER', 'MEET'])
examples.add('user-meet', '<nick> .. introduce <nick> to the bot', 'user-meet dunker')
## user-add command
def handle_adduser(bot, ievent):
""" arguments: <name> <userhost> - introduce a new user to the bot. """
try: (name, userhost) = ievent.args
except ValueError:
ievent.missing('<name> <userhost>')
return
username = bot.users.getname(userhost)
if username:
ievent.reply('we already have a user with userhost %s (%s)' % (userhost, username))
return
result = 0
name = stripname(name.lower())
result = bot.users.add(name, [userhost, ], ['USER', 'GUEST'])
if result: ievent.reply('%s added to user database' % name)
else: ievent.reply('add failed')
cmnds.add('user-add', handle_adduser, 'OPER')
examples.add('user-add', 'add user to the bot', 'user-add dunker bart@localhost')
## user-merge command
def handle_merge(bot, ievent):
""" arguments: <name> <nick> - merge the userhost belonging to <nick> into an already existing user. """
if len(ievent.args) != 2:
ievent.missing('<name> <nick>')
return
name, nick = ievent.args
name = name.lower()
if bot.users.gotperm(name, 'OPER') and not bot.users.allowed(ievent.userhost, 'OPER'):
ievent.reply("only OPER perm can merge with OPER user")
return
if name == 'owner' and not bot.ownercheck(ievent.userhost):
ievent.reply("you are not the owner")
return
if not bot.users.exist(name):
ievent.reply("we have no user %s" % name)
return
userhost = getwho(bot, nick)
if not userhost:
ievent.reply("can't find userhost of %s" % nick)
return
if bot.ownercheck(userhost):
ievent.reply("can't merge with owner")
return
result = bot.users.merge(name, userhost)
if result: ievent.reply('%s merged' % nick)
else: ievent.reply('merge failed')
cmnds.add('user-merge', handle_merge, ['OPER', 'MEET'])
examples.add('user-merge', '<name> <nick> .. merge record with <name> with userhost from <nick>', 'user-merge bart dunker')
## user-import command
def handle_import(bot, ievent):
""" arguments: <userhost> - merge the userhost into user giving the command. """
if len(ievent.args) != 1:
ievent.missing('<userhost>')
return
userhost = ievent.args[0]
if bot.ownercheck(userhost):
ievent.reply("can't merge owner")
return
name = bot.users.getname(ievent.auth)
if not name:
ievent.reply("i don't know you %s" % ievent.userhost)
return
result = bot.users.merge(name, userhost)
if result: ievent.reply('%s imported' % userhost)
else: ievent.reply('import failed')
cmnds.add('user-import', handle_import, ['IMPORT', 'OPER'])
examples.add('user-import', 'user-import <userhost> .. merge record with \
<name> with userhost from the person giving the command (self merge)', 'user-import <EMAIL>')
## user-del command
def handle_delete(bot, ievent):
""" arguments: <name> - remove user. """
if not bot.ownercheck(ievent.userhost):
ievent.reply('only owner can use delete')
return
if len(ievent.args) == 0:
ievent.missing('<name>')
return
name = ievent.args[0]
result = 0
name = stripname(name)
name = name.lower()
try:
result = bot.users.delete(name)
if result:
ievent.reply('%s deleted' % name)
return
except KeyError: pass
ievent.reply('no %s item in database' % name)
cmnds.add('user-del', handle_delete, 'OPER')
examples.add('user-del', 'user-del <name> .. delete user with <username>' , 'user-del dunker')
## user-undel command
def handle_undelete(bot, ievent):
""" arguments: <name> - remove user. """
if not bot.ownercheck(ievent.userhost):
ievent.reply('only owner can use delete')
return
if len(ievent.args) == 0:
ievent.missing('<name>')
return
name = ievent.args[0]
result = 0
name = stripname(name)
name = name.lower()
user = bot.users.grab(name)
if user:
user.data.deleted = False
user.save()
ievent.reply('%s undeleted' % name)
return
else: ievent.reply('no %s item in database' % name)
cmnds.add('user-undel', handle_undelete, 'OPER')
examples.add('user-undel', 'user-del <name> .. undelete user with <username>' , 'user-undel dunker')
## user-scan command
def handle_userscan(bot, ievent):
""" arguments: <searchtxt> - scan for user. """
try:name = ievent.args[0]
except IndexError:
ievent.missing('<txt>')
return
name = name.lower()
names = bot.users.names()
result = []
for i in names:
if i.find(name) != -1: result.append(i)
if result: ievent.reply("users matching %s: " % name, result)
else: ievent.reply('no users matched')
cmnds.add('user-scan', handle_userscan, 'OPER')
examples.add('user-scan', '<txt> .. search database for matching usernames', 'user-scan dunk')
## user-names command
def handle_names(bot, ievent):
""" no arguments - show registered users. """
ievent.reply("usernames: ", bot.users.names())
cmnds.add('user-names', handle_names, 'OPER')
examples.add('user-names', 'show names of registered users', 'user-names')
## user-name command
def handle_name(bot, ievent):
""" no arguments - show name of user giving the command. """
ievent.reply('your name is %s' % bot.users.getname(ievent.auth))
cmnds.add('user-name', handle_name, ['USER', 'GUEST'])
examples.add('user-name', 'show name of user giving the commands', 'user-name')
## user-getname command
def handle_getname(bot, ievent):
""" arguments: <nick> - fetch username of nick. """
try: nick = ievent.args[0]
except IndexError:
ievent.missing("<nick>")
return
userhost = getwho(bot, nick)
if not userhost:
ievent.reply("can't find userhost of %s" % nick)
return
name = bot.users.getname(userhost)
if not name:
ievent.reply("can't find user for %s" % userhost)
return
ievent.reply(name)
cmnds.add('user-getname', handle_getname, ['USER', 'GUEST'])
examples.add('user-getname', 'user-getname <nick> .. get the name of <nick>', 'user-getname dunker')
## user-addperm command
def handle_addperm(bot, ievent):
""" arguments: <name> <permission> - add permission to user. """
if len(ievent.args) != 2:
ievent.missing('<name> <perm>')
return
name, perm = ievent.args
perm = perm.upper()
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
result = 0
if bot.users.gotperm(name, perm):
ievent.reply('%s already has permission %s' % (name, perm))
return
result = bot.users.adduserperm(name, perm)
if result: ievent.reply('%s perm added' % perm)
else: ievent.reply('perm add failed')
cmnds.add('user-addperm', handle_addperm, 'OPER')
examples.add('user-addperm', 'user-addperm <name> <perm> .. add permissions to user <name>', 'user-addperm dunker rss')
## user-getperms command
def handle_getperms(bot, ievent):
""" arguments: <name> - get permissions of name. """
try: name = ievent.args[0]
except IndexError:
ievent.missing('<name>')
return
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
perms = bot.users.getuserperms(name)
if perms: ievent.reply("permissions of %s: " % name, perms)
else: ievent.reply('%s has no permissions set' % name)
cmnds.add('user-getperms', handle_getperms, 'OPER')
examples.add('user-getperms', 'user-getperms <name> .. get permissions of <name>', 'user-getperms dunker')
## user-perms command
def handle_perms(bot, ievent):
""" no arguments - get permissions of the user given the command. """
if ievent.rest:
ievent.reply("use getperms to get the permissions of somebody else")
return
name = bot.users.getname(ievent.auth)
if not name:
ievent.reply("can't find username for %s" % ievent.userhost)
return
perms = bot.users.getuserperms(name)
if perms: ievent.reply("you have permissions: ", perms)
cmnds.add('user-perms', handle_perms, ['USER', 'GUEST'])
examples.add('user-perms', 'get permissions', 'user-perms')
## user-delperm command
def handle_delperm(bot, ievent):
""" arguments: <name> <perm> - delete permission from user. """
if len(ievent.args) != 2:
ievent.missing('<name> <perm>')
return
name, perm = ievent.args
perm = perm.upper()
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
result = bot.users.deluserperm(name, perm)
if result: ievent.reply('%s perm removed' % perm)
else: ievent.reply("%s has no %s permission" % (name, perm))
cmnds.add('user-delperm', handle_delperm, 'OPER')
examples.add('user-delperm', 'delete from user <name> permission <perm>', 'user-delperm dunker rss')
## user-addstatus command
def handle_addstatus(bot, ievent):
""" arguments: <name> <status> - add status to a user. """
if len(ievent.args) != 2:
ievent.missing('<name> <status>')
return
name, status = ievent.args
status = status.upper()
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
if bot.users.gotstatus(name, status):
ievent.reply('%s already has status %s' % (name, status))
return
result = bot.users.adduserstatus(name, status)
if result: ievent.reply('%s status added' % status)
else: ievent.reply('add failed')
cmnds.add('user-addstatus', handle_addstatus, 'OPER')
examples.add('user-addstatus', 'user-addstatus <name> <status>', 'user-addstatus dunker #dunkbots')
## user-getstatus command
def handle_getstatus(bot, ievent):
""" arguments: <name> - get status of a user. """
try: name = ievent.args[0]
except IndexError:
ievent.missing('<name>')
return
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
status = bot.users.getuserstatuses(name)
if status: ievent.reply("status of %s: " % name, status)
else: ievent.reply('%s has no status set' % name)
cmnds.add('user-getstatus', handle_getstatus, 'OPER')
examples.add('user-getstatus', 'user-getstatus <name> .. get status of <name>', 'user-getstatus dunker')
## user-status command
def handle_status(bot, ievent):
""" no arguments - get status of user given the command. """
status = bot.users.getstatuses(ievent.userhost)
if status: ievent.reply("you have status: ", status)
else: ievent.reply('you have no status set')
cmnds.add('user-status', handle_status, ['USER', 'GUEST'])
examples.add('user-status', 'get status', 'user-status')
## user-delstatus command
def handle_delstatus(bot, ievent):
""" arguments: <name> <status> - delete status. """
if len(ievent.args) != 2:
ievent.missing('<name> <status>')
return
name, status = ievent.args
status = status.upper()
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
result = bot.users.deluserstatus(name, status)
if result: ievent.reply('%s status deleted' % status)
else: ievent.reply("%s has no %s status" % (name, status))
cmnds.add('user-delstatus', handle_delstatus, 'OPER')
examples.add('user-delstatus', '<name> <status>', 'user-delstatus dunker #dunkbots')
## user-adduserhost command
def handle_adduserhost(bot, ievent):
""" arguments: <name> <userhost> - add to userhosts of user. """
if len(ievent.args) != 2:
ievent.missing('<name> <userhost>')
return
name, userhost = ievent.args
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
if bot.users.gotuserhost(name, userhost):
ievent.reply('%s already has userhost %s' % (name, userhost))
return
result = bot.users.adduserhost(name, userhost)
if result: ievent.reply('userhost added')
else: ievent.reply('add failed')
cmnds.add('user-adduserhost', handle_adduserhost, 'OPER')
examples.add('user-adduserhost', 'user-adduserhost <name> <userhost>', 'user-adduserhost dunker <EMAIL>')
## user-deluserhost command
def handle_deluserhost(bot, ievent):
""" arguments: <name> <userhost> - remove from userhosts of name. """
if len(ievent.args) != 2:
ievent.missing('<name> <userhost>')
return
name, userhost = ievent.args
name = name.lower()
if bot.ownercheck(userhost):
        ievent.reply("can't delete userhosts from owner")
return
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
result = bot.users.deluserhost(name, userhost)
if result: ievent.reply('userhost removed')
else: ievent.reply("%s has no %s in userhost list" % (name, userhost))
cmnds.add('user-deluserhost', handle_deluserhost, 'OPER')
examples.add('user-deluserhost', 'user-deluserhost <name> <userhost> .. delete from userhosts of <name> userhost <userhost>','user-deluserhost dunker <EMAIL>')
## user-getuserhosts command
def handle_getuserhosts(bot, ievent):
""" arguments: <name> - get userhosts of a user. """
try: who = ievent.args[0]
except IndexError:
ievent.missing('<name>')
return
who = who.lower()
userhosts = bot.users.getuserhosts(who)
if userhosts: ievent.reply("userhosts of %s: " % who, userhosts)
else: ievent.reply("can't find user %s" % who)
cmnds.add('user-getuserhosts', handle_getuserhosts, 'OPER')
examples.add('user-getuserhosts', 'user-getuserhosts <name> .. get userhosts of <name>', 'user-getuserhosts dunker')
## user-userhosts command
def handle_userhosts(bot, ievent):
""" no arguments - get userhosts of user giving the command. """
userhosts = bot.users.gethosts(ievent.userhost)
if userhosts: ievent.reply("you have userhosts: ", userhosts)
else: ievent.reply('no userhosts found')
cmnds.add('user-userhosts', handle_userhosts, ['USER', 'GUEST'])
examples.add('user-userhosts', 'get userhosts', 'user-userhosts')
## user-getemail command
def handle_getemail(bot, ievent):
""" arguments: <user> - get email addres of a user. """
try: name = ievent.args[0]
except IndexError:
ievent.missing('<name>')
return
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
email = bot.users.getuseremail(name)
if email: ievent.reply(email)
else: ievent.reply('no email set')
cmnds.add('user-getemail', handle_getemail, ['USER', ])
examples.add('user-getemail', 'user-getemail <name> .. get email from user <name>', 'user-getemail dunker')
## user-setemail command
def handle_setemail(bot, ievent):
""" arguments: <name> <email> - set email of a user. """
try: name, email = ievent.args
except ValueError:
ievent.missing('<name> <email>')
return
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
bot.users.setemail(name, email)
ievent.reply('email set')
cmnds.add('user-setemail', handle_setemail, 'OPER')
examples.add('user-setemail', 'user-setemail <name> <email>.. set email of user <name>', 'user-setemail dunker <EMAIL>')
## user-email command
def handle_email(bot, ievent):
""" no arguments - show email of user giving the command. """
if len(ievent.args) != 0:
        ievent.reply('use getemail to get the email address of a user .. email shows your own mail address')
return
email = bot.users.getemail(ievent.userhost)
if email: ievent.reply(email)
else: ievent.reply('no email set')
cmnds.add('user-email', handle_email, ['USER', 'GUEST'])
examples.add('user-email', 'get email', 'user-email')
## user-delemail command
def handle_delemail(bot, ievent):
""" no arguments - reset email of user giving the command. """
name = bot.users.getname(ievent.auth)
if not name:
ievent.reply("can't find user for %s" % ievent.userhost)
return
result = bot.users.delallemail(name)
if result: ievent.reply('email removed')
else: ievent.reply('delete failed')
cmnds.add('user-delemail', handle_delemail, 'OPER')
examples.add('user-delemail', 'reset email', 'user-delemail')
## user-addpermit command
def handle_addpermit(bot, ievent):
""" arguments: <name> <permit> - allow another user to perform actions on your data. """
try: who, what = ievent.args
except ValueError:
ievent.missing("<name> <permit>")
return
if not bot.users.exist(who):
ievent.reply("can't find username of %s" % who)
return
name = bot.users.getname(ievent.auth)
if not name:
ievent.reply("i dont know %s" % ievent.userhost)
return
if bot.users.gotpermit(name, (who, what)):
ievent.reply('%s is already allowed to do %s' % (who, what))
return
result = bot.users.adduserpermit(name, who, what)
if result: ievent.reply('permit added')
else: ievent.reply('add failed')
cmnds.add('user-addpermit', handle_addpermit, ['USER', 'GUEST'])
examples.add('user-addpermit', 'user-addpermit <nick> <what> .. permit nick access to <what> .. use setperms to add permissions', 'user-addpermit dunker todo')
## user-permit command
def handle_permit(bot, ievent):
""" no arguments - get permit list of user giving the command. """
if ievent.rest:
ievent.reply("use the user-addpermit command to allow somebody something .. use getname <nick> to get the username of somebody .. this command shows what permits you have")
return
name = bot.users.getname(ievent.auth)
if not name:
ievent.reply("can't find user for %s" % ievent.userhost)
return
permits = bot.users.getuserpermits(name)
if permits: ievent.reply("you permit the following: ", permits)
else: ievent.reply("you don't have any permits")
cmnds.add('user-permit', handle_permit, ['USER', 'GUEST'])
examples.add('user-permit', 'show permit of user giving the command', 'user-permit')
## user-delpermit command
def handle_userdelpermit(bot, ievent):
""" arguments: <name> <permit> - remove (name, permit) from permit list. """
try: who, what = ievent.args
except ValueError:
ievent.missing("<name> <permit>")
return
if not bot.users.exist(who):
ievent.reply("can't find registered name of %s" % who)
return
name = bot.users.getname(ievent.auth)
if not name:
ievent.reply("i don't know you %s" % ievent.userhost)
return
if not bot.users.gotpermit(name, (who, what)):
        ievent.reply('%s is not allowed to do %s' % (who, what))
return
result = bot.users.deluserpermit(name, (who, what))
if result: ievent.reply('%s denied' % what)
else: ievent.reply('delete failed')
cmnds.add('user-delpermit', handle_userdelpermit, ['USER', 'GUEST'])
examples.add('user-delpermit', 'user-delpermit <name> <permit>', 'user-delpermit dunker todo')
## user-check command
def handle_check(bot, ievent):
""" arguments: <nick> - get data of a user based on nick name. """
try: nick = ievent.args[0]
except IndexError:
ievent.missing('<nick>')
return
userhost = getwho(bot, nick)
if not userhost:
ievent.reply("can't find userhost of %s" % nick)
return
name = bot.users.getname(userhost)
if not name:
ievent.reply("can't find user")
return
userhosts = bot.users.getuserhosts(name)
perms = bot.users.getuserperms(name)
email = bot.users.getuseremail(name)
permits = bot.users.getuserpermits(name)
status = bot.users.getuserstatuses(name)
ievent.reply('userrecord of %s = userhosts: %s perms: %s email: %s permits: %s status: %s' % (name, str(userhosts), str(perms), str(email), str(permits), str(status)))
cmnds.add('user-check', handle_check, 'OPER')
examples.add('user-check', 'user-check <nick>', 'user-check dunker')
## user-show command
def handle_show(bot, ievent):
""" arguments: <name> - get data of a user based on username. """
try: name = ievent.args[0]
except IndexError:
ievent.missing('<name>')
return
name = name.lower()
user = bot.users.byname(name)
if not user:
ievent.reply("can't find user %s" % name)
return
userhosts = str(user.data.userhosts)
perms = str(user.data.perms)
email = str(user.data.email)
permits = str(user.data.permits)
status = str(user.data.status)
ievent.reply('userrecord of %s = userhosts: %s perms: %s email: %s permits: %s status: %s' % (name, userhosts, perms, email, permits, status))
cmnds.add('user-show', handle_show, 'OPER')
examples.add('user-show', 'user-show <name> .. show data of <name>', 'user-show dunker')
## user-match command
def handle_match(bot, ievent):
""" arguments: <userhost> - get data of user based on userhost. """
try: userhost = ievent.args[0]
except IndexError:
ievent.missing('<userhost>')
return
user = bot.users.getuser(userhost)
if not user:
ievent.reply("can't find user with userhost %s" % userhost)
return
userhosts = str(user.data.userhosts)
perms = str(user.data.perms)
email = str(user.data.email)
permits = str(user.data.permits)
status = str(user.data.status)
ievent.reply('userrecord of %s = userhosts: %s perms: %s email: %s permits: %s status: %s' % (userhost, userhosts, perms, email, permits, status))
cmnds.add('user-match', handle_match, ['OPER', ])
examples.add('user-match', 'user-match <userhost>', 'user-match test@test')
## user-allstatus command
def handle_getuserstatus(bot, ievent):
""" arguments: <status> - list users with <status>. """
try: status = ievent.args[0].upper()
except IndexError:
ievent.missing('<status>')
return
result = bot.users.getstatususers(status)
if result: ievent.reply("users with %s status: " % status, result)
else: ievent.reply("no users with %s status found" % status)
cmnds.add('user-allstatus', handle_getuserstatus, 'OPER')
examples.add('user-allstatus', 'user-allstatus <status> .. get all users with <status> status', 'user-allstatus #dunkbots')
## user-allperm command
def handle_getuserperm(bot, ievent):
""" arguments: <perm> - list users with permission <perm>. """
try: perm = ievent.args[0].upper()
except IndexError:
ievent.missing('<perm>')
return
result = bot.users.getpermusers(perm)
if result: ievent.reply('users with %s permission: ' % perm, result)
else: ievent.reply("no users with %s permission found" % perm)
cmnds.add('user-allperm', handle_getuserperm, 'OPER')
examples.add('user-allperm', 'user-allperm <perm> .. get users with <perm> permission', 'user-allperm rss')
## user-search command
def handle_usersearch(bot, ievent):
""" arguments: <searchtxt> - search for user matching given userhost. """
try: what = ievent.args[0]
except IndexError:
ievent.missing('<searchtxt>')
return
result = bot.users.usersearch(what)
if result:
res = ["(%s) %s" % u for u in result]
ievent.reply('users matching %s: ' % what, res)
else: ievent.reply('no userhost matching %s found' % what)
cmnds.add('user-search', handle_usersearch, 'OPER')
examples.add('user-search', 'search users userhosts', 'user-search gozerbot')
| 2.203125 | 2 |
app/modules/names/models.py | WildMeOrg/houston | 6 | 12788381 | # -*- coding: utf-8 -*-
"""
Names database models
A structure for holding a (user-provided) name for an Individual.
--------------------
"""
import uuid
from app.extensions import db, HoustonModel, Timestamp
import logging
import app.extensions.logging as AuditLog
log = logging.getLogger(__name__) # pylint: disable=invalid-name
class NamePreferringUsersJoin(db.Model, HoustonModel):
name_guid = db.Column(
db.GUID, db.ForeignKey('name.guid', ondelete='CASCADE'), primary_key=True
)
user_guid = db.Column(
db.GUID, db.ForeignKey('user.guid', ondelete='CASCADE'), primary_key=True
)
name = db.relationship('Name', back_populates='preferring_user_joins')
user = db.relationship('User')
class Name(db.Model, HoustonModel, Timestamp):
"""
Names database model. For a name (one of possibly many) on an Individual.
"""
def __init__(self, *args, **kwargs):
AuditLog.user_create_object(
log, self, f"for Individual {kwargs.get('individual_guid')}"
)
super().__init__(*args, **kwargs)
guid = db.Column(
db.GUID, default=uuid.uuid4, primary_key=True
) # pylint: disable=invalid-name
value = db.Column(db.String(), index=True, nullable=False)
context = db.Column(db.String(), index=True, nullable=False)
individual_guid = db.Column(
db.GUID, db.ForeignKey('individual.guid'), index=True, nullable=False
)
individual = db.relationship('Individual', back_populates='names')
creator_guid = db.Column(
db.GUID, db.ForeignKey('user.guid'), index=True, nullable=False
)
creator = db.relationship(
'User',
backref=db.backref(
'names_created',
primaryjoin='User.guid == Name.creator_guid',
order_by='Name.guid',
),
)
preferring_user_joins = db.relationship(
'NamePreferringUsersJoin', back_populates='name'
)
# this will ensure individual+context is unique (one context per individual)
__table_args__ = (db.UniqueConstraint(context, individual_guid),)
def __repr__(self):
return (
'<{class_name}('
'guid={self.guid}, '
"context='{self.context}', "
'value={self.value} '
')>'.format(class_name=self.__class__.__name__, self=self)
)
def get_preferring_users(self):
return [join.user for join in self.preferring_user_joins]
def add_preferring_user(self, user):
if user in self.get_preferring_users():
raise ValueError(f'{user} already in list')
pref_join = NamePreferringUsersJoin(name_guid=self.guid, user_guid=user.guid)
with db.session.begin(subtransactions=True):
db.session.add(pref_join)
def add_preferring_users(self, users):
if not users or not isinstance(users, list):
return
for user in set(users): # forces unique
self.add_preferring_user(user)
def remove_preferring_user(self, user):
found = None
for pref_join in self.preferring_user_joins:
if pref_join.user_guid == user.guid:
found = pref_join
if found:
with db.session.begin(subtransactions=True):
db.session.delete(found)
return True
return False
def delete(self):
AuditLog.delete_object(log, self, f'from Individual {self.individual.guid}')
with db.session.begin(subtransactions=True):
for join in self.preferring_user_joins:
db.session.delete(join)
db.session.delete(self)
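
# A minimal usage sketch (hypothetical, not part of the module). It assumes an existing
# Individual `indiv` and User `user` from this application's models and an open request
# context / db session:
#
#     name = Name(value='Flukey', context='nickname',
#                 individual_guid=indiv.guid, creator_guid=user.guid)
#     with db.session.begin(subtransactions=True):
#         db.session.add(name)
#     name.add_preferring_user(user)
#     assert user in name.get_preferring_users()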
| 2.703125 | 3 |
random-images/WallpaperGenerator/constants.py | dominicschaff/random | 0 | 12788382 | from __future__ import division
from math import radians as rad, cos, sin
from PIL import Image, ImageDraw
from random import randint
width,height = 1280,1280
def randColour(s=0,e=255):
return (randint(s,e),randint(s,e),randint(s,e))
def drange(start, stop, step):
r = start
while r < stop:
yield r
r += step
def convert(r,theta):
return [r*cos(theta), r*sin(theta)]
def rotate(x,y, angle):
return [x * cos(angle) - y * sin(angle),x*sin(angle) + y*cos(angle)]
class DrawImage(object):
"""docstring for DrawImage"""
width = 1280
height = 1280
def __init__(self):
self.size = (self.width, self.height)
self.mid = (self.width/2, self.height/2)
def create(self, size=(1280,1280), colour=(0,0,0)):
self.image = Image.new("RGB", size, colour)
self.pixels = self.image.load()
self.size = size
self.width = size[0]
self.height = size[1]
self.mid = (self.width/2, self.height/2)
def open(self, name):
self.image = Image.open(name)
self.pixels = self.image.load()
self.size = self.image.size
self.width = self.size[0]
self.height = self.size[1]
self.mid = (self.width/2, self.height/2)
def plot(self, spot, colour=(255,255,255)):
x,y = spot[0], self.height - spot[1]
if x >= self.width or y >= self.height:
return
if x<0 or y < 0:
return
self.pixels[x,y] = colour
def plotRadians(self, function, start=0, end=100, offset=(0,0), steps=1.0, scale=1.0, colour=(255,255,255), rotation=0.0):
for i in drange(start, end, steps):
t = rad(i)
r = function(t) * scale
if type(r) is tuple:
x,y = r
else:
x,y = convert(r,t)
x,y = rotate(x,y,rad(rotation))
self.plot((x+offset[0],y+offset[1]), colour)
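
    # A small usage sketch (illustrative only; nothing in this module calls it). It draws a
    # four-petalled rose r = cos(2*theta) centred on the canvas:
    #
    #     img = DrawImage()
    #     img.create()
    #     img.plotRadians(lambda t: cos(2 * t), start=0, end=360, steps=0.1,
    #                     scale=500, offset=img.mid, colour=(255, 0, 0))
    #     img.save("rose.png")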
def addOver(self, image):
for x in xrange(self.width):
for y in xrange(self.height):
if x < image.width and y < image.height and self.pixels[x,y] != (0,0,0):
image.plot((x,y), self.pixels[x,y])
self.image = image.image
self.pixels = image.pixels
self.size = image.size
self.width = image.width
self.height = image.height
def addUnder(self, image):
w,h = image.size
for x in xrange(image.width):
for y in xrange(image.height):
if x < w and y < h and image.pixels[x,y] != (0,0,0):
self.plot((x,y), image.pixels[x,y])
def show(self):
self.image.show()
def save(self, name, imageType = None):
        if imageType is None:  # originally tested the builtin 'type' instead of the argument
self.image.save(name)
else:
self.image.save(name, imageType) | 3.15625 | 3 |
tests/unit/test_typehinting.py | timothygebhard/hsr4hci | 1 | 12788383 | """
Tests for typehinting.py
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
import pytest
from hsr4hci.typehinting import (
BaseLinearModel,
BaseLinearModelCV,
RegressorModel,
)
# -----------------------------------------------------------------------------
# TEST CASES
# -----------------------------------------------------------------------------
def test__regressor_model() -> None:
"""
Test `hsr4hci.typehinting.RegressorModel`.
"""
with pytest.raises(TypeError) as type_error:
RegressorModel() # type: ignore
assert 'Protocols cannot be instantiated' in str(type_error)
def test__base_linear_model() -> None:
"""
Test `hsr4hci.typehinting.BaseLinearModel`.
"""
with pytest.raises(TypeError) as type_error:
BaseLinearModel() # type: ignore
assert 'Protocols cannot be instantiated' in str(type_error)
def test__base_linear_model_cv() -> None:
"""
Test `hsr4hci.typehinting.BaseLinearModelCV`.
"""
with pytest.raises(TypeError) as type_error:
BaseLinearModelCV() # type: ignore
assert 'Protocols cannot be instantiated' in str(type_error)
| 2.484375 | 2 |
From_Colab/Test/Portfolio_UpDownZero.py | Jun-bitacademy/PyPortfolioOpt | 0 | 12788384 | '''
Strategy : looking at a single trading day, the price rose in 42% of cases, fell in 46%, and was unchanged in the remaining 12%. Verify this.
Algorithm : portfolio optimisation using the PyPortfolioOpt library
            (max sharpe, risk, return, funds remaining)
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import FinanceDataReader as fdr
import datetime
from pykrx import stock
import requests
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices
# Load all of today's KOSPI & KOSDAQ tickers
today = datetime.datetime.today().strftime("%Y%m%d")
kospi = stock.get_market_fundamental_by_ticker(today, market='KOSPI').index
kosdaq = stock.get_market_fundamental_by_ticker(today, market='KOSDAQ').index
stocks = kospi.append(kosdaq)
def up_down_zero(code): # return the fractions of up / down / unchanged days for the given ticker over the past year
today = datetime.datetime.today().strftime("%Y-%m-%d")
year = today[0:4]
month_day = today[4:]
one_year_ago = str(int(year) - 1) + month_day
data = fdr.DataReader(code, one_year_ago)[['Close']]
data_rtn = data.pct_change()
up = 0
nothing = 0
down = 0
for i, date in enumerate(data.index):
if data_rtn.Close.iloc[i] > 0:
up = up + 1
elif data_rtn.Close.iloc[i] == 0:
nothing = nothing + 1
else:
down = down + 1
total_days = len(data_rtn.index)
return up / total_days, down / total_days, nothing / total_days
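# Illustrative call (the ticker is an example only):
#   up_p, down_p, zero_p = up_down_zero('005930')  # fractions over the past year, summing to 1.0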
def get_up_down_zero_df(stocks): # given a list of tickers, return a DataFrame of up/down/unchanged probabilities
up_list = []
down_list = []
zero_list = []
for i in stocks:
temp = up_down_zero(i)
up_list.append(temp[0])
down_list.append(temp[1])
zero_list.append(temp[2])
    # build the DataFrame (column names kept in Korean since they are referenced below)
    up_down_zero_df = pd.DataFrame()
    up_down_zero_df['종목 코드'] = stocks # ticker code
    up_down_zero_df['상승 확률'] = up_list # fraction of days with a positive daily return
    up_down_zero_df['하락 확률'] = down_list # fraction of days with a negative daily return
    up_down_zero_df['변동 없는 확률'] = zero_list # fraction of days with a zero daily return
    up_down_zero_df['상승 확률 높은 순위'] = up_down_zero_df['상승 확률'].rank(ascending=False) # rank, highest up-probability first
up_down_zero_df = up_down_zero_df.sort_values(by='상승 확률 높은 순위')
return up_down_zero_df
up_down_zero_df = get_up_down_zero_df(stocks)
# NOTE: idx_list was not defined in the original notebook. As an assumption, take the index
# labels of the top-10 tickers by up-probability (the DataFrame is already sorted by that rank).
idx_list = up_down_zero_df.index[:10]
symbol_udz = []
for i in idx_list:
    symbol_udz.append(up_down_zero_df.loc[i][0])
# store the selected tickers for the optimisation step
assets = np.array(symbol_udz)
start_date = '2018-07-21'
end_date = '2021-07-21'
df = pd.DataFrame()
for stock in assets:
df[stock] = fdr.DataReader(stock, start_date, end_date)['Close']
df_dropna = df.dropna(axis = 1)
mu = expected_returns.mean_historical_return(df_dropna)
S = risk_models.sample_cov(df_dropna)
ef = EfficientFrontier(mu, S, solver="SCS")
weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
print(ef.portfolio_performance(verbose=True))
portfolio_val = 15000000
latest_prices = get_latest_prices(df_dropna)
weights = cleaned_weights
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=portfolio_val)
allocation, leftover = da.lp_portfolio(verbose=False)
#rmse = da._allocation_rmse_error(verbose=False)
print('Discrete Allocaion: ', allocation)
print('Funds Remaining: ', leftover, ' KRW')
discrete_allocation_list = []
for symbol in allocation:
discrete_allocation_list.append(allocation.get(symbol))
portfolio_df = pd.DataFrame(columns = ['company_Ticker', 'Discrete_val_'+str(portfolio_val)])
portfolio_df['company_Ticker'] = allocation
portfolio_df['Discrete_val_'+str(portfolio_val)] = discrete_allocation_list
portfolio_df_sorted = portfolio_df.sort_values('Discrete_val_'+str(portfolio_val), ascending = False)
portfolio_df_sorted = portfolio_df_sorted.reset_index(drop=True)
print('Funds Remaining: ', leftover, ' KRW')
print(ef.portfolio_performance(verbose=True))
# print('Allocation has RMSE: {:.3f}'.format(rmse))  # disabled: rmse is only defined if the commented-out _allocation_rmse_error call above is restored | 2.828125 | 3 |
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_json_generic.py | usegalaxy-no/usegalaxy | 1 | 12788385 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_json_generic
short_description: Config Fortinet's FortiOS and FortiGate with json generic method.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify json feature and generic category.
      Examples include all parameters; values need to be adjusted to datasources before usage.
Tested with FOS v6.0.4
version_added: "2.9"
author:
- <NAME> (@frankshen01)
- <NAME> (@fgtdev-hblu)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
json_generic:
description:
- json generic
default: null
type: dict
suboptions:
dictbody:
description:
- Body with YAML list of key/value format
type: dict
jsonbody:
description:
- Body with JSON string format, will always give priority to jsonbody
type: str
method:
description:
- HTTP methods
type: str
choices:
- GET
- PUT
- POST
- DELETE
path:
description:
- URL path, e.g./api/v2/cmdb/firewall/address
type: str
specialparams:
description:
- Extra URL parameters, e.g.start=1&count=10
type: str
'''
EXAMPLES = '''
---
# host
# [fortigates]
# fortigate01 ansible_host=192.168.52.177 ansible_user="admin" ansible_password="<PASSWORD>"
# [fortigates:vars]
# ansible_network_os=fortinet.fortios.fortios
# sample1.yml
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: test add with string
fortios_json_generic:
vdom: "{{ vdom }}"
json_generic:
method: "POST"
path: "/api/v2/cmdb/firewall/address"
jsonbody: |
{
"name": "111",
"type": "geography",
"fqdn": "",
"country": "AL",
"comment": "ccc",
"visibility": "enable",
"associated-interface": "port1",
"allow-routing": "disable"
}
register: info
- name: display vars
debug: msg="{{info}}"
# sample2.yml
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: test delete
fortios_json_generic:
vdom: "{{ vdom }}"
json_generic:
method: "DELETE"
path: "/api/v2/cmdb/firewall/address/111"
register: info
- name: display vars
debug: msg="{{info}}"
- name: test add with dict
fortios_json_generic:
vdom: "{{ vdom }}"
json_generic:
method: "POST"
path: "/api/v2/cmdb/firewall/address"
dictbody:
name: "111"
type: "geography"
fqdn: ""
country: "AL"
comment: "ccc"
visibility: "enable"
associated-interface: "port1"
allow-routing: "disable"
register: info
- name: display vars
debug: msg="{{info}}"
- name: test delete
fortios_json_generic:
vdom: "{{ vdom }}"
json_generic:
method: "DELETE"
path: "/api/v2/cmdb/firewall/address/111"
register: info
- name: display vars
debug: msg="{{info}}"
- name: test add with string
fortios_json_generic:
vdom: "{{ vdom }}"
json_generic:
method: "POST"
path: "/api/v2/cmdb/firewall/address"
jsonbody: |
{
"name": "111",
"type": "geography",
"fqdn": "",
"country": "AL",
"comment": "ccc",
"visibility": "enable",
"associated-interface": "port1",
"allow-routing": "disable"
}
register: info
- name: display vars
debug: msg="{{info}}"
- name: test speical params
fortios_json_generic:
vdom: "{{ vdom }}"
json_generic:
method: "PUT"
path: "/api/v2/cmdb/firewall/policy/1"
specialparams: "action=move&after=2"
register: info
- name: display vars
debug: msg="{{info}}"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
import json
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def json_generic(data, fos):
vdom = data['vdom']
json_generic_data = data['json_generic']
# Give priority to jsonbody
data = ""
if json_generic_data['jsonbody']:
data = json.loads(json_generic_data['jsonbody'])
else:
if json_generic_data['dictbody']:
data = json_generic_data['dictbody']
return fos.jsonraw(json_generic_data['method'],
json_generic_data['path'],
data=data,
specific_params=json_generic_data['specialparams'],
vdom=vdom)
def is_successful_status(resp):
return 'status' in resp and resp['status'] == 'success' \
or 'http_method' in resp and resp['http_method'] == 'DELETE' \
and 'http_status' in resp and resp['http_status'] == 404
def fortios_json(data, fos):
if data['json_generic']:
resp = json_generic(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"enable_log": {"required": False, "type": bool},
"vdom": {"required": False, "type": "str", "default": "root"},
"json_generic": {
"required": False, "type": "dict", "default": None,
"options": {
"dictbody": {"required": False, "type": "dict"},
"jsonbody": {"required": False, "type": "str"},
"method": {"required": True, "type": "str",
"choices": ["GET", "PUT", "POST",
"DELETE"]},
"path": {"required": True, "type": "str"},
"specialparams": {"required": False, "type": "str"}
}
}
}
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
if 'enable_log' in module.params:
connection.set_option('enable_log', module.params['enable_log'])
else:
connection.set_option('enable_log', False)
fos = FortiOSHandler(connection, module)
is_error, has_changed, result = fortios_json(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Unable to precess the request, please provide correct parameters and make sure the path exists.", meta=result)
if __name__ == '__main__':
main()
| 1.398438 | 1 |
poisson_1d.py | lujiarui/gan_pde | 1 | 12788386 | <reponame>lujiarui/gan_pde<gh_stars>1-10
"""[4.2.1] Poisson equation only admits weak solution
Implemented with Pytorch. (torch version >= 1.8.1)
* Variable interpretation:
- x: torch.Tensor, (Number of points, dimension)
-
"""
import sys, os
from copy import deepcopy
import random
from random import randint
import json
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import grad, Variable
import matplotlib.pyplot as plt
from train import train
# >>> global params definition >>>
PARAMS = {
'name': 'Poisson_1d',
'dim': 2,
'left boundary': -1,
'right boundary': 1,
'K_primal': 1,
'K_adv': 1,
'lr_primal': 1e-1,
'lr_adv': 3e-1,
'Nr': 10000,
'Nb': 400,
'alpha': None,
'use elu': True,
'n_iter': 20000,
}
PARAMS['alpha'] = PARAMS['Nb'] * 10000
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# <<< global params definition <<<
torch.set_default_tensor_type(torch.DoubleTensor)
random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
np.random.seed(0)
# >>> Numerical function definition >>>
def g(x):
"""
R.H.S of PDE
"""
return torch.sin(x[:,0]) + 4. * torch.sin(4. * x[:,0]) - 8. * torch.sin(8. * x[:,0]) + 16. * torch.sin(24. * x[:,0])
def g0(x):
"""
:g_0 a component of grount truth solution
"""
return torch.sin(x[:,0]) + torch.sin(4. * x[:,0]) / 4. - torch.sin(8. * x[:,0]) / 8. + torch.sin(24. * x[:,0]) / 36.
def u(x):
"""
:u Dirichlet boundary / ground truth weak solution
"""
_device = x.device
_m1 = torch.Tensor([-1.]).reshape(1,1).to(_device)
_1 = torch.Tensor([1.]).reshape(1,1).to(_device)
_g0_m1 = g0(_m1)
_g0_1 = g0(_1)
c0 = -(_g0_m1 + _g0_1) / 2.
c1 = (_g0_m1 - _g0_1) / 2.
return g0(x) + c1 * x[:,0] + c0 # broadcast
# -----------------------------------------------------------------------------------------
def loss_all(xr: torch.Tensor, xb: torch.Tensor, u_theta, phi_eta, alpha, device):
"""
Args:
torch.Tensor: (Nr x d)
torch.Tensor: (Nb x d)
Network instance
Network instance
alpha: weight constant
Returns:
torch.Tensor: (1 x 1)
"""
# Calculate derivative w.r.t. to x
xr = Variable(xr, requires_grad=True)
# Calculate derivative w.r.t. to x[x1, x2, ...]
_u_theta = u_theta(xr).squeeze()
_out_u_theta = torch.sum(_u_theta)
_grad_u_theta = grad(_out_u_theta, xr, create_graph=True)[0]
# feed forward
_phi_eta = phi_eta(xr).squeeze() # comp. graph => loss
_u_theta_bdry = u_theta(xb).squeeze() # comp. graph => loss
_out_phi_eta = torch.sum(_phi_eta)
_grad_phi_eta = grad(_out_phi_eta, xr, create_graph=True)[0]
# >>> PDE-specific: calculate for I (integrand) >>>
t1_list = []
for i in range(xr.shape[1]):
for j in range(xr.shape[1]):
t1_list.append(_grad_u_theta[:, i] * _grad_phi_eta[:, j])
I = sum(t1_list) - g(xr) * _phi_eta
# <<< PDE-specific: calculate for I (integrand) <<<
loss_int = 2 * (torch.log(I.norm()) - torch.log(_phi_eta.norm()) )
loss_bdry = (_u_theta_bdry - 0.).norm()**2 / xb.shape[0]
return loss_int + loss_bdry * alpha
def _loss_all(xr: torch.Tensor, xb: torch.Tensor, u_theta, phi_eta, alpha, device):
"""
Args:
torch.Tensor: (Nr x d)
torch.Tensor: (Nb x d)
Network instance
Network instance
alpha: weight constant
Returns:
torch.Tensor: (1 x 1)
"""
# Calculate derivative w.r.t. to x
xr = Variable(xr, requires_grad=True)
# Calculate derivative w.r.t. to x[x1, x2, ...]
_u_theta = u_theta(xr)
_out_u_theta = torch.sum(_u_theta)
_grad_u_theta = grad(_out_u_theta, xr, create_graph=True)[0]
# feed forward
_phi_eta = phi_eta(xr) # comp. graph => loss
_u_theta_bdry = u_theta(xb) # comp. graph => loss
_out_phi_eta = torch.sum(_phi_eta)
_grad_phi_eta = grad(_out_phi_eta, xr, create_graph=True)[0]
# >>> PDE-specific: calculate for I (integrand) >>>
t1 = _grad_u_theta * _grad_phi_eta
I = t1 - g(xr) * _phi_eta
# <<< PDE-specific: calculate for I (integrand) <<<
loss_int = 2. * (torch.log(I.norm()) - torch.log(_phi_eta.norm()))
loss_bdry = (_u_theta_bdry - 0.).norm()**2 / xb.shape[0]
return loss_int + loss_bdry * alpha
# <<< Numerical function definition <<<
if __name__ == '__main__':
print('Use device: ', DEVICE)
train(params=PARAMS,
g=u,
loss_func=loss_all,
device=DEVICE,
# valid=True,
# model_path='./WAN_Poisson_1d_2.pt'
)
| 2.34375 | 2 |
problems/csp/real/Fillomino.py | xcsp3team/pycsp3 | 28 | 12788387 | <filename>problems/csp/real/Fillomino.py
"""
See https://en.wikipedia.org/wiki/Fillomino
Example of Execution:
python3 Fillomino.py -data=Fillomino-08.json
"""
from pycsp3 import *
puzzle = data
n, m = len(puzzle), len(puzzle[0])
preassigned = dict() # we collect pre-assigned starting squares for the first occurrences of specified values
for i in range(n):
for j in range(m):
if puzzle[i][j] != 0 and puzzle[i][j] not in preassigned: # the second part is important
preassigned[puzzle[i][j]] = (i + 1, j + 1) # +1 because of the border
nRegions = len(preassigned) + (n * m - sum(preassigned.keys()))
nValues = max(*preassigned.keys(), n * m - sum(preassigned.keys())) + 1 # this is the maximal distance + 1
def tables():
t = {(1, ANY, ANY, ANY, ANY, ANY, 0, ANY, ANY, ANY, ANY)}
for k in range(nRegions):
t.add((gt(1), k, k, ANY, ANY, ANY, 0, 1, ANY, ANY, ANY))
t.add((gt(1), k, ANY, k, ANY, ANY, 0, ANY, 1, ANY, ANY))
t.add((gt(1), k, ANY, ANY, k, ANY, 0, ANY, ANY, 1, ANY))
t.add((gt(1), k, ANY, ANY, ANY, k, 0, ANY, ANY, ANY, 1))
for v in range(1, nValues):
t.add((gt(1), k, k, ANY, ANY, ANY, v, v - 1, ANY, ANY, ANY))
t.add((gt(1), k, ANY, k, ANY, ANY, v, ANY, v - 1, ANY, ANY))
t.add((gt(1), k, ANY, ANY, k, ANY, v, ANY, ANY, v - 1, ANY))
t.add((gt(1), k, ANY, ANY, ANY, k, v, ANY, ANY, ANY, v - 1))
return t, {(v, v, k, k) for v in range(nValues) for k in range(nRegions)} | {(v, ne(v), k, ne(k)) for v in range(nValues) for k in range(nRegions)}
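# Reading of table_connection above (annotation added for clarity): each tuple lines up with
# (y[i][j], x[i][j], x-left, x-right, x-up, x-down, d[i][j], d-left, d-right, d-up, d-down).
# In a region of size > 1, a square at distance v must have an orthogonal neighbour in the
# same region k at distance v-1 (the starting square, d = 0, needs a neighbour at distance 1),
# which is what keeps every square of a region connected to its starting square.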
table_connection, table_region = tables()
# x[i][j] is the region (number) where the square at row i and column j belongs (borders are inserted for simplicity)
x = VarArray(size=[n + 2, m + 2], dom=lambda i, j: {-1} if i in {0, n + 1} or j in {0, m + 1} else range(nRegions))
# y[i][j] is the value of the square at row i and column j
y = VarArray(size=[n + 2, m + 2],
dom=lambda i, j: {-1} if i in {0, n + 1} or j in {0, m + 1} else {puzzle[i - 1][j - 1]} if puzzle[i - 1][j - 1] != 0 else range(nValues))
# d[i][j] is the distance of the square at row i and column j wrt the starting square of the (same) region
d = VarArray(size=[n + 2, m + 2], dom=lambda i, j: {-1} if i in {0, n + 1} or j in {0, m + 1} else range(nValues))
# s[k] is the size of the kth region
s = VarArray(size=nRegions, dom=range(nValues))
satisfy(
# setting starting squares of pre-assigned regions
[(x[i][j] == k, d[i][j] == 0, s[k] == sz) for k, (sz, (i, j)) in enumerate(preassigned.items())],
# setting values according to the size of the regions
[y[i][j] == s[x[i][j]] for i in range(1, n + 1) for j in range(1, m + 1) if puzzle[i - 1][j - 1] == 0 or (i, j) not in preassigned.values()],
# controlling the size of each region
[s[k] == Sum(x[i][j] == k for i in range(1, n + 1) for j in range(1, m + 1)) for k in range(nRegions)],
# ensuring connection
[(y[i][j], x.cross(i, j), d.cross(i, j)) in table_connection for i in range(1, n + 1) for j in range(1, m + 1)],
# two regions of the same size cannot have neighbouring squares
[
[(y[i][j], y[i][j + 1], x[i][j], x[i][j + 1]) in table_region for i in range(1, n + 1) for j in range(1, m)],
[(y[i][j], y[i + 1][j], x[i][j], x[i + 1][j]) in table_region for j in range(1, m + 1) for i in range(1, n)]
]
)
""" Comments
1) cross() is a predefined method on matrices of variables (of type ListVar).
Hence, x.cross(i, j) is equivalent to :
[t[i][j], t[i][j - 1], t[i][j + 1], t[i - 1][j], t[i + 1][j]]
2) gt(1) when building a tuple allows to handle all tuples with a value > 1
Later, it will be possible to generate smart tables instead of starred tables
"""
| 3.359375 | 3 |
SequenceAnnotation/phate_fastaSequence.py | carolzhou/multiPhATE | 19 | 12788388 | <reponame>carolzhou/multiPhATE
#############################################################
# Module: phate_fastaSequence.py
#
# Programmer: <NAME>
#
# Module containing classes and methods for representing a multi-fasta sequence and associated methods
# Classes and methods:
# fasta
# queryNRsequence(gi,nrLocation)
# enterGeneData(geneData/dict)
# assignType(type)
# assignHeader(hdr)
# assignCompoundHeader(hdr,parent)
# assignCustomHeader(customHdr)
# removeEMBOSSpostfix
# removeTerminalAsterisk
# getFullHeader
# getCleanHeader
# getShortHeader
# getTruncHeader
# getCompoundHeader
# getBlastHeader
# getHeader(hdrType)
# getCustomHeader
# assignSequence(seq)
# consolidate
# getSequenceLength
# getSubsequence(start,end)
# reverseComplement
# addAnnotation
# getStartCodon
# verifyProkaryoticStartCodon
# highlightAllStartCodons
# printFasta
# printFasta2file(fileH)
# printFasta2file_case(fileH,case)
# printAll
# printAll2file
# splitToList(lineLength)
# getAnnotationlist
# printAnnotations
# printAnnotations2file(fileH)
# multiFasta
# reportStats
# countParalogs
# addFasta(newFa)
# addFastas(lines,mtype)
# addFastasFromFile(mtype)
# addAnnotation(newAnnot)
# deleteFasta(oldFa)
# printMultiFasta
# printMultiFasta2file(fileH)
# printMultiFasta2file_case(fileH,case)
# printAll
# printAll2file(fileH)
# renumber
# matchHeader(hdr)
# removeEMBOSSpostfix
# removeTerminalAsterisk
#
####################################################################
# This code was developed by <NAME> at Lawrence Livermore National Laboratory.
# THIS CODE IS COVERED BY THE BSD LICENSE. SEE INCLUDED FILE BSD.PDF FOR DETAILS.
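
# A minimal usage sketch (illustrative only; the header and sequence below are made up):
#
#     myFasta = fasta()
#     myFasta.moleculeType = 'gene'
#     myFasta.assignHeader('gene_001 hypothetical example')
#     myFasta.assignType('nt')
#     myFasta.assignSequence('atgaaacccgggtttaaatga')
#     print(myFasta.getShortHeader(), myFasta.getSequenceLength(), myFasta.verifyProkaryoticStartCodon())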
import re, string
import phate_annotation
from Bio import SeqIO
import os
DEBUG = False # For maximal verbosity
# For GFF formatting of output
EMPTY_COL = '.' # Any column without data get a '.'
SEQID_COL = 0 # First data column contains the sequence identifier: name of chromosome or scaffold
SOURCE_COL = 1 # Name of the program that generated this feature, or the data source (database or project name)
TYPE_COL = 2 # Type of feature; must be a term or accession from the SOFA sequence ontology (according to specs)
START_COL = 3 # Start position on feature, with sequence numbering starting at 1
END_COL = 4 # Start position on feature, with sequence numbering starting at 1
SCORE_COL = 5 # A floating point value
STRAND_COL = 6 # Defined as + (forward) or - (reverse)
PHASE_COL = 7 # One of '0', '1', or '2'. '0' indicates that the 1st base of the feature is the first base of a codon....
ATTRIBUTES_COL = 8 # A semicolon-separated list of tag-value pairs, providing additional information about each feature.
# Some of these tags are predefined, eg, ID, Name, Alias, Parent (see GFF documentation).
GFF_SEQ_NAME = "unknown" # default
GFF_SOURCE = "PhATE"
GFF_SCORE = EMPTY_COL # blast hit stats will not be reported here
GFF_PHASE = '0' # set to zero as per standard
GFF_VERSION = "gff-version 3"
# Patterns
p_extra = re.compile('(\s)|([0-9])|(\*)') # characters often included in sequence text
p_header = re.compile('^>(.*)')
p_comment = re.compile('^#')
p_up2space = re.compile('^\S*') # find 1st instance of everything that's not white space (check this)
p_startCodon = re.compile('atg') # standard start codon sequence (recall: storing sequence as lower case)
# Verbosity
CLEAN_RAW_DATA = os.environ["CLEAN_RAW_DATA"]
PHATE_WARNINGS = os.environ["PHATE_WARNINGS"]
PHATE_MESSAGES = os.environ["PHATE_MESSAGES"]
PHATE_PROGRESS = os.environ["PHATE_PROGRESS"]
#######################################################################################
class fasta(object):
# Class fasta represents any kind of fasta sequence. User can specify a parent sequence,
# comprising a header, for identifying a child relationship: eg, gene has a contig parent.
# Fasta header is stored as text only, without the conventional '>'.
# Get and print methods return header with the initial '>' symbol.
# There is a need for multiple headers (e.g., header and shortHeader) because...
# the header may be truncated after the first white space; the full header is
# the entire header string provided by the user. (Note: RAST truncates header after space)
# Sequence can be entered as a list of lines or as continuous sequence in a single string.
# Sequence can be converted back & forth between lines vs. single string.
def __init__(self):
self.header = "unknown" # full, original header
self.cleanHeader = "" # remove all special chars from original header
self.truncHeader = "" # truncated after N (self.truncation) characters
self.shortHeader = "" # truncated after 1st space (consistent w/RAST)
self.compoundHeader = "" # header with parentSequence (eg, contig name) appended
self.blastHeader = "" # header that results from blast, which truncates after 1st space
self.sequentialHeader = "hdr" # an assigned, benign header that will not break 3rd party codes
self.customHeader = "" # a customized header; could be anything, but written for pVOGs
self.name = "none" # name will be geneCaller + number, if gene|protein from gene call
self.sequence = "" # store sequence as continuous lower-case string, sans numbers, white space
self.sequenceLength = 0 # length of sequence
self.sequenceType = "unknown" # "nt" or "aa"; not "gene" or "dna" or the like
self.moleculeType = "unknown" # eg, 'contig', 'peptide', 'protein', or 'gene'
self.parentSequence = "" # eg, for gene, the sequence of the contig that the gene is on; ###*** should not duplicate!!!
self.parentSequenceLength = 0 # need this for passing info to method for printing GFF output; ###*** can now delete?
self.truncation = 15 # number of characters (N) in header to retain, by default
self.annotationList = [] # list of annotationRecord objects
self.paralogList = [] # list of paralog objects (header + blast hit)
self.startCodonCount = 0 # calculated possible start codons in forward strand (for genes)
self.codonStartLocs = [] # start positions of 'atg' (or alternate) sequences (for genes)
self.start = 0 # start on contig or gene (ie, parent structure)
self.end = 0 # end on contig or gene (ie, parent structure)
self.parentName = '' # name of parent sequence (eg, contig name or assigned gene name)
self.parentStart = 0 # start position of parent (eg, parent gene) on its parent structure (eg, contig)
self.parentEnd = 0 # end position of parent (eg, parent gene) on its parent structure (eg, contig)
self.parentStrand = '' # strand of parent sequence (eg, strand that gene was called on)
self.strand = '' # strand, if gene (or protein, referring to parent gene)
self.nrHeader = "" # "combined" header from NR database sequence entry
self.nrGInumber = "" # NCBI gi identifier
self.geneCallFile = "unknown" # name of file containing gene calls
self.geneCaller = "unknown" # name of gene caller used to predict gene
self.geneCallRank = 0 # priority label of gene call: lower number is more reliable (ie, 0 is best)
self.nextFastaNumber = 0 # for assigning sequential, benign header (sequentialHeader)
self.order = 0 # order in multi-fasta list (i.e., order in which object was added by code)
self.number = 0 # number in list, if input in that manner (e.g., gene call; warning: external data)
self.pVOGassociationList = [] # list of pVOGs associated with this fasta
self.pVOGcount = 0 # for dignostics in constructing pVOG fasta data set
self.contig = "unknown" # name of contig this fasta is associated with
def queryNRsequence(self,gi,nrLocation): # Specific to NR; any other database has different format
# Given an NCBI gi identifier and the dir/file of an NR database, pull the sequence from NR database
if gi != "" and int(gi) > 0:
giString = "gi\|" + gi + "\|"
else:
print("phate_fastaSequence says, WARNING: problem with gi")
return(0)
for record in SeqIO.parse(nrLocation,"fasta"):
match = re.findall(giString,record.id)
if match:
self.assignHeader(record.id)
self.assignSequence(str(record.seq))
def enterGeneData(self,geneData): #*** should create a gene class, which "inherits" fasta
if isinstance(geneData,dict): #*** should pass **kvargs and check for keys
if "header" in list(geneData.keys()):
self.assignHeader(geneData["header"])
if "name" in list(geneData.keys()):
self.name = geneData["name"]
if "sequence" in list(geneData.keys()):
self.sequence = geneData["sequence"]
if "type" in list(geneData.keys()):
self.sequenceType = geneData["type"]
if "parentSequence" in list(geneData.keys()):
self.parentSequence = geneData["parentSequence"]
if self.parentSequence != '':
self.parentSequenceLength = len(self.parentSequence)
else:
self.parentSequenceLength = -99 #***
print("phate_fastaSequence says, WARNING: sequence not entered for parent")
if "parentName" in list(geneData.keys()):
self.parentName = geneData["parentName"]
if "parentStart" in list(geneData.keys()):
self.parentStart = geneData["parentStart"]
if "parentEnd" in list(geneData.keys()):
self.parentEnd = geneData["parentEnd"]
if "order" in list(geneData.keys()):
self.order = geneData["order"]
self.moleculeType = 'gene'
return True
else:
return False
def assignType(self,mtype):
if mtype.lower() == "nt" or mtype.lower() == "nucl" or mtype.lower() == "nucleotide":
self.sequenceType = mtype.lower()
elif mtype.lower() == "aa" or mtype.lower() == "amino-acid" or mtype.lower() == "protein" or mtype.lower() == "peptide":
self.sequenceType = mtype.lower()
else:
self.sequenceType = "unknown"
def assignHeader(self,hdr): # Remove symbols and spaces, which may cause problems for open-source codes
cleanHeader = hdr.lstrip('>') # Remove '>' symbol if present; store header text only
self.header = cleanHeader # Store full, original header, but without the '>'
splitSpace = cleanHeader.split(' ') # Note: Blast truncates after the 1st space
self.blastHeader = splitSpace[0]
cleanHeader = re.sub(' ', '_', cleanHeader)
cleanHeader = re.sub('[-();:?\.\[\]]','',cleanHeader)
self.cleanHeader = cleanHeader
self.truncHeader = self.header[0:self.truncation]
# Assign a benign, sequential header
self.sequentialHeader = self.moleculeType + '-' + str(self.order)
match = p_up2space.match(self.header)
if match:
self.shortHeader = match.group()
else:
self.shortHeader = self.truncHeader
self.compoundHeader = self.header
if self.parentName:
self.compoundHeader = self.compoundHeader + '_' + self.parentName
def assignCompoundHeader(self,hdr,parent):
# Creates a compound header; user should self.assignHeader first, then input self.cleanHeader as hdr
self.compoundHeader = parent + '_' + hdr
def assignCustomHeader(self,customHdr):
self.customHeader = customHdr
def assignContig(self,contigName):
self.contig = contigName
    def assignSequence(self,seq): # Input is single string or a list of strings
        if isinstance(seq,str):
            self.sequence = seq.lower()
        elif isinstance(seq,list):
            self.sequence = ''.join(seq).lower() # join first; a list has no lower() method
        else:
            return False
        self.consolidate() # Remove white space & numbers, if present, and collapse
        self.sequenceLength = len(self.sequence) # this assignment was unreachable in the original
        return True
def removeEMBOSSpostfix(self): # Remove the pesky "_1" that EMBOSS adds
self.assignHeader(self.header.rstrip("_1 "))
def removeTerminalAsterisk(self):
self.sequence = self.sequence.rstrip("* ")
def getFullHeader(self):
return ('>' + self.header) # Add '>' symbol
def getCleanHeader(self):
return ('>' + self.cleanHeader)
def getTruncHeader(self):
return ('>' + self.truncHeader)
def getShortHeader(self): #
return ('>' + self.shortHeader)
def getCompoundHeader(self): # parentSequence (e.g., contig name) is appended to header
return ('>' + self.compoundHeader)
def getBlastHeader(self):
return ('>' + self.blastHeader)
def getSequentialHeader(self):
return ('>' + self.sequentialHeader)
def getCustomHeader(self):
return ('>' + self.customHeader)
def getHeader(self,hdrType):
headerType = hdrType.lower()
if headerType == 'full':
return ('>' + self.header)
elif headerType == 'clean':
return ('>' + self.cleanHeader)
elif headerType == 'trunc':
return ('>' + self.truncHeader)
elif headerType == 'short':
return ('>' + self.shortHeader)
elif headerType == 'compound':
return ('>' + self.compoundHeader)
elif headerType == 'blast':
return ('>' + self.blastHeader)
elif headerType == 'sequential':
return ('>' + self.sequentialHeader)
elif headerType == 'custom':
return ('>' + self.customHeader)
else:
if PHATE_WARNINGS == 'True':
print("phate_fastaSequence says, WARNING: Invalid header type:", hdrType, "--Choose full, clean, trunc, short, compound, blast")
def getStartCodon(self):
if self.sequence != "":
return self.sequence[0:3]
else:
return False
def verifyProkaryoticStartCodon(self):
if self.sequence != "":
testCodon = self.sequence[0:3].lower()
if testCodon == "atg":
return "common"
elif testCodon == "gtg" or testCodon == "ttg":
return "alternate"
elif testCodon == "att" or testCodon == "ctg":
return "rare"
else:
return "incorrect"
def highlightAllStartCodons(self):
codonStarts = []
seqList = list(self.sequence)
codonsHighlighted = ""
if self.sequence != "":
codonStarts = [m.start() for m in re.finditer('atg',self.sequence)]
self.startCodonCount = len(codonStarts)
for start in codonStarts:
seqList[start] = seqList[start].upper()
seqList[start+1] = seqList[start+1].upper()
seqList[start+2] = seqList[start+2].upper()
codonsHighlighted = ''.join(seqList)
self.codonStartLocs = codonStarts
return codonsHighlighted
def consolidate(self): # Remove white space and collapse sequence
        self.sequence = p_extra.sub('',self.sequence)
def getSequenceLength(self):
return (len(self.sequence)) # Report how long the sequence is
def getSubsequence(self,start,end): # Recall: string position numbering starts with 0!
return (self.sequence[int(start):int(end)])
def getPVOGassociationList(self):
return (self.pVOGassociationList)
def reverseComplement(self):
if self.sequenceType.lower() == "nt":
complements = string.maketrans('acgtrymkbdhvACGTRYMKBDHV','tgcayrkmvhdbTGCAYRKMVHDB')
revCompl = self.sequence.translate(complements)[::-1]
self.sequence = revCompl
return True
return False
def addAnnotation(self,newAnnot):
self.annotationList.append(newAnnot)
def printFasta(self):
hdr = self.getFullHeader()
seq = self.sequence
print(hdr)
print(seq)
def printFasta2file(self,FILE_HANDLE,headerType="short"):
if headerType.lower() == "compound":
hdr = self.getCompoundHeader()
elif headerType.lower() == "full":
hdr = self.getFullHeader()
elif headerType.lower() == "truncated":
hdr = self.getTruncHeader()
elif headerType.lower() == "short":
hdr = self.getShortHeader()
elif headerType.lower() == "blast":
hdr = self.getBlastHeader()
elif headerType.lower() == "sequential":
hdr = self.getSequentialHeader()
elif headerType.lower() == "custom":
hdr = self.getCustomHeader()
else:
hdr = self.getShortHeader()
seq = self.sequence
FILE_HANDLE.write("%s%s" % (hdr,"\n"))
FILE_HANDLE.write("%s%s" % (self.sequence,"\n"))
def printFasta2file_case(self,FILE_HANDLE,case,headerType="short"):
if headerType.lower() == "compound":
hdr = self.getCompoundHeader()
elif headerType.lower() == "full":
hdr = self.getFullHeader()
elif headerType.lower() == "truncated":
hdr = self.getTruncHeader()
elif headerType.lower() == "short":
hdr = self.getShortHeader()
elif headerType.lower() == "sequential":
hdr = self.getSequentialHeader()
else:
hdr = self.getShortHeader()
seq = self.sequence
if case.lower() == "upper":
seq = seq.upper()
FILE_HANDLE.write("%s%s" % (hdr,"\n"))
FILE_HANDLE.write("%s%s" % (seq,"\n"))
def printAll(self): # Dump everything: useful for testing
print("Header: ", self.header)
print("CleanHeader: ", self.cleanHeader)
print("TruncHeader: ", self.truncHeader)
print("ShortHeader: ", self.shortHeader)
print("CompoundHeader: ", self.compoundHeader)
print("BlastHeader: ", self.blastHeader)
print("SequentialHeader: ", self.sequentialHeader)
print("Name: ", self.name)
print("Type: ", self.sequenceType)
print("ParentName: ", self.parentName)
print("ParentSequence: ", self.parentSequence)
print("ParentSequenceLength: ", self.parentSequenceLength)
print("ParentStart: ", self.parentStart)
print("ParentEnd: ", self.parentEnd)
print("Order in multi-fasta list:", self.order)
print("Truncation: ", self.truncation)
print("Start codon count: ", self.startCodonCount)
print("Codon start locations:", end=' ')
if self.codonStartLocs:
for location in self.codonStartLocs:
print(" ", location, end=' ')
print('\n')
else:
print("none")
print("Sequence length is:", self.getSequenceLength())
count = 0
if self.annotationList:
count += 1
self.printAnnotations()
else:
print("There are no annotations")
count = 0
if self.paralogList:
            for paralog in self.paralogList:
count += 1
print("Paralog No.", count, ":", paralog)
else:
print("Paralog detection not yet in service") #***
print("Sequence:", self.sequence)
def printAll_tab(self):
tabLine = 'Header:' + self.header + '\tName:' + self.name + '\tType:' + self.sequenceType + '\tOrder:' + str(self.order)
print(tabLine)
if self.annotationList:
self.printAnnotations_tab()
else:
print("There are no annotations")
if len(self.sequence) < 1000:
print(self.sequence)
else:
print("Sequence too long to print. See file.")
def printAll2file_tab(self,FILE_HANDLE):
tabLine = 'Header:' + self.header + '\tName:' + self.name + '\tType:' + self.sequenceType + '\tOrder:' + str(self.order) + '\tparent:' + str(self.start) + '/' + str(self.end) + '/' + str(self.strand) + '/' + self.parentName + '\tlength: ' + str(len(self.sequence))
FILE_HANDLE.write("%s\n" % (tabLine))
if self.annotationList:
self.printAnnotations2file_tab(FILE_HANDLE)
else:
FILE_HANDLE.write("%s\n" % ("There are no annotations"))
if len(self.sequence) < 1000:
FILE_HANDLE.write("%s\n" % (self.sequence))
else:
FILE_HANDLE.write("%s\n" % ("Sequence too long to print. See file."))
def printData2file_GFF(self,FILE_HANDLE,feature,contigName):
# Note: pragmas are printed by calling method (ex: phate_genomeSequence/printGenomeData2file_GFF
GFF_annotationString = ''
GFF_type = "unknown"
FIRST = True
# Construct data fields
GFF_parentName = self.parentName # column 1
GFF_source = GFF_SOURCE # column 2
if self.moleculeType == 'peptide' or self.moleculeType == 'protein' or self.sequenceType == 'aa' or feature == 'CDS':
GFF_type = "CDS" # column 3
GFF_start = str(self.parentStart) # column 4
GFF_end = str(self.parentEnd) # column 5
elif self.moleculeType == 'gene' or self.sequenceType == 'nt' or feature == 'gene':
GFF_type = "gene" # column 3
GFF_start = str(self.start) # column 4
GFF_end = str(self.end) # column 5
GFF_score = GFF_SCORE # column 6
GFF_strand = self.strand # column 7
GFF_phase = GFF_PHASE # column 8
# Last one is complicated...
# Column 9 has many sub-fields, starting with sequence identifier and parent
if self.moleculeType == 'peptide' or self.moleculeType == 'protein' or self.sequenceType == 'aa':
GFF_identifier = "ID=" + self.header + "_cds"
elif self.moleculeType == 'gene' or self.sequenceType == 'nt':
GFF_identifier = "ID=" + self.header
# Write 1st 8 columns of data to file
FILE_HANDLE.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t" % (contigName,GFF_source,GFF_type,GFF_start,GFF_end,GFF_score,GFF_strand,GFF_phase))
# Write identifier to column 9
FILE_HANDLE.write("%s%s" % (GFF_identifier, ';'))
# Column 9 has many sub-fields, continuing with the annotation homologies
count = 1
if len(self.annotationList) > 0:
for annotation in self.annotationList:
annotNo = "annot" + str(count) + '='
if FIRST:
FILE_HANDLE.write("%s" % (annotNo))
annotation.returnGFFannotationRecord(FILE_HANDLE)
FIRST = False
else:
FILE_HANDLE.write("%s%s" % ('; ',annotNo))
annotation.returnGFFannotationRecord(FILE_HANDLE)
count += 1
        FILE_HANDLE.write("\n")
#*** Fill out this method as printAll() above
def printAll2file(self,FILE_HANDLE): # Dump everything: useful for testing
count = 0
FILE_HANDLE.write("%s%s%s" % ("Header:",self.header,"\n"))
FILE_HANDLE.write("%s%s%s" % ("ShortHeader:",self.shortHeader,"\n"))
FILE_HANDLE.write("%s%s%s" % ("TruncHeader:",self.truncHeader,"\n"))
FILE_HANDLE.write("%s%s%s" % ("BlastHeader:",self.blastHeader,"\n"))
FILE_HANDLE.write("%s%s%s" % ("SequentialHeader:",self.sequentialHeader,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Type:",self.sequenceType,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Order in list:",self.order,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Sequence length is:",self.getSequenceLength(),"\n"))
if (self.annotationList):
count += 1
FILE_HANDLE.write("%s%s%s" % ("Annotation Set No.",count,":\n"))
self.printAnnotations2file(FILE_HANDLE)
FILE_HANDLE.write("%s%s%s" % ("Sequence:",self.sequence,"\n"))
def splitToList(self,lineLength): # Returns a list of sequence lines
nextLine = ""
sequenceList = []
numberList = list(range(0,len(self.sequence),lineLength))
for number in numberList:
nextSegment = self.sequence[int(number):int(number+lineLength)]
sequenceList.append(nextSegment)
return(sequenceList)
def getAnnotationList(self):
return self.annotationList
    def printAnnotations(self): # Verbose output
count = 0
for annot in self.annotationList:
count += 1
print("Annotation item", count)
print('Source\tMethod\tType\t')
annot.printAnnotationRecord()
def printAnnotations_tab(self): # Streamlined output
if self.annotationList != []:
FIRST = True
for annot in self.annotationList:
if FIRST:
annot.printAnnotationRecord_tabHeader()
FIRST = False
annot.printAnnotationRecord_tab()
def printAnnotations2file_tab(self,FILE_HANDLE): # Streamlined output
if self.annotationList != []:
FIRST = True
for annot in self.annotationList:
if FIRST:
annot.printAnnotationRecord2file_tabHeader(FILE_HANDLE)
FIRST = False
annot.printAnnotationRecord2file_tab(FILE_HANDLE)
def printAnnotations2file(self,FILE_HANDLE):
count = 0
for annot in self.annotationList:
count += 1
FILE_HANDLE.write("%s%s\n" % ("Annotation item ",count))
annot.printAnnotationRecord2file(FILE_HANDLE)
#####################################################################################
class multiFasta(object):
# Class multiFasta is essentially a list of fasta objects.
# Usage: Draft or finished genome; set of genes or proteins
# The class keeps track of the order in which the fasta objects occur in the list.
# The order is needed so that it can be re-ordered based on, for example,
# ...shifting the start position on the genome.
def __init__(self):
self.fastaList = [] # list of fasta objects
self.annotationList = []
self.filename = 'unknown'
self.moleculeType = 'unknown'
self.contig = 'unknown' # redundant (use parentName)
self.parentName = '' # contig name for gene or protein set; genome name for contig set
def findStringInHeader(self,searchString):
found = False
for fasta in self.fastaList:
match_string2header = re.search(searchString,fasta.header)
if match_string2header:
if DEBUG:
print("Found the header:", fasta.header, "for string:", searchString)
return(fasta)
if PHATE_WARNINGS == 'True':
print("phate_fastaSequence says, WARNING: Fasta not found for", searchString)
return(0)
def reportStats(self):
stats = []
print("Sequence from file name:", self.filename)
stats.append("Sequence from file name:" + self.filename)
print("Number of fasta sequences:", len(self.fastaList))
        stats.append("Number of fasta sequences:" + str(len(self.fastaList)))
print("Number of annotations:", len(self.annotationList))
stats.append("Number of annotations:" + str(len(self.annotationList)))
print("Annotations:", self.annotationList)
print("No. of fasta sequences with paralogs:", self.countParalogs())
        stats.append("No. of fasta sequences with paralogs: " + str(self.countParalogs()))
return stats
def countParalogs(self): # count no. of fastas that have paralogs (not total paralog hits)
count = 0
for fasta in self.fastaList:
if fasta.paralogList:
count += 1
return count
def assignContig(self,contigName):
self.contig = contigName
def assignContig2all(self,contigName):
for fa in self.fastaList:
fa.assignContig(contigName)
def assignParent(self,parentName):
self.parentName = parentName
def assignCompoundHeaders(self,prependString):
for fa in self.fastaList:
fa.assignCompoundHeader(prependString)
def assignMoleculeType(self,molType):
for fasta in self.fastaList:
fasta.moleculeType = molType
def addFasta(self,newFa):
newFa.order = len(self.fastaList) + 1
newFa.moleculeType = self.moleculeType
self.fastaList.append(newFa)
def addFastas(self,lines,mtype): # Given multi-fasta file read into line set, create multi-fasta object
# This method is to be called in order to fill fasta lists (e.g., geneList)
sequence = ""
numberAdded = 0
if lines:
header = lines[0] # capture 1st header (should be first line in lineSet!)
lines.pop(0)
for line in lines:
match = re.search(p_header, line) # detect start of a new fasta
if match:
newFasta = fasta() # create new object
newFasta.moleculeType = self.moleculeType # elements of the list inherit moleculeType (eg, 'gene') from parent (the list)
numberAdded += 1 # no. of fasta objects added so far from lines
newFasta.order = numberAdded
newFasta.assignHeader(header) #
newFasta.assignSequence(sequence)
newFasta.sequenceLength = len(sequence)
newFasta.assignType(mtype)
self.addFasta(newFasta)
sequence = "" # reset
header = line # capture next header
continue
sequence += line
newFasta = fasta()
newFasta.moleculeType = self.moleculeType
numberAdded += 1 # no. of fasta objects added so far from lines
newFasta.order = numberAdded
newFasta.assignHeader(header)
newFasta.assignSequence(sequence)
newFasta.assignType(mtype)
self.addFasta(newFasta)
return numberAdded
def addFastasFromFile(self,mtype):
if self.filename == "unknown" or self.filename == '':
if PHATE_WARNINGS == 'True':
print("phate_fastaSequence says, ERROR: First you must set the filename in addFastasFromFile()")
else:
fastaFile = open(self.filename,"r")
fLines = fastaFile.read().splitlines()
self.addFastas(fLines,mtype)
def addAnnotation(self,newAnnot):
self.annotationList.append(newAnnot)
def deleteFasta(self,oldFasta):
for fa in self.fastaList:
if fa == oldFasta:
self.fastaList.remove(fa)
return True
return False
def printMultiFasta(self):
for fa in self.fastaList:
fa.printFasta()
def printMultiFasta2file(self,FILE_HANDLE):
for fa in self.fastaList:
fa.printFasta2file(FILE_HANDLE)
def printMultiFasta2file_case(self,FILE_HANDLE,case):
for fa in self.fastaList:
fa.printFasta2file_case(FILE_HANDLE,case)
def printMultiFasta2file_custom(self,FILE_HANDLE):
for fa in self.fastaList:
if fa.customHeader: # If the custom header is not empty string, then ok to print
fa.printFasta2file(FILE_HANDLE,"custom")
def printAll(self):
count = 0
print("Number of fastas:", len(self.fastaList))
for fa in self.fastaList:
count += 1
print("*****List item no.", count, ":")
fa.printAll()
print("\n")
def printAll2file(self,FILE_HANDLE):
count = 0
FILE_HANDLE.write("%s%s%s" % ("Number of fastas:",len(self.fastaList),"\n"))
for fa in self.fastaList:
count += 1
FILE_HANDLE.write("%s%s%s" % ("*****List item no.",count,":\n"))
fa.printAll2file(FILE_HANDLE)
FILE_HANDLE.write("%s" % ("\n"))
def renumber(self): # If any fasta object was deleted, then renumber to close gaps in ordering
newOrder = 0 # Caution: this will re-order fasta objects in sequence!
for fa in self.fastaList:
newOrder += 1
fa.order = newOrder
def matchHeader(self,hdr):
for fa in self.fastaList:
if fa.header == hdr:
return fa
return False
def removeEMBOSSpostfix(self): # remove pesky '_1' that EMBOSS adds to translated sequence
for prot in self.fastaList:
prot.removeEMBOSSpostfix()
def removeTerminalAsterisk(self): # remove '*' that EMBOSS adds to end of protein translation
for prot in self.fastaList:
prot.removeTerminalAsterisk()
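# ---------------------------------------------------------------------------
# Illustrative usage sketch (not a definitive driver for this module): shows
# how the multiFasta class above can be filled from a file and summarized.
# The file name and molecule type are assumptions; any multi-fasta file works.
def _example_multifasta_usage():
    genes = multiFasta()
    genes.filename = "example_genes.fasta"   # assumed path to a multi-fasta file
    genes.moleculeType = 'gene'
    genes.addFastasFromFile('nt')            # read the file and split the records
    genes.reportStats()                      # print counts to stdout
    return genes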
| 2 | 2 |
dataset/acquisition/convert_annotated_video_directory.py | MannyKayy/PlayableVideoGeneration | 1 | 12788389 | <reponame>MannyKayy/PlayableVideoGeneration
import subprocess
import glob
import os
import pandas as pd
import cv2
import shutil
import multiprocessing as mp
from pathlib import Path
from PIL import Image
from dataset.video import Video
video_extension = "mp4"
frames_extension = "png"
root_directory = "tmp"
output_directory = "tmp/tennis_ours"
annotations_filename = "dataset/acquisition/tennis_annotations/annotations.csv"
frameskip = 0
#frameskip = 4
processes = 8
target_size = [256, 96]
def acquire_sequence(video_capture: cv2.VideoCapture, capture_index: int, sequence_data, output_path: str):
'''
Acquires the video sequence specified by sequence data from the video_capture_stream and saves it to output_path
:param video_capture: video capture object representing the current input video
:param capture_index: index of the next frame that will be read from the video capture
:param sequence_data: (original_filename, begin_frame, end_frame, box top, box left, box bottom, box right) specifying
the sequence to acquire
:param output_path: path at which to save the captured sequence
:return: next index that will be read from the video_capture object
'''
if not video_capture.isOpened():
raise Exception("VideoCapture object is not open")
_, begin_frame, end_frame, top, left, bottom, right = sequence_data
if capture_index > begin_frame:
raise Exception(f"The current capture position {capture_index} succeeds the beginning of the sequence to acquire {begin_frame}\n"
f"Ensure that sequences in the same video are ordered by indexes and not overlapping")
# Seeks to the beginning of the sequence
while capture_index < begin_frame:
_, _ = video_capture.read()
capture_index += 1
assert(capture_index == begin_frame)
images = []
while capture_index <= end_frame:
read_correctly, frame = video_capture.read()
# Checks end of video
if not read_correctly:
break
capture_index += 1
# Converts frame to rgb and creates PIL image
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
current_image = Image.fromarray(frame).crop((left, top, right, bottom)).resize(target_size, Image.BICUBIC)
images.append(current_image)
# Skip the specified number of frames between frames to acquire
skipped_frames = 0
while skipped_frames < frameskip and capture_index <= end_frame:
read_correctly, _ = video_capture.read()
# Checks end of video
if not read_correctly:
break
skipped_frames += 1
capture_index += 1
frames_count = len(images)
actions = [None] * frames_count
rewards = [None] * frames_count
dones = [None] * frames_count
metadata = [None] * frames_count
# Saves the acquired video in the dataset format
acquired_video = Video()
acquired_video.add_content(images, actions, rewards, metadata, dones)
acquired_video.save(output_path, frames_extension)
return capture_index
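# Illustrative sketch (assumed values, not part of the acquisition pipeline):
# the shape of one sequence_data tuple as consumed by acquire_sequence() above.
# With frameskip = 4, roughly one frame out of every five in
# [begin_frame, end_frame] is kept.
def _example_sequence_data():
    # (original_filename, begin_frame, end_frame, top, left, bottom, right)
    return ("match1.mp4", 120, 480, 10, 0, 106, 256)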
def acquire_video(args):
annotations, begin_idx = args
annotations = annotations.sort_values("begin_frame")
opened_video_filename = os.path.join(root_directory, annotations.iloc[0]["original_filename"])
video_capture = cv2.VideoCapture(opened_video_filename)
capture_index = 0
# Elaborates all the sequences
for sequence_idx in range(len(annotations)):
# If the current sequence is from a new video, open it
current_video_filename = os.path.join(root_directory, annotations.iloc[sequence_idx]["original_filename"])
if current_video_filename != opened_video_filename:
video_capture = cv2.VideoCapture(current_video_filename)
capture_index = 0
opened_video_filename = current_video_filename
print(f"- Acquiring sequence {sequence_idx} in '{current_video_filename}'")
sequence_data = tuple(annotations.iloc[sequence_idx].to_numpy()[1:]) # Discards the index
output_path = os.path.join(output_directory, f"{sequence_idx + begin_idx:05d}")
capture_index = acquire_sequence(video_capture, capture_index, sequence_data, output_path)
if __name__ == "__main__":
# Reads the video annotations
annotations = pd.read_csv(annotations_filename)
# Creates the output directory
Path(output_directory).mkdir(parents=True, exist_ok=True)
dataframes = annotations.groupby('original_filename')
dataframes = [current_element[1] for current_element in dataframes] # Extracts the dataframe objects
work_items = []
begin_index = 0
for dataframe in dataframes:
work_items.append((dataframe, begin_index))
begin_index += len(dataframe)
pool = mp.Pool(processes)
pool.map(acquire_video, work_items)
pool.close()
| 2.75 | 3 |
android_appium_profiler/apps/amaze.py | qiangxu1996/android-appium-profiler | 0 | 12788390 | import time
import logging
import os
import adb
from .. import app_test
from ..dummy_driver import TouchAction
logger = logging.getLogger(__name__)
IMG_TO_MV = 'IMG_1555.jpg'
ANDROID_PIC_DIR = '/sdcard/Pictures'
ANDROID_DL_DIR = '/sdcard/Download'
class App(app_test.AppTest):
def __init__(self, **kwargs):
extra_cap = kwargs.setdefault('extra_cap', {})
extra_cap.setdefault('noReset', False)
super().__init__('com.amaze.filemanager.debug',
'com.amaze.filemanager.activities.MainActivity',
**kwargs)
self.grant_permissions(['WRITE_EXTERNAL_STORAGE'])
self.res_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'res', 'amaze')
def _tap_properties(self, el):
rect = el.rect
x = rect['x'] + 800
y = rect['y'] + rect['height']
TouchAction(self.driver).tap(x=x, y=y).perform()
def actions(self):
for img in os.listdir(self.res_dir):
adb.push(os.path.join(self.res_dir, img), ANDROID_PIC_DIR, True)
adb.shell(['mv', ANDROID_PIC_DIR + '/' + IMG_TO_MV, ANDROID_DL_DIR])
# time.sleep(30) # cov
self.may_start_profiler()
time.sleep(1)
self.find_element_by_name('Download').click()
time.sleep(2)
img_to_mv = self.find_element_by_name(IMG_TO_MV)
self._tap_properties(img_to_mv)
time.sleep(1)
self.find_element_by_name('Cut').click()
time.sleep(1)
self.back()
time.sleep(2)
self.swipe()
self.find_element_by_name('Pictures').click()
time.sleep(2)
self.swipe('down')
time.sleep(2)
self.find_element_by_res_id('paste').click()
time.sleep(2)
img_to_del = self.find_element_by_res_id('firstline')
self._tap_properties(img_to_del)
time.sleep(1)
self.find_element_by_name('Delete').click()
time.sleep(1)
self.find_element_by_res_id('md_buttonDefaultPositive').click()
time.sleep(2)
self.back()
time.sleep(2)
self.may_stop_profiler()
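# Illustrative usage sketch: how this test might be driven, assuming an Appium
# server and a connected device are available and that the AppTest base class
# accepts the default keyword arguments used in __init__ above.
def _example_run():
    app = App()      # capabilities come from the defaults set in __init__
    app.actions()    # push the images, then exercise cut/paste/delete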
| 2.171875 | 2 |
attic/concurrency/flags/getthreadpool.py | matteoshen/example-code | 5,651 | 12788391 | <reponame>matteoshen/example-code
from concurrent import futures
import sys
import requests
import countryflags as cf
import time
from getsequential import fetch
DEFAULT_NUM_THREADS = 100
GLOBAL_TIMEOUT = 300 # seconds
times = {}
def main(source, num_threads):
pool = futures.ThreadPoolExecutor(num_threads)
pending = {}
t0 = time.time()
# submit all jobs
for iso_cc in sorted(cf.cc2name):
print('get:', iso_cc)
times[iso_cc] = [time.time() - t0]
job = pool.submit(fetch, iso_cc, source)
pending[job] = iso_cc
to_download = len(pending)
downloaded = 0
# get results as jobs are done
for job in futures.as_completed(pending, timeout=GLOBAL_TIMEOUT):
try:
octets, file_name = job.result()
times[pending[job]].append(time.time() - t0)
downloaded += 1
print('\t--> {}: {:5d} bytes'.format(file_name, octets))
except Exception as exc:
print('\t***', pending[job], 'generated an exception:', exc)
ratio = downloaded / to_download
print('{} of {} downloaded ({:.1%})'.format(downloaded, to_download, ratio))
for iso_cc in sorted(times):
start, end = times[iso_cc]
print('{}\t{:.6g}\t{:.6g}'.format(iso_cc, start, end))
if __name__ == '__main__':
import argparse
source_names = ', '.join(sorted(cf.SOURCE_URLS))
parser = argparse.ArgumentParser(description='Download flag images.')
parser.add_argument('source', help='one of: ' + source_names)
parser.add_argument('-t', '--threads', type=int, default=DEFAULT_NUM_THREADS,
help='number of threads (default: %s)' % DEFAULT_NUM_THREADS)
args = parser.parse_args()
main(args.source, args.threads)
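# Illustrative invocation (assuming 'cia' is one of the source names defined
# in cf.SOURCE_URLS; see --help for the actual list):
#     python getthreadpool.py cia -t 100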
"""
From CIA, 1 thread:
real 2m0.832s
user 0m4.685s
sys 0m0.366s
"""
| 2.875 | 3 |
FATERUI/common/camera/mindvision/CameraMindVision.py | LynnChan706/Fater | 4 | 12788392 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.11
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_CameraMindVision', [dirname(__file__)])
except ImportError:
from . import _CameraMindVision
return _CameraMindVision
if fp is not None:
try:
_mod = imp.load_module('_CameraMindVision', fp, pathname, description)
finally:
fp.close()
return _mod
_CameraMindVision = swig_import_helper()
del swig_import_helper
else:
from . import _CameraMindVision
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _CameraMindVision.delete_SwigPyIterator
__del__ = lambda self : None;
def value(self): return _CameraMindVision.SwigPyIterator_value(self)
def incr(self, n=1): return _CameraMindVision.SwigPyIterator_incr(self, n)
def decr(self, n=1): return _CameraMindVision.SwigPyIterator_decr(self, n)
def distance(self, *args): return _CameraMindVision.SwigPyIterator_distance(self, *args)
def equal(self, *args): return _CameraMindVision.SwigPyIterator_equal(self, *args)
def copy(self): return _CameraMindVision.SwigPyIterator_copy(self)
    def next(self): return _CameraMindVision.SwigPyIterator_next(self)
def __next__(self): return _CameraMindVision.SwigPyIterator___next__(self)
def previous(self): return _CameraMindVision.SwigPyIterator_previous(self)
def advance(self, *args): return _CameraMindVision.SwigPyIterator_advance(self, *args)
def __eq__(self, *args): return _CameraMindVision.SwigPyIterator___eq__(self, *args)
def __ne__(self, *args): return _CameraMindVision.SwigPyIterator___ne__(self, *args)
def __iadd__(self, *args): return _CameraMindVision.SwigPyIterator___iadd__(self, *args)
def __isub__(self, *args): return _CameraMindVision.SwigPyIterator___isub__(self, *args)
def __add__(self, *args): return _CameraMindVision.SwigPyIterator___add__(self, *args)
def __sub__(self, *args): return _CameraMindVision.SwigPyIterator___sub__(self, *args)
def __iter__(self): return self
SwigPyIterator_swigregister = _CameraMindVision.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
CAPTURE_RETRY_TIME = _CameraMindVision.CAPTURE_RETRY_TIME
class CameraMindVision(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CameraMindVision, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CameraMindVision, name)
__repr__ = _swig_repr
def __init__(self, _mode=0, _single_mode=True, _grabTimeout=5000, _strobe_enable=True, _trigger_delay=0.018,
_packetSize=9000, _interPacketDelay=1000, _intp_method=7, _debug=False,
_is_hardware_trigger=False):
this = _CameraMindVision.new_CameraMindVision(_mode, _single_mode, _grabTimeout, _strobe_enable, _trigger_delay, _packetSize, _interPacketDelay,
_intp_method, _debug, _is_hardware_trigger)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _CameraMindVision.delete_CameraMindVision
__del__ = lambda self : None;
def init_SDK(self): return _CameraMindVision.CameraMindVision_init_SDK(self)
def open(self): return _CameraMindVision.CameraMindVision_open(self)
def capture_one_frame(self): return _CameraMindVision.CameraMindVision_capture_one_frame(self)
def is_connected(self): return _CameraMindVision.CameraMindVision_is_connected(self)
def release_camera(self): return _CameraMindVision.CameraMindVision_release_camera(self)
def reboot_camera(self): return _CameraMindVision.CameraMindVision_reboot_camera(self)
def get_frame_count(self): return _CameraMindVision.CameraMindVision_get_frame_count(self)
def get_error_frame_count(self): return _CameraMindVision.CameraMindVision_get_error_frame_count(self)
def get_frame_rate(self): return _CameraMindVision.CameraMindVision_get_frame_rate(self)
def set_wb(self, *args): return _CameraMindVision.CameraMindVision_set_wb(self, *args)
def set_wb_red(self, *args): return _CameraMindVision.CameraMindVision_set_wb_red(self, *args)
def set_wb_green(self, *args): return _CameraMindVision.CameraMindVision_set_wb_green(self, *args)
def set_wb_blue(self, *args): return _CameraMindVision.CameraMindVision_set_wb_blue(self, *args)
def set_shutter(self, *args): return _CameraMindVision.CameraMindVision_set_shutter(self, *args)
def get_shutter(self): return _CameraMindVision.CameraMindVision_get_shutter(self)
def get_white_balance_red(self): return _CameraMindVision.CameraMindVision_get_white_balance_red(self)
def get_white_balance_green(self): return _CameraMindVision.CameraMindVision_get_white_balance_green(self)
def get_white_balance_blue(self): return _CameraMindVision.CameraMindVision_get_white_balance_blue(self)
def get_firmware_version(self): return _CameraMindVision.CameraMindVision_get_firmware_version(self)
def get_camera_id(self): return _CameraMindVision.CameraMindVision_get_camera_id(self)
def get_camera_temperature(self): return _CameraMindVision.CameraMindVision_get_camera_temperature(self)
def get_frame_w_h(self, *args): return _CameraMindVision.CameraMindVision_get_frame_w_h(self, *args)
def save_parmeter(self): return _CameraMindVision.CameraMindVision_save_parmeter(self)
def load_parmeter(self): return _CameraMindVision.CameraMindVision_load_parmeter(self)
def get_image_in_numpy(self): return _CameraMindVision.CameraMindVision_get_image_in_numpy(self)
CameraMindVision_swigregister = _CameraMindVision.CameraMindVision_swigregister
CameraMindVision_swigregister(CameraMindVision)
# This file is compatible with both classic and new-style classes.
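# Illustrative usage sketch (not part of the generated wrapper): a minimal
# single-frame capture using the class above. The call order (init_SDK, open,
# capture, release) is an assumption based on the exposed method names.
def _example_capture_one_frame():
    cam = CameraMindVision()          # default constructor arguments
    cam.init_SDK()
    cam.open()
    frame = cam.get_image_in_numpy()  # numpy array holding the captured image
    cam.release_camera()
    return frame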
| 1.992188 | 2 |
backend/app/views.py | Yscorexm/ARGo | 0 | 12788393 | <gh_stars>0
from django.shortcuts import render
from django.db import connection
from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
import json
import os, time
from django.conf import settings
from django.core.files.storage import FileSystemStorage
@csrf_exempt
def postnoteplace(request):
if request.method != 'POST':
return HttpResponse(status=400)
# loading form-encoded data
message = request.POST.get("message")
lat = request.POST.get("lat")
lng = request.POST.get("lng")
x = request.POST.get("x")
y = request.POST.get("y")
z = request.POST.get("z")
orientation = request.POST.get("orientation")
if request.FILES.get("image"):
content = request.FILES['image']
        filename = "argo" + str(time.time()) + ".jpeg"
fs = FileSystemStorage()
filename = fs.save(filename, content)
imageurl = fs.url(filename)
else:
imageurl = None
cursor = connection.cursor()
cursor.execute('INSERT INTO notes (message, lat, lng, x, y, z, orientation, imageUri, type) VALUES'
'(%s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING ID;', (message, lat, lng, x, y, z, orientation, imageurl, 'gps'))
ID = cursor.fetchone()[0]
return JsonResponse({"ID":ID})
@csrf_exempt
def getnote(request):
if request.method != 'GET':
return HttpResponse(status=400)
uid = request.GET['ID']
cursor = connection.cursor()
    cursor.execute("SELECT * FROM notes WHERE ID = %s;", (uid,))
rows = cursor.fetchone()
response = {}
response['notes'] = rows
return JsonResponse(response)
@csrf_exempt
def postnoteimage(request):
if request.method != 'POST':
return HttpResponse(status=400)
# loading form-encoded data
message = request.POST.get("message")
if request.FILES.get("image"):
content = request.FILES['image']
filename = "argo"+str(time.time())+".png"
fs = FileSystemStorage()
filename = fs.save(filename, content)
imageurl = fs.url(filename)
else:
imageurl = None
cursor = connection.cursor()
cursor.execute('INSERT INTO notes (message, imageUri, type) VALUES'
'(%s, %s, %s) RETURNING ID;', (message, imageurl, 'item'))
ID = cursor.fetchone()[0]
return JsonResponse({"ID":ID})
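# Illustrative wiring sketch (belongs in a separate urls.py; the module path
# and route names below are assumptions, not the project's actual routes):
#
#   from django.urls import path
#   from app import views
#
#   urlpatterns = [
#       path('postnoteplace/', views.postnoteplace),
#       path('getnote/', views.getnote),
#       path('postnoteimage/', views.postnoteimage),
#   ]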
| 2.0625 | 2 |
spinta/backends/mongo/commands/migrate.py | atviriduomenys/spinta | 2 | 12788394 | <filename>spinta/backends/mongo/commands/migrate.py
from spinta import commands
from spinta.components import Context
from spinta.manifests.components import Manifest
from spinta.backends.mongo.components import Mongo
@commands.migrate.register(Context, Manifest, Mongo)
def migrate(context: Context, manifest: Manifest, backend: Mongo):
pass
| 1.609375 | 2 |
vstb_launch.py | gabrik/vstb_launcher | 0 | 12788395 | #!/usr/bin/env python3
import libvirt
##
## vstb_launch.py
## EU INPUT
##
## Created by <NAME> on 28/10/2017
## Copyright (c) 2017 <NAME>. All rights reserved.
##
class VSTB(object):
'''
    This class defines, starts, and then destroys and undefines the vSTB VMs
'''
def __init__(self, base_path, domains):
self.base_path = base_path
self.conn = libvirt.open("qemu:///system")
self.domains = domains
def define_domans(self):
'''
        This method loads the proper XML file for each domain and then defines the domain
'''
for d in self.domains:
path = str("%s/%s/%s.xml" % (self.base_path, d, d))
vm_xml = self.read_file(path)
self.conn.defineXML(vm_xml)
def launch_domains(self):
'''
        This method starts each domain
'''
for d in self.domains:
dom = self.conn.lookupByName(d)
dom.create()
def stop_domains(self):
'''
        This method stops each domain (stopping means the VM is destroyed)
'''
for d in self.domains:
dom = self.conn.lookupByName(d)
dom.destroy()
def undefine_domains(self):
'''
        This method undefines each domain
'''
for d in self.domains:
dom = self.conn.lookupByName(d)
dom.undefine()
def read_file(self, file_path):
'''
        This method reads a file from the filesystem
'''
data = ""
with open(file_path, 'r') as data_file:
data = data_file.read()
return data
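def run_vstb(base_path, components):
    """Illustrative sketch: drives the same lifecycle as the __main__ block
    below, but wraps the running phase in try/finally so the domains are
    destroyed and undefined even if something goes wrong while the vSTB is up.
    """
    vstb = VSTB(base_path, components)
    vstb.define_domans()
    vstb.launch_domains()
    try:
        input("<<<< Press enter to stop the vSTB >>>>")
    finally:
        vstb.stop_domains()
        vstb.undefine_domains()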
if __name__ == '__main__':
print("########################################")
print("###### vSTB VM Launcher ######")
print("########################################")
images_path = "/home/ubuntu/Scrivania/images"
components = ['es','ea','cp','pa','dms','dmc','vdi']
vstb = VSTB(images_path, components)
print(">>>> Defining Domains... <<<<")
vstb.define_domans()
print(">>>> [ DONE ] Defining Domains <<<<")
print(">>>> Starting Domains... <<<<")
vstb.launch_domains()
print(">>>> [ DONE ] Starting Domains <<<<")
print("########################################")
print("##### vSTB Running #####")
print("########################################")
input("<<<< Press enter to stop the vSTB >>>>")
print(">>>> Stopping Domains... <<<<")
vstb.stop_domains()
print(">>>> [ DONE ] Stopping Domains <<<<")
print(">>>> Undefining Domains... <<<<")
vstb.undefine_domains()
print(">>>> [ DONE ] Undefining Domains <<<<")
print("########################################")
print("##### vSTB Stopped #####")
print("########################################")
print(">>>> Bye <<<<")
| 2.453125 | 2 |
copulae/copula/summary.py | CrisDS81/copulae | 100 | 12788396 | <reponame>CrisDS81/copulae<filename>copulae/copula/summary.py
from abc import ABC, abstractmethod
from typing import Any, Dict
import numpy as np
import pandas as pd
class SummaryType(ABC):
def as_html(self):
return self._repr_html_()
@abstractmethod
def _repr_html_(self) -> str:
...
def __repr__(self):
return self.__str__()
@abstractmethod
def __str__(self) -> str:
...
class Summary(SummaryType):
"""A general summary to describe the copula instance"""
def __init__(self, copula, params: Dict[str, Any]):
self.copula = copula
self.params = params
def _repr_html_(self):
params = []
for k, v in self.params.items():
if isinstance(v, (int, float, complex, str)):
params.append(f"<strong>{k}</strong><span>{v}</span>")
if isinstance(v, np.ndarray) and v.ndim == 2: # correlation matrix
params.append(f"<strong>{k}</strong>" + pd.DataFrame(v).to_html(header=False, index=False))
param_content = '' if len(params) == 0 else f"""
<div>
<h3>Parameters</h3>
{'<br/>'.join(params)}
</div>
"""
return f"""
<div>
<h2>{self.copula.name} Copula Summary</h2>
<div>{self.copula.name} Copula with {self.copula.dim} dimensions</div>
<hr/>
{param_content}
</div>
"""
def __str__(self):
msg = [
f"{self.copula.name} Copula Summary",
"=" * 80,
f"{self.copula.name} Copula with {self.copula.dim} dimensions",
"\n",
]
if len(self.params) > 0:
msg.extend(["Parameters", "-" * 80])
for k, v in self.params.items():
if isinstance(v, (int, float, complex, str)):
msg.extend([f"{k:^20}: {v}", ''])
if isinstance(v, np.ndarray) and v.ndim == 2: # correlation matrix
msg.extend([f"{k:^20}", pd.DataFrame(v).to_string(header=False, index=False), ''])
return '\n'.join(msg)
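if __name__ == '__main__':
    # Illustrative sketch: render a Summary for a minimal stand-in copula
    # object. The attribute names mirror what Summary reads above (name, dim);
    # the parameter values are made up.
    class _FakeCopula:
        name = 'Gaussian'
        dim = 2
    print(Summary(_FakeCopula(), {'rho': 0.5,
                                  'corr': np.array([[1.0, 0.5], [0.5, 1.0]])}))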
| 3.125 | 3 |
deep_qa/testing/test_case.py | richarajpal/deep_qa | 459 | 12788397 | # pylint: disable=invalid-name,protected-access
from copy import deepcopy
from unittest import TestCase
import codecs
import gzip
import logging
import os
import shutil
from keras import backend as K
import numpy
from numpy.testing import assert_allclose
from deep_qa.common.checks import log_keras_version_info
from deep_qa.data.instances.instance import TextInstance
from deep_qa.data.tokenizers import tokenizers
from deep_qa.common.params import Params
class DeepQaTestCase(TestCase): # pylint: disable=too-many-public-methods
TEST_DIR = './TMP_TEST/'
TRAIN_FILE = TEST_DIR + 'train_file'
VALIDATION_FILE = TEST_DIR + 'validation_file'
TEST_FILE = TEST_DIR + 'test_file'
TRAIN_BACKGROUND = TEST_DIR + 'train_background'
VALIDATION_BACKGROUND = TEST_DIR + 'validation_background'
SNLI_FILE = TEST_DIR + 'snli_file'
PRETRAINED_VECTORS_FILE = TEST_DIR + 'pretrained_glove_vectors_file'
PRETRAINED_VECTORS_GZIP = TEST_DIR + 'pretrained_glove_vectors_file.gz'
def setUp(self):
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.DEBUG)
log_keras_version_info()
os.makedirs(self.TEST_DIR, exist_ok=True)
def tearDown(self):
shutil.rmtree(self.TEST_DIR)
TextInstance.tokenizer = tokenizers["words"](Params({}))
K.clear_session()
def get_model_params(self, additional_arguments=None):
params = Params({})
params['save_models'] = False
params['model_serialization_prefix'] = self.TEST_DIR
params['train_files'] = [self.TRAIN_FILE]
params['validation_files'] = [self.VALIDATION_FILE]
params['embeddings'] = {'words': {'dimension': 6}, 'characters': {'dimension': 2}}
params['encoder'] = {"default": {'type': 'bow'}}
params['num_epochs'] = 1
params['validation_split'] = 0.0
if additional_arguments:
for key, value in additional_arguments.items():
params[key] = deepcopy(value)
return params
def get_model(self, model_class, additional_arguments=None):
params = self.get_model_params(additional_arguments)
return model_class(params)
def ensure_model_trains_and_loads(self, model_class, args: Params):
args['save_models'] = True
# Our loading tests work better if you're not using data generators. Unless you
# specifically request it in your test, we'll avoid using them here, and if you _do_ use
# them, we'll skip some of the stuff below that isn't compatible.
args.setdefault('data_generator', None)
model = self.get_model(model_class, args)
model.train()
# load the model that we serialized
loaded_model = self.get_model(model_class, args)
loaded_model.load_model()
# verify that original model and the loaded model predict the same outputs
if model._uses_data_generators():
# We shuffle the data in the data generator. Instead of making that logic more
# complicated, we'll just pass on the loading tests here. See comment above.
pass
else:
model_predictions = model.model.predict(model.validation_arrays[0])
loaded_model_predictions = loaded_model.model.predict(model.validation_arrays[0])
for model_prediction, loaded_prediction in zip(model_predictions, loaded_model_predictions):
assert_allclose(model_prediction, loaded_prediction)
# We should get the same result if we index the data from the original model and the loaded
# model.
_, indexed_validation_arrays = loaded_model.load_data_arrays(model.validation_files)
if model._uses_data_generators():
# As above, we'll just pass on this.
pass
else:
model_predictions = model.model.predict(model.validation_arrays[0])
loaded_model_predictions = loaded_model.model.predict(indexed_validation_arrays[0])
for model_prediction, loaded_prediction in zip(model_predictions, loaded_model_predictions):
assert_allclose(model_prediction, loaded_prediction)
return model, loaded_model
@staticmethod
def one_hot(index, length):
vector = numpy.zeros(length)
vector[index] = 1
return vector
def write_snli_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\ttext 1\thypothesis1\tentails\n')
train_file.write('2\ttext 2\thypothesis2\tcontradicts\n')
train_file.write('3\ttext3\thypothesis3\tentails\n')
train_file.write('4\ttext 4\thypothesis4\tneutral\n')
train_file.write('5\ttext5\thypothesis 5\tentails\n')
train_file.write('6\ttext6\thypothesis6\tcontradicts\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\ttext 1 with extra words\thypothesis1\tentails\n')
validation_file.write('2\ttext 2\tlonger hypothesis 2\tcontradicts\n')
validation_file.write('3\ttext3\thypothesis withreallylongfakeword\tentails\n')
def write_sequence_tagging_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('cats###N\tare###V\tanimals###N\t.###N\n')
train_file.write('dogs###N\tare###V\tanimals###N\t.###N\n')
train_file.write('snakes###N\tare###V\tanimals###N\t.###N\n')
train_file.write('birds###N\tare###V\tanimals###N\t.###N\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('horses###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('blue###N\tcows###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('monkeys###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('caterpillars###N\tare###V\tanimals###N\t.###N\n')
def write_verb_semantics_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('root####absorb####water\t1,1\t2,2\tMOVE\t-1,-1\t0,0\n')
train_file.write('this####mixture####is####converted####into####sugar####inside####leaf'
'\t2,3\t5,5\tCREATE\t7,7\t-1,-1\n')
train_file.write('lakes####contain####water\t1,1\t2,2\tNONE\t-1,-1\t-1,-1\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('root####absorb####water\t1,1\t2,2\tMOVE\t-1,-1\t0,0\n')
validation_file.write('this####mixture####is####converted####into####sugar####inside####leaf'
'\t2,3\t5,5\tCREATE\t7,7\t-1,-1\n')
validation_file.write('lakes####contain####water\t1,1\t2,2\tNONE\t-1,-1\t-1,-1\n')
def write_true_false_model_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tq1a1\t0\n')
validation_file.write('2\tq1a2\t1\n')
validation_file.write('3\tq1a3\t0\n')
validation_file.write('4\tq1a4\t0\n')
validation_file.write('5\tq2a1\t0\n')
validation_file.write('6\tq2a2\t0\n')
validation_file.write('7\tq2a3\t1\n')
validation_file.write('8\tq2a4\t0\n')
validation_file.write('9\tq3a1\t0\n')
validation_file.write('10\tq3a2\t0\n')
validation_file.write('11\tq3a3\t0\n')
validation_file.write('12\tq3a4\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tsentence1\t0\n')
train_file.write('2\tsentence2 word2 word3\t1\n')
train_file.write('3\tsentence3 word2\t0\n')
train_file.write('4\tsentence4\t1\n')
train_file.write('5\tsentence5\t0\n')
train_file.write('6\tsentence6\t0\n')
with codecs.open(self.TEST_FILE, 'w', 'utf-8') as test_file:
test_file.write('1\ttestsentence1\t0\n')
test_file.write('2\ttestsentence2 word2 word3\t1\n')
test_file.write('3\ttestsentence3 word2\t0\n')
test_file.write('4\ttestsentence4\t1\n')
test_file.write('5\ttestsentence5 word4\t0\n')
test_file.write('6\ttestsentence6\t0\n')
def write_additional_true_false_model_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tq4a1\t0\n')
validation_file.write('2\tq4a2\t1\n')
validation_file.write('3\tq4a3\t0\n')
validation_file.write('4\tq4a4\t0\n')
validation_file.write('5\tq5a1\t0\n')
validation_file.write('6\tq5a2\t0\n')
validation_file.write('7\tq5a3\t1\n')
validation_file.write('8\tq5a4\t0\n')
validation_file.write('9\tq6a1\t0\n')
validation_file.write('10\tq6a2\t0\n')
validation_file.write('11\tq6a3\t0\n')
validation_file.write('12\tq6a4\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tsentence7\t0\n')
train_file.write('2\tsentence8 word4 word5\t1\n')
train_file.write('3\tsentence9 word4\t0\n')
train_file.write('4\tsentence10\t1\n')
train_file.write('5\tsentence11 word3 word2\t0\n')
train_file.write('6\tsentence12\t0\n')
def write_question_answer_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tquestion1\tanswer1###answer2\t0\n')
with codecs.open(self.VALIDATION_BACKGROUND, 'w', 'utf-8') as validation_background:
validation_background.write('1\tvb1\tvb2\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\ta b e i d\tanswer 1###answer2\t0\n')
train_file.write('2\ta b c d\tanswer3###answer4\t1\n')
train_file.write('3\te d w f d s a b\tanswer5###answer6###answer9\t2\n')
train_file.write('4\te fj k w q\tanswer7###answer8\t0\n')
with codecs.open(self.TRAIN_BACKGROUND, 'w', 'utf-8') as train_background:
train_background.write('1\tsb1\tsb2\n')
train_background.write('2\tsb3\n')
train_background.write('3\tsb4\n')
train_background.write('4\tsb5\tsb6\n')
def write_who_did_what_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tHe went to the store to buy goods, because he wanted to.'
'\tHe bought xxxxx\tgoods###store\t0\n')
validation_file.write('1\tShe hiking on the weekend with her friend.'
'\tShe went xxxxx\thiking###friend###weekend###her friend\t0\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# document, question, answers
train_file.write('1\tFred hit the ball with the bat.\tHe hit the ball with the xxxxx\tbat###ball\t0\n')
train_file.write('1\tShe walked the dog today.\tThe xxxxx was walked today.\tShe###dog###today\t1\n')
train_file.write('1\tHe kept typing at his desk.\tHe typed at his xxxxx\tdesk###kept\t0\n')
            train_file.write('1\tThe pup ate the bone but not the biscuit.\tThe pup ate the xxxxx\t'
'bone###biscuit\t0\n')
def write_tuple_inference_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tss<>v f d<>oo o<>c$$$s<>v ff<>o i###ss r<>v<>o e<>o ee\t'
'ss ss<>ve gg<>o sd<>ccs\t0\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# document, question, answers
train_file.write('1\tss<>v<>oo o<>c$$$s e<>ff<>o ii i###ss r<>rr<>o e<>o ee\t'
'ss<>ve gg<>o sd<>ccs\t0\n')
train_file.write('2\tsg g<>vg<>oo o<>c$$$s e<>v ff<>o ii i###ss<>v rr<>o e<>o ee'
'###hh kk<>hdj d<>hh\tss ss<>ve gg<>o sd<>ccs\t2\n')
train_file.write('3\ts r<>v f d<>o ss<>c$$$s e<>v ff<>o ss i$$$r<>v ss<>s o e<>o ee\t'
'ss ss<>v g<>o sd<>ccs\t0\n')
train_file.write('4\tty y<>cf fv ss<>s ss<>c$$$rt e<>vv f<>oss i i###ss<>v<>os e<>o ee\t'
'ss ss<>ve gg<>o sd<>ccs\t1\n')
def write_span_prediction_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tquestion 1 with extra words\t'
'passage with answer and a reallylongword\t13,18\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tquestion 1\tpassage1 with answer1\t14,20\n')
train_file.write('2\tquestion 2\tpassage2 with answer2\t0,8\n')
train_file.write('3\tquestion 3\tpassage3 with answer3\t9,13\n')
train_file.write('4\tquestion 4\tpassage4 with answer4\t14,20\n')
def write_sentence_selection_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tWhere is Paris?\tParis is the capital of France.###It '
'is by the Seine.###It is quite old###this is a '
'very long sentence meant to test that loading '
'and padding works properly in the model.\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tWho won Super Bowl 50?\tSuper Bowl 50 was in Santa '
'Clara.###The Patriots beat the Broncos.\t1\n')
train_file.write('2\tWhen is Thanksgiving?\tFolk tales tell '
'of the Pilgrims celebrating the holiday.###Many '
'people eat a lot.###It is in November.\t2\n')
train_file.write('3\tWhen were computers invented?\tThe ancient Chinese used '
'abacuses.###Alan Turing cracked Enigma.###It is hard to '
'pinpoint an inventor of the computer.\t2\n')
def write_pretrained_vector_files(self):
# write the file
with codecs.open(self.PRETRAINED_VECTORS_FILE, 'w', 'utf-8') as vector_file:
vector_file.write('word2 0.21 0.57 0.51 0.31\n')
vector_file.write('sentence1 0.81 0.48 0.19 0.47\n')
# compress the file
with open(self.PRETRAINED_VECTORS_FILE, 'rb') as f_in:
with gzip.open(self.PRETRAINED_VECTORS_GZIP, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def write_sentence_data(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write("This is a sentence for language modelling.\n")
train_file.write("Here's another one for language modelling.\n")
def write_original_snli_data(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# pylint: disable=line-too-long
train_file.write("""{"annotator_labels": ["neutral"],"captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"}\n""")
train_file.write("""{"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"}\n""")
train_file.write("""{"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"}\n""")
# pylint: enable=line-too-long
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
# pylint: disable=line-too-long
validation_file.write("""{"annotator_labels": ["neutral"],"captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"}\n""")
validation_file.write("""{"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"}\n""")
validation_file.write("""{"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"}\n""")
# pylint: enable=line-too-long
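class _ExampleDeepQaTest(DeepQaTestCase):
    """Illustrative sketch of how a concrete test typically uses the fixture
    helpers above; real tests in the package would also build and train a
    model via get_model() or ensure_model_trains_and_loads().
    """
    def test_fixture_files_are_written(self):
        self.write_true_false_model_files()
        assert os.path.exists(self.TRAIN_FILE)
        assert os.path.exists(self.VALIDATION_FILE)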
| 2.046875 | 2 |
week11/helloWorld.py | kmcooper/BMI8540 | 0 | 12788398 | <filename>week11/helloWorld.py
#!/usr/bin/python3.7
# This program prints out Hello World!
# That's it. That's the code.
print("Hello world!")
| 2.75 | 3 |
src/larksuiteoapi/api/response/__init__.py | keeperlibofan/oapi-sdk-python | 50 | 12788399 | <reponame>keeperlibofan/oapi-sdk-python<gh_stars>10-100
# -*- coding: UTF-8 -*-
from .response import *
| 0.882813 | 1 |
cpmoptimize/recompiler.py | borzunov/cpmoptimize | 121 | 12788400 | <reponame>borzunov/cpmoptimize<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import byteplay
from matcode import *
class RecompilationError(Exception):
def __init__(self, message, state):
self.message = "Can't optimize loop: %s" % message
if state.lineno is not None:
self.message += ' at line %s' % state.lineno
self.message += ' in %s' % state.settings['function_info']
def __str__(self):
return self.message
class UnpredictableArgsError(Exception):
pass
class RecompilerState(object):
def __init__(self, settings):
self._settings = settings
self.lineno = settings['head_lineno']
self.stack = []
self._content = []
# List of straight references of all used variables
self._vars_storage = []
# List of indexes of really existing variables in
# self._vars_storage (we need to save their values at the end of
# the loop)
self._real_vars_indexes = []
# Map from a straight variable reference to a pair of variable
# index in a unified storage (actually in
# self._vars_storage) and its effective unified
# reference (VAR, index) or (CONST, const_no)
self._vars_map = {}
        # Storage for folded instruction sequences that evaluate to constants.
        # These instructions are executed once at run time, and the computed
        # values are inserted into the matrices.
self._consts = []
@property
def settings(self):
return self._settings
@property
def content(self):
return self._content
@property
def consts(self):
return self._consts
@property
def vars_storage(self):
return self._vars_storage
@property
def real_vars_indexes(self):
return self._real_vars_indexes
_real_folded_arr = '__cpm::folded'
@property
def real_folded_arr(self):
return self._real_folded_arr
def add_const(self, straight):
arg_type, arg = straight
if arg_type == FOLD_TOS:
lines = self.stack[-arg - 1]
if lines is None:
raise ValueError(
'Unpredictable value to fold for FOLD_TOS'
)
elif arg_type == FOLD:
lines = arg
else:
raise ValueError((
"Can't add constant from argument with type %s " +
"to matrix code"
) % arg_type)
index = len(self._consts)
self._consts.append(lines)
return CONST, index
def add_var(self, straight, mutation):
        # If a variable was changed at least once in a loop's body, we need to
        # mark it as mutable at the beginning of compilation.
# During the compilation its value can become predictable.
try:
index, unified = self._vars_map[straight]
if mutation and unified[0] != VAR:
unified = VAR, index
self.store_var(straight, unified)
except KeyError:
index = len(self._vars_storage)
self._vars_storage.append(straight)
var_type = straight[0]
if var_type in (NAME, GLOBAL, FAST, DEREF):
self._real_vars_indexes.append(index)
if mutation:
unified = VAR, index
else:
load_oper = VARIABLE_OPERATION_MAP[var_type][0]
unified = self.add_const((FOLD, [
(load_oper, straight[1]),
]))
self._vars_map[straight] = [index, unified]
return unified
def _translate_arg(self, arg):
        # Translate an argument of the types used in matcode generation into
        # an argument of type VALUE, CONST or VAR (i.e., make a unified
        # reference from a straight one)
arg_type = arg[0]
if arg_type in (VALUE, CONST, PARAM):
return arg
if arg_type == FOLD_TOS:
return self.add_const(arg)
if arg_type not in VARIABLE_OPERATION_MAP.keys() + [COUNTER, TOS]:
raise ValueError((
"Can't add variable from argument with type %s " +
"to matrix code"
) % arg_type)
if arg_type == TOS:
            # If the argument type is TOS, translate it to a STACK argument
            # first (make an absolute reference from a relative one)
arg = STACK, len(self.stack) - 1 - arg[1]
return self.add_var(arg, True)
def append(self, *instrs):
for instr in instrs:
oper = instr[0]
args = map(self._translate_arg, instr[1:])
self._content.append([oper] + args)
def load_var(self, straight):
return self._vars_map[straight][1]
def store_var(self, straight, unified):
self._vars_map[straight][1] = unified
def handle_nop(state, instr):
pass
def handle_pop_top(state, instr):
state.stack.pop()
def create_rot(count):
def handle_rot(state, instr):
for index in xrange(-1, count - 1):
if state.stack[-index - 2] is None:
state.append(
[MOV, (TOS, index), (TOS, index + 1)],
)
if state.stack[-1] is None:
state.append(
[MOV, (TOS, count - 1), (TOS, -1)],
)
if state.settings['opt_clear_stack']:
                # Stack clearing is performed because the program would run
                # slower if big values remained on the stack
state.append(
[MOV, (TOS, -1), (VALUE, 0)],
)
state.stack[-count:] = (
[state.stack[-1]] + state.stack[-count:-1]
)
return handle_rot
def create_dup(count):
def handle_dup(state, instr):
for index in xrange(count):
if state.stack[-count + index] is None:
state.append(
[MOV, (TOS, index - count), (TOS, index)],
)
state.stack += state.stack[-count:]
return handle_dup
def handle_dup_topx(state, instr):
create_dup(instr[1])(state, instr)
def handle_unary_negative(state, instr):
if state.stack[-1] is not None:
state.stack[-1].append(instr)
else:
state.append(
[MOV, (TOS, -1), (TOS, 0)],
[MOV, (TOS, 0), (VALUE, 0)],
[SUB, (TOS, 0), (TOS, -1)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, -1), (VALUE, 0)],
)
def handle_unary_const(state, instr):
if state.stack[-1] is not None:
state.stack[-1].append(instr)
else:
raise UnpredictableArgsError
def handle_binary_multiply(state, instr):
if state.stack[-2] is not None and state.stack[-1] is not None:
state.stack[-2] += state.stack[-1] + [instr]
state.stack.pop()
elif state.stack[-2] is not None:
state.append(
[MUL, (TOS, 0), (FOLD_TOS, 1)],
[MOV, (TOS, 1), (TOS, 0)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, 0), (VALUE, 0)],
)
state.stack[-2] = None
state.stack.pop()
elif state.stack[-1] is not None:
state.append(
[MUL, (TOS, 1), (FOLD_TOS, 0)],
)
state.stack.pop()
else:
raise RecompilationError((
'Multiplication of two unpredictable values is unsupported'
), state)
def handle_binary_add(state, instr):
if state.stack[-2] is not None and state.stack[-1] is not None:
state.stack[-2] += state.stack[-1] + [instr]
state.stack.pop()
elif state.stack[-2] is not None:
state.append(
[ADD, (TOS, 0), (FOLD_TOS, 1)],
[MOV, (TOS, 1), (TOS, 0)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, 0), (VALUE, 0)],
)
state.stack[-2] = None
state.stack.pop()
elif state.stack[-1] is not None:
state.append(
[ADD, (TOS, 1), (FOLD_TOS, 0)],
)
state.stack.pop()
else:
state.append(
[ADD, (TOS, 1), (TOS, 0)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, 0), (VALUE, 0)],
)
state.stack.pop()
def handle_binary_subtract(state, instr):
if state.stack[-2] is not None and state.stack[-1] is not None:
state.stack[-2] += state.stack[-1] + [instr]
state.stack.pop()
elif state.stack[-2] is not None:
state.append(
[SUB, (TOS, 0), (FOLD_TOS, 1)],
[MOV, (TOS, 1), (VALUE, 0)],
[SUB, (TOS, 1), (TOS, 0)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, 0), (VALUE, 0)],
)
state.stack[-2] = None
state.stack.pop()
elif state.stack[-1] is not None:
state.append(
[SUB, (TOS, 1), (FOLD_TOS, 0)],
)
state.stack.pop()
else:
state.append(
[SUB, (TOS, 1), (TOS, 0)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, 0), (VALUE, 0)],
)
state.stack.pop()
def handle_binary_const(state, instr):
if state.stack[-2] is not None and state.stack[-1] is not None:
state.stack[-2] += state.stack[-1] + [instr]
state.stack.pop()
else:
raise UnpredictableArgsError
def handle_load_const(state, instr):
arg = instr[1]
if not isinstance(arg, state.settings['types']):
allowed_types = ', '.join(map(repr, state.settings['types']))
raise RecompilationError((
'Constant %s has an unallowed type %s instead of ' +
'one of allowed types: %s'
) % (repr(arg), type(arg), allowed_types), state)
state.stack.append([instr])
def handle_load_var(state, instr):
oper, name = instr
straight = VARIABLE_TYPE_MAP[oper][0], name
unified = state.load_var(straight)
if unified[0] == CONST:
state.stack.append([
(byteplay.LOAD_FAST, state.real_folded_arr),
(byteplay.LOAD_CONST, unified[1]),
(byteplay.BINARY_SUBSCR, None),
])
else:
state.append(
[MOV, (TOS, -1), straight],
)
state.stack.append(None)
def handle_store_var(state, instr):
oper, name = instr
straight = VARIABLE_TYPE_MAP[oper][0], name
lines = state.stack[-1]
if lines is not None:
if (
len(lines) == 3 and
lines[0] == (byteplay.LOAD_FAST, state.real_folded_arr) and
lines[1][0] == byteplay.LOAD_CONST and
isinstance(lines[1][1], int) and
lines[2] == (byteplay.BINARY_SUBSCR, None)
):
const_ref = CONST, lines[1][1]
else:
const_ref = state.add_const((FOLD_TOS, 0))
state.append(
[MOV, straight, const_ref],
)
state.store_var(straight, const_ref)
else:
state.append(
[MOV, straight, (TOS, 0)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, 0), (VALUE, 0)],
)
state.store_var(straight, straight)
state.stack.pop()
LOAD_OPERATIONS, STORE_OPERATIONS = zip(*VARIABLE_OPERATION_MAP.values())
BYTECODE_HANDLERS = [
(handle_nop, [byteplay.NOP]),
(handle_pop_top, [byteplay.POP_TOP]),
(create_rot(2), [byteplay.ROT_TWO]),
(create_rot(3), [byteplay.ROT_THREE]),
(create_rot(4), [byteplay.ROT_FOUR]),
(create_dup(1), [byteplay.DUP_TOP]),
(handle_nop, [byteplay.UNARY_POSITIVE]),
(handle_unary_negative, [byteplay.UNARY_NEGATIVE]),
(handle_unary_const, [
byteplay.UNARY_NOT, byteplay.UNARY_INVERT,
]),
(handle_binary_const, [byteplay.BINARY_POWER]),
(handle_binary_multiply, [byteplay.BINARY_MULTIPLY]),
(handle_binary_const, [
byteplay.BINARY_DIVIDE, byteplay.BINARY_FLOOR_DIVIDE,
byteplay.BINARY_TRUE_DIVIDE, byteplay.BINARY_MODULO,
]),
(handle_binary_add, [byteplay.BINARY_ADD]),
(handle_binary_subtract, [byteplay.BINARY_SUBTRACT]),
(handle_binary_const, [
byteplay.BINARY_LSHIFT, byteplay.BINARY_RSHIFT,
byteplay.BINARY_AND, byteplay.BINARY_XOR, byteplay.BINARY_OR,
]),
(handle_binary_const, [byteplay.INPLACE_POWER]),
(handle_binary_multiply, [byteplay.INPLACE_MULTIPLY]),
(handle_binary_const, [
byteplay.INPLACE_DIVIDE, byteplay.INPLACE_FLOOR_DIVIDE,
byteplay.INPLACE_TRUE_DIVIDE, byteplay.INPLACE_MODULO,
]),
(handle_binary_add, [byteplay.INPLACE_ADD]),
(handle_binary_subtract, [byteplay.INPLACE_SUBTRACT]),
(handle_binary_const, [
byteplay.INPLACE_LSHIFT, byteplay.INPLACE_RSHIFT,
byteplay.INPLACE_AND, byteplay.INPLACE_XOR, byteplay.INPLACE_OR,
]),
(handle_dup_topx, [byteplay.DUP_TOPX]),
(handle_load_const, [byteplay.LOAD_CONST]),
(handle_load_var, LOAD_OPERATIONS),
(handle_store_var, STORE_OPERATIONS),
]
SUPPORTED_OPERATIONS = {}
for handler, opers in BYTECODE_HANDLERS:
for oper in opers:
SUPPORTED_OPERATIONS[oper] = handler
def browse_vars(state, body):
# Browse used in loop's body variables to determine their mutability
for oper, arg in body:
try:
arg_type, mutation = VARIABLE_TYPE_MAP[oper]
state.add_var((arg_type, arg), mutation)
except KeyError:
pass
def browse_counter(state, body):
store_instr = body[0]
oper, name = store_instr
try:
arg_type, mutation = VARIABLE_TYPE_MAP[oper]
if not mutation:
raise KeyError
except KeyError:
raise RecompilationError((
            'Unsupported iterator usage in instruction %s' % repr(store_instr)
), state)
load_instr = VARIABLE_OPERATION_MAP[arg_type][0], name
if state.settings['opt_min_rows']:
status = 'n' # A loop counter was not used
for index in xrange(1, len(body)):
instr = body[index]
if instr == store_instr:
status = 'w' # The counter was changed at least once
break
if instr == load_instr:
status = 'r' # The counter was not changed but was read at least once
else:
status = 'w'
return (arg_type, name), status, body[1:]
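# Illustrative sketch (added for clarity, not part of the original module): with
# the opt_min_rows optimization enabled, browse_counter() reports one of three
# statuses for the loop counter, roughly corresponding to loop bodies such as
#
#     for i in xrange(n): acc += 1     ->  'n'  (counter never used in the body)
#     for i in xrange(n): acc += i     ->  'r'  (counter read but never rewritten)
#     for i in xrange(n): i = f(i)     ->  'w'  (counter rewritten inside the body)
#
# recompile_body() below uses this status to decide whether the real counter has
# to be maintained on every iteration of the recompiled loop.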
def recompile_body(settings, body):
state = RecompilerState(settings)
elem_straight, counter_status, rem_body = browse_counter(
state, body,
)
if counter_status == 'w':
# If real counter is mutable, we need special variable to
# store real counter value
counter_service = COUNTER, None
elif counter_status == 'r':
# If real counter isn't mutable but used, we need to
# maintain its value
counter_service = elem_straight
if counter_status == 'n':
# If real counter isn't used at all, we don't need to
# maintain this variable in the loop, but we need to save
# its final value after the loop
state.manual_store_counter = elem_straight
else:
# We must mark real counter as mutable at the beginning of the
# loop, because first instruction (counter storing) was removed
# from rem_body and system doesn't know that counter is mutable
state.add_var(elem_straight, True)
state.manual_store_counter = None
browse_vars(state, rem_body)
if counter_status != 'n':
state.append(
[MOV, counter_service, (PARAM, 'start')],
)
state.append(
[LOOP, (PARAM, 'iters_count')],
)
if counter_status == 'w':
state.append(
[MOV, elem_straight, (COUNTER, None)],
)
for instr in rem_body:
oper = instr[0]
if oper == byteplay.SetLineno:
state.lineno = instr[1]
continue
try:
SUPPORTED_OPERATIONS[oper](state, instr)
except UnpredictableArgsError:
raise RecompilationError(('All operands of instruction %s must be a constant ' +
'or must have a predictable value') % oper, state)
except IndexError:
raise RecompilationError('Unsupported loop type or invalid stack usage in bytecode', state)
except KeyError:
raise RecompilationError('Unsupported instruction %s' % repr(instr), state)
if counter_status != 'n':
state.append(
[ADD, counter_service, (PARAM, 'step')],
)
state.append(
[END],
)
if counter_status == 'r':
state.append(
[SUB, counter_service, (PARAM, 'step')],
)
return state
| 2.359375 | 2 |
bassist/bassist/scripts/common.py | Doveps/mono | 0 | 12788401 | # Copyright (c) 2015 <NAME>
# See the file LICENSE for copying permission.
# This object is intended to run from script bassist.py
import logging
import logging.config
import argparse
import ConfigParser
from ..parser import host as parser_host
from ..flavor import directory as flavor_directory
class Script(object):
    '''This module contains things that are available for use by our packages'
scripts. This includes setting up argparse, logger, etc.'''
def set_logging(self):
'''If we're running from the root of the project, this stuff will work.
If not, load logging_config.'''
try:
logging.config.fileConfig('log.conf', disable_existing_loggers=False)
except ConfigParser.NoSectionError:
# probably no log.conf file
logging.basicConfig(
format='%(message)s',
)
self.logger = logging.getLogger(__name__)
try:
from log_override import LOG_OVERRIDES
logging.config.dictConfig(LOG_OVERRIDES)
        except Exception:
self.logger.debug('unable to load log_override; ignoring')
def set_arg_parser(self):
self.arg_parser = argparse.ArgumentParser( description=self.description )
self.required_args = self.arg_parser.add_argument_group('required arguments')
self.required_args.add_argument(
'-f', '--flavor-db',
required=True,
help='The path to the directory containing the flavor ZODB files')
self.required_args.add_argument(
'-n', '--flavor-name',
required=True,
help=self.flavor_arg_description)
self.required_args.add_argument(
'-s', '--scanner-directory',
required=True,
help='The path to the directory containing scanner results')
def read_flavors(self):
self.logger.debug('reading flavors')
self.flavors = flavor_directory.Directory(self.args.flavor_db).db
self.requested_flavor = self.flavors.get_obj_from_name(self.args.flavor_name)
self.logger.debug('retrieved requested flavor %s', self.requested_flavor)
def parse(self):
self.logger.debug('importing parsers')
self.parsed_host = parser_host.Host(self.args.scanner_directory)
self.logger.debug('finished importing parsers')
for parser in self.parsed_host.parsers:
self.logger.debug('parser log: %s', parser.log)
self.logger.debug('parsing: %s', parser.path)
parser.parse()
def finish(self):
self.flavors.close()
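# Illustrative sketch (added for clarity, not part of the original module): a
# concrete script would typically subclass Script along these lines; the class
# name and description strings below are assumptions made only for this example.
#
#     class CompareScript(Script):
#         description = 'Compare scanner results against a stored flavor'
#         flavor_arg_description = 'The name of the flavor to compare against'
#
#         def run(self):
#             self.set_logging()
#             self.set_arg_parser()
#             self.args = self.arg_parser.parse_args()
#             self.read_flavors()
#             self.parse()
#             self.finish()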
| 2.25 | 2 |
train.py | Nico-Adamo/CNNF | 0 | 12788402 | from __future__ import print_function
import os
import logging
import numpy as np
import random
import math
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import shutil
from shutil import copyfile
from datetime import datetime
from tensorboardX import SummaryWriter
from cnnf.model_cifar import WideResNet
from cnnf.model_mnist import CNNF
from utils import *
from advertorch.attacks import GradientSignAttack, LinfPGDAttack
from advertorch.context import ctx_noparamgrad_and_eval
def train_adv(args, model, device, train_loader, optimizer, scheduler, epoch,
cycles, mse_parameter=1.0, clean_parameter=1.0, clean='supclean'):
model.train()
correct = 0
train_loss = 0.0
model.reset()
adversary = LinfPGDAttack(
model, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=args.eps,
nb_iter=args.nb_iter, eps_iter=args.eps_iter, rand_init=True, clip_min=-1.0, clip_max=1.0, targeted=False)
print(len(train_loader))
for batch_idx, (images, targets) in enumerate(train_loader):
optimizer.zero_grad()
images = images.cuda()
targets = targets.cuda()
model.reset()
with ctx_noparamgrad_and_eval(model):
adv_images = adversary.perturb(images, targets)
images_all = torch.cat((images, adv_images), 0)
# Reset the model latent variables
model.reset()
if (args.dataset == 'cifar10'):
logits, orig_feature_all, block1_all, block2_all, block3_all = model(images_all, first=True, inter=True)
elif (args.dataset == 'fashion'):
logits, orig_feature_all, block1_all, block2_all = model(images_all, first=True, inter=True)
ff_prev = orig_feature_all
# f1 the original feature of clean images
orig_feature, _ = torch.split(orig_feature_all, images.size(0))
block1_clean, _ = torch.split(block1_all, images.size(0))
block2_clean, _ = torch.split(block2_all, images.size(0))
if (args.dataset == 'cifar10'):
block3_clean, _ = torch.split(block3_all, images.size(0))
logits_clean, logits_adv = torch.split(logits, images.size(0))
if not ('no' in clean):
loss = (clean_parameter * F.cross_entropy(logits_clean, targets) + F.cross_entropy(logits_adv, targets)) / (2*(cycles+1))
else:
loss = F.cross_entropy(logits_adv, targets) / (cycles+1)
for i_cycle in range(cycles):
if (args.dataset == 'cifar10'):
recon, block1_recon, block2_recon, block3_recon = model(logits, step='backward', inter_recon=True)
elif (args.dataset == 'fashion'):
recon, block1_recon, block2_recon = model(logits, step='backward', inter_recon=True)
recon_clean, recon_adv = torch.split(recon, images.size(0))
recon_block1_clean, recon_block1_adv = torch.split(block1_recon, images.size(0))
recon_block2_clean, recon_block2_adv = torch.split(block2_recon, images.size(0))
if (args.dataset == 'cifar10'):
recon_block3_clean, recon_block3_adv = torch.split(block3_recon, images.size(0))
loss += (F.mse_loss(recon_adv, orig_feature) + F.mse_loss(recon_block1_adv, block1_clean) + F.mse_loss(recon_block2_adv, block2_clean) + F.mse_loss(recon_block3_adv, block3_clean)) * mse_parameter / (4*cycles)
elif (args.dataset == 'fashion'):
loss += (F.mse_loss(recon_adv, orig_feature) + F.mse_loss(recon_block1_adv, block1_clean) + F.mse_loss(recon_block2_adv, block2_clean)) * mse_parameter / (3*cycles)
# feedforward
ff_current = ff_prev + args.res_parameter * (recon - ff_prev)
logits = model(ff_current, first=False)
ff_prev = ff_current
logits_clean, logits_adv = torch.split(logits, images.size(0))
if not ('no' in clean):
loss += (clean_parameter * F.cross_entropy(logits_clean, targets) + F.cross_entropy(logits_adv, targets)) / (2*(cycles+1))
else:
loss += F.cross_entropy(logits_adv, targets) / (cycles+1)
pred = logits_clean.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(targets.view_as(pred)).sum().item()
loss.backward()
if (args.grad_clip):
nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
scheduler.step()
train_loss += loss
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(images), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
train_loss /= len(train_loader)
acc = correct / len(train_loader.dataset)
return train_loss, acc
def train(args, model, device, train_loader, optimizer, scheduler, epoch,
cycles, mse_parameter=1.0, clean_parameter=1.0,clean="no"):
model.train()
correct = 0
train_loss = 0.0
model.reset()
for batch_idx, (images, targets) in enumerate(train_loader):
optimizer.zero_grad()
images = images.cuda()
targets = targets.cuda()
model.reset()
# Reset the model latent variables
model.reset()
if (args.dataset == 'cifar10'):
logits, orig_feature, block1, block2, block3 = model(images, first=True, inter=True)
elif (args.dataset == 'fashion'):
            logits, orig_feature, block1, block2 = model(images, first=True, inter=True)
ff_prev = orig_feature
# find the original feature of clean images
loss = F.cross_entropy(logits, targets) / (cycles+1)
for i_cycle in range(cycles):
if (args.dataset == 'cifar10'):
recon, block1_recon, block2_recon, block3_recon = model(logits, step='backward', inter_recon=True)
elif (args.dataset == 'fashion'):
recon, block1_recon, block2_recon = model(logits, step='backward', inter_recon=True)
if (args.dataset == 'cifar10'):
loss += (F.mse_loss(recon, orig_feature) + F.mse_loss(block1_recon, block1) + F.mse_loss(block2_recon, block2) + F.mse_loss(block3_recon, block3)) * mse_parameter / (4*cycles)
elif (args.dataset == 'fashion'):
loss += (F.mse_loss(recon, orig_feature) + F.mse_loss(block1_recon, block1) + F.mse_loss(block2_recon, block2)) * mse_parameter / (3*cycles)
# feedforward
ff_current = ff_prev + args.res_parameter * (recon - ff_prev)
logits = model(ff_current, first=False)
ff_prev = ff_current
loss += F.cross_entropy(logits, targets) / (cycles+1)
pred = logits.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(targets.view_as(pred)).sum().item()
loss.backward()
if (args.grad_clip):
nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
scheduler.step()
train_loss += loss
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(images), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
train_loss /= len(train_loader)
acc = correct / len(train_loader.dataset)
return train_loss, acc
def test(args, model, device, test_loader, cycles, epoch):
model.eval()
test_loss = 0
correct = 0
noise_loss = 0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
data, target = data.to(device), target.to(device)
# Calculate accuracy with the original images
model.reset()
if (args.dataset == 'cifar10'):
output, orig_feature, _, _, _ = model(data, first=True, inter=True)
else:
output, orig_feature, _, _ = model(data, first=True, inter=True)
ff_prev = orig_feature
for i_cycle in range(cycles):
recon = model(output, step='backward')
ff_current = ff_prev + args.res_parameter * (recon - ff_prev)
output = model(ff_current, first=False)
test_loss += F.cross_entropy(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return test_loss, correct / len(test_loader.dataset)
def test_pgd(args, model, device, test_loader, epsilon=0.063):
model.eval()
model.reset()
adversary = LinfPGDAttack(
model.forward_adv, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=epsilon,
nb_iter=args.nb_iter, eps_iter=args.eps_iter, rand_init=True, clip_min=-1.0, clip_max=1.0, targeted=False)
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
data, target = data.to(device), target.to(device)
model.reset()
with ctx_noparamgrad_and_eval(model):
adv_images = adversary.perturb(data, target)
output = model.run_cycles(adv_images)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
acc = correct / len(test_loader.dataset)
print('PGD attack Acc {:.3f}'.format(100. * acc))
return acc
def main():
parser = argparse.ArgumentParser(description='CNNF training')
# optimization parameters
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128 for CIFAR, 64 for MNIST)')
    parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',
                        help='input batch size for testing (default: 128)')
    parser.add_argument('--epochs', type=int, default=200, metavar='N',
                        help='number of epochs to train (default: 200)')
    parser.add_argument('--lr', type=float, default=0.15, metavar='LR',
                        help='learning rate (default: 0.15 for SGD)')
parser.add_argument('--power', type=float, default=0.9, metavar='LR',
help='learning rate for poly scheduling')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--wd', default=5e-4, type=float,
help='weight decay (default: 5e-4)')
parser.add_argument('--grad-clip', action='store_true', default=False,
help='enable gradient clipping')
parser.add_argument('--dataset', choices=['cifar10', 'fashion'],
default='fashion', help='the dataset for training the model')
parser.add_argument('--schedule', choices=['poly', 'cos', 'stepLR'],
default='poly', help='scheduling for learning rate')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
help='how many batches to wait before logging training status')
# adversarial training parameters
parser.add_argument('--eps', type=float, default=0.063,
help='Perturbation magnitude for adv training')
parser.add_argument('--eps-iter', type=float, default=0.02,
help='attack step size')
parser.add_argument('--nb_iter', type=int, default=7,
help='number of steps in pgd attack')
parser.add_argument('--clean', choices=['no', 'supclean'],
default='no', help='whether to use clean data in adv training')
# hyper-parameters
parser.add_argument('--mse-parameter', type=float, default=1.0,
help='weight of the reconstruction loss')
parser.add_argument('--clean-parameter', type=float, default=1.0,
help='weight of the clean Xentropy loss')
parser.add_argument('--res-parameter', type=float, default=0.1,
help='step size for residuals')
# model parameters
parser.add_argument('--layers', default=40, type=int, help='total number of layers for WRN')
parser.add_argument('--widen-factor', default=2, type=int, help='Widen factor for WRN')
parser.add_argument('--droprate', default=0.0, type=float, help='Dropout probability')
parser.add_argument('--ind', type=int, default=2,
help='index of the intermediate layer to reconstruct to')
parser.add_argument('--max-cycles', type=int, default=2,
help='the maximum cycles that the CNN-F uses')
parser.add_argument('--save-model', default="model", # None
help='Name for Saving the current Model')
parser.add_argument('--model-dir', default="runs", # None
help='Directory for Saving the current Model')
args = parser.parse_args()
if not os.path.exists(args.model_dir):
os.makedirs(args.model_dir)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
seed_torch(args.seed)
Tensor_writer = SummaryWriter(os.path.join(args.model_dir, args.save_model))
train_transform_cifar = transforms.Compose(
[transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize([0.5] * 3, [0.5] * 3)])
test_transform_cifar = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize([0.5] * 3, [0.5] * 3)])
transform_mnist = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# Load datasets and architecture
if args.dataset == 'fashion':
train_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST('data', train=True, download=True,
transform=transform_mnist),
batch_size=args.batch_size, shuffle=True, drop_last=True)
test_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST('data', train=False, transform=transform_mnist),
batch_size=args.test_batch_size, shuffle=True, drop_last=True)
num_classes = 10
model = CNNF(num_classes, ind=args.ind, cycles=args.max_cycles, res_param=args.res_parameter).to(device)
elif args.dataset == 'cifar10':
train_data = datasets.CIFAR10(
'data', train=True, transform=train_transform_cifar, download=True)
test_data = datasets.CIFAR10(
'data', train=False, transform=test_transform_cifar, download=True)
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
shuffle=True, num_workers=4, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=args.test_batch_size,
shuffle=True, num_workers=4, pin_memory=True)
num_classes = 10
model = WideResNet(args.layers, 10, args.widen_factor, args.droprate, args.ind, args.max_cycles, args.res_parameter).to(device)
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.wd)
if(args.schedule == 'cos'):
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=lambda step: get_lr(step, args.epochs * len(train_loader), 1.0, 1e-5))
elif(args.schedule == 'stepLR'):
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
else:
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=lambda step: lr_poly(1.0, step, args.epochs * len(train_loader), args.power))
# Begin training
best_acc = 0
for epoch in range(args.epochs):
train_loss, train_acc = train(args, model, device, train_loader, optimizer, scheduler, epoch,
cycles=args.max_cycles, mse_parameter=args.mse_parameter, clean_parameter=args.clean_parameter, clean=args.clean)
test_loss, test_acc = test(args, model, device, test_loader, cycles=args.max_cycles, epoch=epoch)
Tensor_writer.add_scalars('loss', {'train': train_loss}, epoch)
Tensor_writer.add_scalars('acc', {'train': train_acc}, epoch)
Tensor_writer.add_scalars('loss', {'test': test_loss}, epoch)
Tensor_writer.add_scalars('acc', {'test': test_acc}, epoch)
# Save the model with the best accuracy
if test_acc > best_acc and args.save_model is not None:
best_acc = test_acc
experiment_fn = args.save_model
torch.save(model.state_dict(),
args.model_dir + "/{}-best.pt".format(experiment_fn))
if ((epoch+1)%50)==0 and args.save_model is not None:
experiment_fn = args.save_model
torch.save(model.state_dict(),
args.model_dir + "/{}-epoch{}.pt".format(experiment_fn,epoch))
pgd_acc = test_pgd(args, model, device, test_loader, epsilon=args.eps)
Tensor_writer.add_scalars('pgd_acc', {'test': pgd_acc}, epoch)
# Save final model
if args.save_model is not None:
experiment_fn = args.save_model
torch.save(model.state_dict(),
args.model_dir + "/{}.pt".format(experiment_fn))
if __name__ == '__main__':
main()
| 2.140625 | 2 |
marcottievents/etl/exml/__init__.py | soccermetrics/marcotti-events | 22 | 12788403 | <reponame>soccermetrics/marcotti-events
from .base import BaseXML, FeedElement, FeedParser
| 1.015625 | 1 |
scripts/update_blueprint_versions.py | aws-samples/aws-enterprise-jumpstart | 2 | 12788404 | <filename>scripts/update_blueprint_versions.py
import os
import boto3
import yaml
from botocore.config import Config
boto3_config = Config(
retries={
'max_attempts': 10,
'mode': 'standard'
}
)
BLUEPRINTS_KEY = "blueprints"
artifacts_bucket_name = os.getenv("ARTIFACTS_BUCKET_NAME")
artifacts_bucket_prefix = os.getenv("ARTIFACTS_BUCKET_SC_ASSET_PREFIX")
region = os.getenv("AWS_REGION")
artifact_bucket = boto3.resource("s3").Bucket(artifacts_bucket_name)
s3_client = boto3.client("s3", config=boto3_config)
sc_client = boto3.client("servicecatalog", config=boto3_config)
ssm_client = boto3.client('ssm', config=boto3_config)
def __cleanup_versions(_name, _versions, _product_id):
pa_list = sc_client.list_provisioning_artifacts(
ProductId=_product_id
)['ProvisioningArtifactDetails']
for pa in pa_list:
if pa['Name'] != 'DUMMY' and len(list(filter(lambda x: x['name'] == pa['Name'], _versions))) <= 0:
sc_client.delete_provisioning_artifact(
ProductId=_product_id,
ProvisioningArtifactId=pa['Id']
)
with open("metadata.yaml", 'r') as stream:
blueprints = yaml.safe_load(stream)[BLUEPRINTS_KEY]
for name, blueprint in blueprints.items():
product_id = ssm_client.get_parameter(Name=f"/blueprints/{name}/id")['Parameter']['Value']
print(f"#### {name} - {product_id} ####")
__cleanup_versions(name, blueprint['versions'], product_id)
for version in blueprint['versions']:
print(version)
key = "{}/{}/{}.yaml".format(artifacts_bucket_prefix, name, version['name'])
obj = artifact_bucket.Object(key)
try:
obj.get()
except s3_client.exceptions.NoSuchKey as e:
            # If the version does not already exist, upload the template and create a new provisioning artifact version
print(f"Uploading version {key}")
artifact_bucket.upload_file(f"{BLUEPRINTS_KEY}/{name}.yaml", key)
obj_url = f"https://{artifacts_bucket_name}.s3.{region}.amazonaws.com/{artifacts_bucket_prefix}/{name}/{version['name']}.yaml"
try:
sc_client.describe_provisioning_artifact(
ProductId=product_id,
ProvisioningArtifactName=version['name']
)
err_msg = f"Provisioning Artifact on product {name} ({product_id}) with name {version['name']} already exists"
print(err_msg)
raise Exception(err_msg)
except sc_client.exceptions.ResourceNotFoundException as e:
print(f"Create new provisioning artifact version for template:")
print(obj_url)
sc_client.create_provisioning_artifact(
ProductId=product_id,
Parameters={
'Name': version['name'],
'Description': version['description'],
'Info': {
'LoadTemplateFromURL': obj_url
},
'Type': 'CLOUD_FORMATION_TEMPLATE'
},
)
| 2.03125 | 2 |
bc/utils/misc.py | ikalevatykh/rlbc | 1 | 12788405 | import torch
import random
import socket
import os
import numpy as np
def get_device(device):
assert device in (
'cpu', 'cuda'), 'device {} should be in (cpu, cuda)'.format(device)
if socket.gethostname() == 'gemini' or not torch.cuda.is_available():
device = 'cpu'
else:
device = 'cuda' if device == 'cuda' else "cpu"
return device
def seed_exp(seed, device='cuda'):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if device == 'cuda':
torch.cuda.manual_seed(seed)
torch.set_num_threads(1)
def update_arguments(model=None, dataset=None, collect=None, sim2real=None):
""" User provides the arguments in a user-friendly way.
This function takes care of converting them to the format used by the repo. """
from bc.settings import MODEL_LOGDIR, DATASET_LOGDIR
def update_model_args(model):
if model is None:
return None
# convert the input_type argument from a string to a tuple
if isinstance(model['input_type'], (tuple, list)):
return
input_type_str2list = {
'rgb': ('rgb', ),
'depth': ('depth', ),
'rgbd': ('depth', 'rgb')
}
assert model['input_type'] in input_type_str2list
model['input_type'] = input_type_str2list[model['input_type']]
        # get the full paths using the user-specified settings
model['model_dir'] = os.path.join(MODEL_LOGDIR, model['name'])
model.pop('name')
return model
def update_dataset_args(dataset):
if dataset is None:
return None
dataset['dataset_dir'] = os.path.join(DATASET_LOGDIR, dataset['name'])
dataset.pop('name')
signal_keys_updated = []
for signal_key in dataset['signal_keys']:
signal_keys_updated.append(('state', signal_key))
dataset['signal_keys'] = signal_keys_updated
return dataset
def update_collect_args(collect):
if collect is None:
return None
collect['collect_dir'] = os.path.join(DATASET_LOGDIR, collect['folder'])
collect.pop('folder')
return collect
def update_sim2real_args(sim2real):
if sim2real is None:
return None
sim2real['mcts_dir'] = os.path.join(MODEL_LOGDIR, sim2real['name'])
sim2real['trainset_dir'] = os.path.join(DATASET_LOGDIR, sim2real['trainset_name'])
sim2real['evalset_dir'] = os.path.join(DATASET_LOGDIR, sim2real['evalset_name'])
sim2real.pop('name')
return sim2real
model = update_model_args(model)
dataset = update_dataset_args(dataset)
collect = update_collect_args(collect)
sim2real = update_sim2real_args(sim2real)
return [args for args in (model, dataset, collect, sim2real) if args is not None]
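# Minimal usage sketch (added for illustration, not part of the original file).
# It assumes the bc package and its settings module are importable; the names
# and values in the example dictionaries below are placeholders only.
if __name__ == "__main__":
    example_model = {'name': 'bc_net', 'input_type': 'rgbd'}
    example_dataset = {'name': 'demo_dataset', 'signal_keys': ['joint_velocities']}
    updated_model, updated_dataset = update_arguments(model=example_model,
                                                      dataset=example_dataset)
    print(updated_model['input_type'])     # ('depth', 'rgb')
    print(updated_dataset['signal_keys'])  # [('state', 'joint_velocities')]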
| 2.34375 | 2 |
tests/test_config.py | artur-shaik/wallabag-client | 16 | 12788406 | import os
import tempfile
import pathlib
import pytest
from wallabag.config import Configs, Options, Sections
from xdg.BaseDirectory import xdg_config_home as XDG_CONFIG_HOME
class TestConfigs():
configs = None
def teardown_method(self, method):
os.close(self.fd)
os.remove(self.path)
def setup_method(self, method):
self.fd, self.path = tempfile.mkstemp()
with open(self.path, 'w') as f:
f.write('')
self.configs = Configs(self.path)
if method.__name__ in ['test_get_config', 'test_is_token_expired',
'test_set_config', 'test_set_config_new']:
self.configs.config.read_string("""
[api]
serverurl = https://server
[token]
expires = 1000
""")
elif method.__name__ == 'test_is_valid__true':
self.configs.config.read_string("""
[api]
serverurl = url
username = user
password = <PASSWORD>
[oauth2]
client = 100
secret = 100
""")
elif method.__name__ == 'test_get_path':
self.configs = Configs()
def test_get_path(self):
xdg_config = os.path.expanduser(XDG_CONFIG_HOME)
expected = f"{xdg_config}/wallabag-cli/config.ini"
actual = self.configs.get_path()
assert expected == str(actual)
def test_get_path_custom(self):
expected = pathlib.PurePath("custom/directory")
assert expected == self.configs.get_path(expected)
@pytest.mark.parametrize(
'condition',
[(Sections.TOKEN, Options.EXPIRES, '1000', 'get'),
(Sections.TOKEN, Options.ACCESS_TOKEN, None, 'get'),
(Sections.API, Options.SERVERURL, "https://server", 'get'),
(Sections.API, '', 0, 'getint'),
(Sections.API, '', None, 'get'),
('', '', None, 'get'),
(None, None, None, 'get'),
(None, None, 0, 'getint'),
(Sections.TOKEN, Options.EXPIRES, 1000, 'getint')])
def test_get_config(self, condition):
if condition[3] == 'get':
assert self.configs.get(condition[0], condition[1]) == condition[2]
elif condition[3] == 'getint':
assert self.configs.getint(
condition[0], condition[1]) == condition[2]
def test_is_token_expired(self):
assert self.configs.is_token_expired()
def test_is_token_expired_no_value(self):
assert self.configs.is_token_expired()
def test_is_valid__false(self):
assert not self.configs.is_valid()
def test_is_valid__true(self):
assert self.configs.is_valid()
def test_set_config(self):
self.configs.set(Sections.TOKEN, Options.EXPIRES, str(500))
assert self.configs.getint(Sections.TOKEN, Options.EXPIRES) == 500
def test_set_config_new(self):
self.configs.set(Sections.TOKEN, Options.ACCESS_TOKEN, 'abba')
assert self.configs.get(Sections.TOKEN, Options.ACCESS_TOKEN) == 'abba'
def test_load_or_create(self, monkeypatch):
self.save_called = False
def exists(path):
return False
def savemock(configs, path):
self.save_called = True
return True
monkeypatch.setattr(os.path, 'exists', exists)
monkeypatch.setattr(Configs, 'save', savemock)
self.configs.load_or_create()
assert self.save_called
def test_load_or_create_load(self, monkeypatch):
self.load_called = False
def exists(path):
return True
def loadmock(configs, path):
self.load_called = True
return True
monkeypatch.setattr(os.path, 'exists', exists)
monkeypatch.setattr(Configs, 'load', loadmock)
self.configs.load_or_create()
assert self.load_called
def test_load_or_create_value_error(self, monkeypatch):
def exists(path):
return False
def savemock(configs, path):
return False
monkeypatch.setattr(os.path, 'exists', exists)
monkeypatch.setattr(Configs, 'save', savemock)
with pytest.raises(ValueError, match=Configs.LOAD_ERROR):
self.configs.load_or_create()
@pytest.mark.parametrize(
'password',
['<PASSWORD>', 'password', '<PASSWORD>'])
def test_encryption(self, password):
self.configs.set(Sections.API, Options.PASSWORD, password)
encrypted = self.configs.config.get(Sections.API, Options.PASSWORD)
plain = self.configs.get(Sections.API, Options.PASSWORD)
assert encrypted != password
assert plain == password
| 2.34375 | 2 |
src/estructura-flask/apiflaskdemo/project/auth/__init__.py | PythonistaMX/py231 | 3 | 12788407 | <filename>src/estructura-flask/apiflaskdemo/project/auth/__init__.py
from functools import wraps
from flask import g, abort
def login_required(view):
@wraps(view)
def wrapped_view(*args, **kwargs):
if g.user is None:
return abort(403)
return view(*args,**kwargs)
return wrapped_view | 1.992188 | 2 |
src/models/DGV0/model_v0.py | ChihabEddine98/DeepGo | 8 | 12788408 | <gh_stars>1-10
# imports
import os
import tensorflow.nn as nn
from tensorflow.keras import Input,Model
from tensorflow.keras.utils import plot_model
from tensorflow.keras import layers, regularizers,activations
from tensorflow.keras.optimizers import SGD,Adam
from utils import configs , DotDict
# end imports
'''
Model Configurations :
'''
config = DotDict({ 'n_filters' : 64,
'kernel' : 3,
'n_res_blocks' : 6,
'l2_reg' : 0.0001,
'dropout' : 0.2
})
'''
-------------------------------------------------------------------------------------------
    DGM (DeepGoModel) : this class handles everything related to the deep neural model
                         that represents our Go player. All versions with different
                         architectures inherit these basic methods and add their own
                         specific blocks or methods.
-------------------------------------------------------------------------------------------
'''
class DGM(object):
def __init__(self,version=0,dim=configs.dim,n_moves=configs.n_moves,n_planes=configs.n_planes,
n_filters=config.n_filters,kernel_size=config.kernel,l2_reg=config.l2_reg
,dropout=config.dropout,n_res_blocks=config.n_res_blocks) -> None:
super().__init__()
self.version = version
self.dim = dim
self.n_moves = n_moves
self.n_planes = n_planes
self.n_filters = n_filters
self.kernel = kernel_size
self.l2_reg = regularizers.l2(l2_reg)
self.dropout = dropout
self.n_res_blocks = n_res_blocks
self.model = None
def __str__(self) -> str:
return f'DGMV{self.version}'
def summary(self):
self.model.summary()
def plot_model(self,save_path='models/model_imgs'):
if not self.model:
print(f' You should build the model first !')
return
to_file = os.path.join(os.getcwd(),save_path,f'{str(self)}.png')
plot_model(self.model,to_file=to_file,show_shapes=True)
def build_model(self,n_blocks=config.n_res_blocks):
# Input Block
inp = Input(shape=(self.dim, self.dim, self.n_planes), name='board')
x = self.input_block(inp)
# Body Block
x = self.body_block(x,n_blocks)
# Outputs blocks
policy_head = self.output_policy_block(x)
value_head = self.output_value_block(x)
# Build model
self.model = Model(inputs=inp, outputs=[policy_head, value_head])
self.model.compile(
optimizer = Adam(learning_rate=configs.lr),
loss={'policy': 'categorical_crossentropy', 'value': 'binary_crossentropy'},
loss_weights={'policy' : configs.policy_w, 'value' : configs.value_w},
metrics={'policy': 'categorical_accuracy', 'value': 'mse'})
return self.model
def input_block(self,inp,kernel_resize=3,pad='same'):
# CONV2D + BN + activation
x = layers.Conv2D(self.n_filters, 1, padding=pad)(inp)
x = layers.BatchNormalization()(x)
x = self.activation(x)
if not kernel_resize:
return x
# CONV2D (resize) + BN + activation
x1 = layers.Conv2D(self.n_filters,kernel_resize, padding=pad)(inp)
x1 = layers.BatchNormalization()(x1)
x1 = self.activation(x1)
x = layers.add([x, x1])
return x
def body_block(self,x,n_blocks=config.n_res_blocks):
# Residual Blocks
for _ in range(n_blocks):
x = self.residual_block(x)
return x
def output_policy_block(self,x):
policy_head = layers.Conv2D(1, 1, padding='same', use_bias=False, kernel_regularizer=self.l2_reg)(x)
policy_head = layers.BatchNormalization()(policy_head)
policy_head = self.activation(policy_head)
policy_head = layers.Flatten()(policy_head)
policy_head = layers.Activation('softmax', name='policy')(policy_head)
return policy_head
def output_value_block(self,x):
value_head = layers.GlobalAveragePooling2D()(x)
value_head = layers.Dense(self.n_filters, kernel_regularizer=self.l2_reg)(value_head)
value_head = layers.BatchNormalization()(value_head)
value_head = self.activation(value_head)
value_head = layers.Dropout(self.dropout)(value_head)
value_head = layers.Dense(1, activation='sigmoid', name='value', kernel_regularizer=self.l2_reg)(value_head)
return value_head
def sub_residual_block(self,x1,ratio=4):
x = layers.Dropout(self.dropout)(x1)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(self.n_filters//ratio, activation='relu')(x)
x = layers.Dense(self.n_filters, activation='sigmoid')(x)
return layers.Multiply()([x1, x])
def residual_block(self,x,pad='same'):
x1 = layers.Conv2D(self.n_filters, self.kernel, padding=pad)(x)
x1 = layers.BatchNormalization()(x1)
x1 = self.activation(x1)
x1 = layers.Conv2D(self.n_filters, self.kernel, padding=pad)(x1)
x1 = layers.BatchNormalization()(x1)
x1 = self.sub_residual_block(x1)
x = layers.add([x1, x])
x = self.activation(x)
x = layers.BatchNormalization()(x)
return x
def activation(self,x):
return nn.swish(x)
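# Minimal usage sketch (added for illustration, not part of the original file);
# it assumes the `configs` object imported from utils provides dim, n_moves,
# n_planes, lr and the loss weights used inside build_model().
if __name__ == "__main__":
    dgm = DGM(version=0)
    dgm.build_model()
    dgm.summary()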
| 2.25 | 2 |
clickuz/migrations/0001_initial.py | aziz837/ClickUz | 39 | 12788409 | <reponame>aziz837/ClickUz
# Generated by Django 3.0.5 on 2020-04-26 15:04
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('click_trans_id', models.CharField(max_length=255)),
('merchant_trans_id', models.CharField(max_length=255)),
('amount', models.CharField(max_length=255)),
('action', models.CharField(max_length=255)),
('sign_string', models.CharField(max_length=255)),
('sign_datetime', models.DateTimeField(max_length=255)),
('status', models.CharField(choices=[('processing', 'processing'), ('finished', 'finished'), ('canceled', 'canceled')], default='processing', max_length=25)),
],
),
]
| 1.90625 | 2 |
clinicadl/clinicadl/tools/deep_learning/data.py | 921974496/AD-DL | 1 | 12788410 | import torch
import pandas as pd
import numpy as np
from os import path
from torch.utils.data import Dataset, sampler
from scipy.ndimage.filters import gaussian_filter
class MRIDataset(Dataset):
"""Dataset of MRI organized in a CAPS folder."""
def __init__(self, img_dir, data_file, preprocessing='linear', transform=None):
"""
Args:
img_dir (string): Directory of all the images.
data_file (string): File name of the train/test split file.
preprocessing (string): Defines the path to the data in CAPS
transform (callable, optional): Optional transform to be applied on a sample.
"""
self.img_dir = img_dir
self.transform = transform
self.diagnosis_code = {'CN': 0, 'AD': 1, 'sMCI': 0, 'pMCI': 1, 'MCI': 1, 'unlabeled': -1}
self.data_path = preprocessing
# Check the format of the tsv file here
if isinstance(data_file, str):
self.df = pd.read_csv(data_file, sep='\t')
elif isinstance(data_file, pd.DataFrame):
self.df = data_file
else:
raise Exception('The argument datafile is not of correct type.')
if ('diagnosis' not in list(self.df.columns.values)) or ('session_id' not in list(self.df.columns.values)) or \
('participant_id' not in list(self.df.columns.values)):
raise Exception("the data file is not in the correct format."
"Columns should include ['participant_id', 'session_id', 'diagnosis']")
self.size = self[0]['image'].numpy().size
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
img_name = self.df.loc[idx, 'participant_id']
img_label = self.df.loc[idx, 'diagnosis']
sess_name = self.df.loc[idx, 'session_id']
# Not in BIDS but in CAPS
if self.data_path == "linear":
image_path = path.join(self.img_dir, 'subjects', img_name, sess_name,
't1', 'preprocessing_dl',
img_name + '_' + sess_name + '_space-MNI_res-1x1x1.pt')
elif self.data_path == "mni":
image_path = path.join(self.img_dir, 'subjects', img_name, sess_name,
't1', 'spm', 'segmentation', 'normalized_space',
img_name + '_' + sess_name + '_space-Ixi549Space_T1w.pt')
else:
raise NotImplementedError("The data path %s is not implemented" % self.data_path)
image = torch.load(image_path)
label = self.diagnosis_code[img_label]
if self.transform:
image = self.transform(image)
sample = {'image': image, 'label': label, 'participant_id': img_name, 'session_id': sess_name,
'image_path': image_path}
return sample
def session_restriction(self, session):
"""
        Allows generating a new MRIDataset using only some specific sessions (mostly used for evaluation on test data).
        :param session: (str) the session wanted. Must be 'all' or 'ses-MXX'
        :return: (MRIDataset) a copy of the dataset restricted to the wanted sessions
"""
from copy import copy
data_output = copy(self)
if session == "all":
return data_output
else:
df_session = self.df[self.df.session_id == session]
df_session.reset_index(drop=True, inplace=True)
data_output.df = df_session
if len(data_output) == 0:
raise Exception("The session %s doesn't exist for any of the subjects in the test data" % session)
return data_output
class GaussianSmoothing(object):
def __init__(self, sigma):
self.sigma = sigma
def __call__(self, sample):
image = sample['image']
np.nan_to_num(image, copy=False)
smoothed_image = gaussian_filter(image, sigma=self.sigma)
sample['image'] = smoothed_image
return sample
class ToTensor(object):
"""Convert image type to Tensor and diagnosis to diagnosis code"""
def __call__(self, image):
np.nan_to_num(image, copy=False)
image = image.astype(float)
return torch.from_numpy(image[np.newaxis, :]).float()
class MinMaxNormalization(object):
"""Normalizes a tensor between 0 and 1"""
def __call__(self, image):
return (image - image.min()) / (image.max() - image.min())
def load_data(train_val_path, diagnoses_list, split, n_splits=None, baseline=True):
train_df = pd.DataFrame()
valid_df = pd.DataFrame()
if n_splits is None:
train_path = path.join(train_val_path, 'train')
valid_path = path.join(train_val_path, 'validation')
else:
train_path = path.join(train_val_path, 'train_splits-' + str(n_splits),
'split-' + str(split))
valid_path = path.join(train_val_path, 'validation_splits-' + str(n_splits),
'split-' + str(split))
print("Train", train_path)
print("Valid", valid_path)
for diagnosis in diagnoses_list:
if baseline:
train_diagnosis_path = path.join(train_path, diagnosis + '_baseline.tsv')
else:
train_diagnosis_path = path.join(train_path, diagnosis + '.tsv')
valid_diagnosis_path = path.join(valid_path, diagnosis + '_baseline.tsv')
train_diagnosis_df = pd.read_csv(train_diagnosis_path, sep='\t')
valid_diagnosis_df = pd.read_csv(valid_diagnosis_path, sep='\t')
train_df = pd.concat([train_df, train_diagnosis_df])
valid_df = pd.concat([valid_df, valid_diagnosis_df])
train_df.reset_index(inplace=True, drop=True)
valid_df.reset_index(inplace=True, drop=True)
return train_df, valid_df
def load_data_test(test_path, diagnoses_list):
test_df = pd.DataFrame()
for diagnosis in diagnoses_list:
test_diagnosis_path = path.join(test_path, diagnosis + '_baseline.tsv')
test_diagnosis_df = pd.read_csv(test_diagnosis_path, sep='\t')
test_df = pd.concat([test_df, test_diagnosis_df])
test_df.reset_index(inplace=True, drop=True)
return test_df
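# Illustrative sketch (added for clarity, not part of the original file): a
# typical way these pieces fit together; the CAPS and tsv paths below are
# placeholders, not real data.
#
#     train_df, valid_df = load_data('/path/to/tsv_splits', ['AD', 'CN'],
#                                    split=0, n_splits=5, baseline=True)
#     dataset = MRIDataset('/path/to/caps', train_df, preprocessing='linear',
#                          transform=MinMaxNormalization())
#     sample = dataset[0]  # dict with 'image', 'label', 'participant_id', ...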
| 2.84375 | 3 |
11/solution.py | Hegemege/advent-of-code-2021 | 0 | 12788411 | <gh_stars>0
class Octopus:
def __init__(self, energy):
self.flashed = False
self.energy = energy
self.neighbors = []
self.flashes = 0
def flash(self):
if self.flashed:
return
self.flashed = True
self.flashes += 1
for neighbor in self.neighbors:
neighbor.energy += 1
if neighbor.energy > 9:
neighbor.flash()
def part1(input_data):
height = len(input_data)
width = len(input_data[0])
grid = [[Octopus(int(y)) for y in x] for x in input_data]
for j in range(height):
for i in range(width):
cell = grid[j][i]
if j > 0:
cell.neighbors.append(grid[j - 1][i])
if j < height - 1:
cell.neighbors.append(grid[j + 1][i])
if i > 0:
cell.neighbors.append(grid[j][i - 1])
if i < width - 1:
cell.neighbors.append(grid[j][i + 1])
if j > 0 and i > 0:
cell.neighbors.append(grid[j - 1][i - 1])
if j < height - 1 and i > 0:
cell.neighbors.append(grid[j + 1][i - 1])
if j > 0 and i < width - 1:
cell.neighbors.append(grid[j - 1][i + 1])
if j < height - 1 and i < width - 1:
cell.neighbors.append(grid[j + 1][i + 1])
for i in range(100):
# Reset flashed for all octopuses and add 1 energy
for row in grid:
for octopus in row:
octopus.flashed = False
octopus.energy += 1
for row in grid:
for octopus in row:
if octopus.energy > 9:
octopus.flash()
for row in grid:
for octopus in row:
if octopus.flashed:
octopus.energy = 0
return sum([sum([y.flashes for y in x]) for x in grid])
def part2(input_data):
height = len(input_data)
width = len(input_data[0])
grid = [[Octopus(int(y)) for y in x] for x in input_data]
for j in range(height):
for i in range(width):
cell = grid[j][i]
if j > 0:
cell.neighbors.append(grid[j - 1][i])
if j < height - 1:
cell.neighbors.append(grid[j + 1][i])
if i > 0:
cell.neighbors.append(grid[j][i - 1])
if i < width - 1:
cell.neighbors.append(grid[j][i + 1])
if j > 0 and i > 0:
cell.neighbors.append(grid[j - 1][i - 1])
if j < height - 1 and i > 0:
cell.neighbors.append(grid[j + 1][i - 1])
if j > 0 and i < width - 1:
cell.neighbors.append(grid[j - 1][i + 1])
if j < height - 1 and i < width - 1:
cell.neighbors.append(grid[j + 1][i + 1])
i = 0
while True:
i += 1
# Reset flashed for all octopuses and add 1 energy
for row in grid:
for octopus in row:
octopus.flashed = False
octopus.energy += 1
for row in grid:
for octopus in row:
if octopus.energy > 9:
octopus.flash()
all_flashed = True
for row in grid:
for octopus in row:
if octopus.flashed:
octopus.energy = 0
else:
all_flashed = False
if all_flashed:
return i
if __name__ == "__main__":
with open("input", "r") as input_file:
input_data = list(map(lambda x: x.strip(), input_file.readlines()))
print(part1(input_data))
print(part2(input_data))
| 3.03125 | 3 |
delayBlit.py | cshih2003/Game-FSE | 0 | 12788412 | <reponame>cshih2003/Game-FSE
from time import *
from pygame import *
from math import *
from random import *
from tkinter import *
width=1050
height=750
screen=display.set_mode((width,height))
aa=True
RED=(255,0,0)
GREEN=(0,255,0)
BLACK=(0,0,0)
pathCol=(128,128,128,255)
pathCol2=(129,128,124,255)
init()
map1=image.load("FSE-Assets/Maps/map1.jpg")
boomPics=[]
for i in range(28):
boomPics+=[image.load("FSE-Assets/bomb wait/images\\Explode-05_frame_"+str(i)+".gif")]
class towerType:
def __init__(self,name,damage,price,upgrade,uCost):
self.name=name
self.damage=damage
self.price=price
self.upgrade=upgrade
self.uCost=uCost
self.filename="FSE-Assets/Defenses/"+name+".png"
antiTank=towerType('antiTank',80,800,False,300)
bunker=towerType('bunker',100,1000,False,350)
fortress=towerType('fortress',150,1250,False,450)
heavyGun=towerType('heavyGun',200,1500,False,500)
heavyMG=towerType('heavyMG',35,500,False,200)
soldier=towerType('soldier',25,250,False,150)
class enemyType:
def __init__(self,name,speed,health,damage):
self.name=name
self.speed=speed
self.health=health
self.damage=damage
self.filename="FSE-Assets/Enemies/"+name+".png"
infantry=enemyType('infantry',7,200,5)
transport=enemyType('transport',1.7,400,5)
motorcycle=enemyType('motorcycle',2,250,10)
lightTank=enemyType('lightTank',1,700,15)
heavyTank=enemyType('heavyTank',0.7,1000,20)
#fonts
comicSans40=font.SysFont("Comic Sans MS",40)
stencil20=font.SysFont("Stencil",20)
stencil40=font.SysFont("Stencil",40)
def genEnemies(enemy):
global pics
DELAY=2
pics=[]
'''
for i in range(len(enemyList)):
enemyList[i][DELAY]=30
print(enemyList[i][DELAY])
if enemyList[i][DELAY]>0:
enemyList[i][DELAY]-=1
if enemyList[i][DELAY]==0:
enemy.append(enemyList[i])
enemyList.remove(enemyList[i])
'''
for i in enemy:
img=[]
img.append(image.load(i[4].filename))
img.append(transform.rotate(image.load(i[4].filename),-90))
img.append(transform.rotate(image.load(i[4].filename),-270))
img.append(transform.rotate(image.load(i[4].filename),-180))
pics.append(img)
def moveEnemy(screen,enemy):
count=-1
for i in enemy:
if i[0]<220:
i[0]+=i[4].speed
i[3]=0
if i[0]>=220 and i[1]<420:
i[1]+=i[4].speed
i[3]=1
if i[1]>=410:
i[0]+=i[4].speed
i[3]=0
count+=1
screen.blit(pics[count][i[3]],i[:2])
display.flip()
def bombAnimation(screen,bombs):
for bomb in bombs[:]:
screen.blit(boomPics[bomb[3]],bomb[:2])
def advanceBombs(bombs):
global aa
for bomb in bombs:
bomb[2]+=1
if bomb[2]>5 and bomb[2]%5==0:
bomb[3]+=1
if bomb[3]==27:
bombs.remove(bomb)
aa=True
print("delete")
def baseHealth(enemy):
global aa
blackHeart=image.load("FSE-Assets/blackHeart.png")
blackHeart=transform.scale(blackHeart,(25,25))
screen.blit(blackHeart,(940,350))
bars=100
count=0
draw.rect(screen,BLACK,(944,374,102,12),0)
for i in enemy:
if i[0]>=900 and aa:
bombs.append([870,350,0,0])
aa=False
bars-=i[4].damage
advanceBombs(bombs)
if bars<=0:
bars=0
baseHealth=stencil20.render(str(bars),True,BLACK)
screen.blit(baseHealth,(965,353))
draw.rect(screen,RED,(1044,375,bars-100,10),0)
draw.rect(screen,GREEN,(945,375,bars,10),0)
if bars==0:
draw.rect(screen,RED,(945,375,100,10),0)
endScreen=Surface((width,height),SRCALPHA)
endScreen.fill((220,220,220,127))
screen.blit(endScreen,(0,0))
youLost=stencil40.render("GAME OVER",True,BLACK)
screen.blit(youLost,(400,350))
def healthBars(enemy):
for i in enemy:
draw.rect(screen,BLACK,(i[0]+14,i[1]-11,52,9),0)
draw.rect(screen,GREEN,(i[0]+15,i[1]-10,50,7),0)
def drawScene(screen):
screen.blit(map1,(0,0))
if len(bombs)>0:
bombAnimation(screen,bombs)
#[x,y,DELAY,FRAME,className]
#enemy=[]
enemy=[[-100,190,0,0,infantry],[-1000,190,0,0,infantry]]
bombs=[]
myclock=time.Clock()
running=True
while running:
for evt in event.get():
if evt.type==QUIT:
running=False
genEnemies(enemy)
moveEnemy(screen,enemy)
drawScene(screen)
baseHealth(enemy)
healthBars(enemy)
myclock.tick(60)
quit()
| 2.75 | 3 |
simple_qubitization.py | balopat/qsvt_experiments | 2 | 12788413 | # Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
from typing import List, Callable, Tuple
import cirq
import numpy as np
import pyqsp.phases
import scipy.linalg
from plot_qsp import qsp_plot
from qsp import to_r_z_from_wx
@dataclasses.dataclass
class FixedPointAmplitudeAmplification:
"""Amplitude amplification inputs.
Based on the inputs for an amplitude amplification problem, it
creates the fixed point amplitude amplification circuit and after the
proper projection (depending on whether the number of
coefficients is even or odd) it returns the amplitude.
On a real quantum computer, we'll need to provide all of u, u_inv, a0, b0,
rotA0=e^(i * phi * (2|a0><a0|-I)) and rotB0=e^(i * phi * (2|b0><b0|-I))
for the algorithm as black boxes, but in this simulation we can just
calculate them from u, a0, b0. Finally, coeffs determine the polynomial
we'd like to convert <a0|u|b0> with.
Attributes:
u: the unitary to amplify
a0: the goal state
b0: the starting state
coeffs: the coefficients in QSP(R, <0|.|0>) convention
"""
u: cirq.Gate
a0: cirq.STATE_VECTOR_LIKE
b0: cirq.STATE_VECTOR_LIKE
coeffs: List[float]
u_inv: cirq.Gate = None
rot_a0: Callable[[float], cirq.Gate] = None
rot_b0: Callable[[float], cirq.Gate] = None
num_qubits: int = 2
_amplitude_projector: Callable[[np.ndarray], np.complex] = None
def __post_init__(self):
self.u_inv = cirq.inverse(self.u)
self.rot_a0 = self._rot_state("a0", self.a0)
self.rot_b0 = self._rot_state("b0", self.b0)
self.num_qubits = cirq.num_qubits(self.u)
self._amplitude_projector = lambda uni: (
self.a0 @ uni @ self.b0
if len(self.coeffs) % 2 == 1
else self.b0 @ uni @ self.b0
)
def _rot_state(
self, name: str, state_vector: np.ndarray
) -> Callable[[float], cirq.Gate]:
"""Rotates the state around a given state."""
return lambda phi: cirq.MatrixGate(
name=f"{name}[{phi:.2f}]",
matrix=scipy.linalg.expm(
1j * phi * (2 * np.outer(state_vector, state_vector) - np.identity(4))
),
)
def get_circuit(self) -> cirq.Circuit:
qs = cirq.LineQubit.range(self.num_qubits)
# reverse operation order for circuits
# we mandatorily start with U, as this is the U|B0> in Eq (13)
if len(self.coeffs) == 0:
return cirq.Circuit(self.u(*qs))
ops = []
i = 1
for phi in self.coeffs[::-1]:
if i % 2 == 1:
ops += [self.u(*qs)]
ops += [self.rot_a0(phi)(*qs)]
else:
ops += [self.u_inv(*qs)]
ops += [self.rot_b0(phi)(*qs)]
i += 1
return cirq.Circuit(ops)
def run(self) -> float:
return self._amplitude_projector(cirq.unitary(self.get_circuit()))
def __str__(self):
return f"""FixedPointAmplification:
num qubits: {self.num_qubits},
u: {self.u},
a0: {self.a0},
b0: {self.b0},
{self.get_circuit()}"""
class Experiment:
def __init__(
self,
coeffs: List[float],
n_points: int,
basis_a: int = 2,
basis_b: int = 3,
n_qubits: int = 2,
):
self.coeffs = coeffs
self.basis_a = basis_a
self.basis_b = basis_b
self.n_points = n_points
self.a_s = []
self.fa_s = []
self.a0 = cirq.to_valid_state_vector(basis_a, n_qubits)
self.b0 = cirq.to_valid_state_vector(basis_b, n_qubits)
def _get_u_gate_and_initial_amplitude(
self, p: float, sign: int
) -> Tuple[float, cirq.Gate]:
"""Creates a CNOT-like unitary with a real amplitude."""
u = sign * scipy.linalg.expm(1j * p * cirq.unitary(cirq.CX))
a = u[self.basis_a][self.basis_b]
new_a = a * sign * np.conj(a) / np.abs(a)
return new_a, cirq.MatrixGate(
name="u", matrix=sign * np.conj(a) / np.abs(a) * u
)
def _run_half(self, sign: int):
for p in np.linspace(1e-8, np.pi, self.n_points):
a, u = self._get_u_gate_and_initial_amplitude(p, sign)
fp_amp = self._get_fpamp(u)
self.a_s.append(a)
self.fa_s.append(fp_amp.run())
def _get_fpamp(self, u):
return FixedPointAmplitudeAmplification(u, self.a0, self.b0, self.coeffs)
def run(self) -> Tuple[List[float], List[float]]:
_, sample_fpamp = self._get_u_gate_and_initial_amplitude(0.123, -1)
print(self._get_fpamp(sample_fpamp))
self._run_half(-1)
self._run_half(1)
return self.a_s, self.fa_s
def experiment(
coeffs,
npoints=50,
title=None,
filename="fp_amp.png",
target_fn=None,
target_fn_label: str = None,
):
"""The main function to qsp the two cases presented in the paper."""
title = f"Fixed amplitude amplification for {title}"
a_s, f_as = Experiment(coeffs, npoints).run()
qsp_plot(np.real(a_s), f_as, filename, target_fn, target_fn_label, title)
if __name__ == "__main__":
experiment(
title="$T_1$",
coeffs=to_r_z_from_wx([0, 0]),
npoints=10,
filename="fp_amp_t1.png",
target_fn=lambda a_s: a_s,
target_fn_label="$T_1(a)=a$",
)
experiment(
title="$T_2$",
coeffs=to_r_z_from_wx([0, 0, 0]),
npoints=100,
filename="fp_amp_t2.png",
target_fn=lambda a_s: 2 * a_s ** 2 - 1,
target_fn_label="$T_2(a)=2a^2-1$",
)
experiment(
title="$T_3$",
coeffs=to_r_z_from_wx([0, 0, 0, 0]),
npoints=100,
filename="fp_amp_t3.png",
target_fn=lambda a_s: 4 * a_s ** 3 - 3 * a_s,
target_fn_label="$T_3(a)=4 a^3-3 a$",
)
experiment(
title="$T_4$",
coeffs=to_r_z_from_wx([0, 0, 0, 0, 0]),
npoints=100,
filename="fp_amp_t4.png",
target_fn=lambda a_s: 8 * a_s ** 4 - 8 * a_s ** 2 + 1,
target_fn_label="$T_4(a)=8 a^4-8 a^2 +1$",
)
experiment(
title="$T_5$",
coeffs=to_r_z_from_wx([0, 0, 0, 0, 0, 0]),
npoints=100,
filename="fp_amp_t5.png",
target_fn=lambda a_s: 16 * a_s ** 5 - 20 * a_s ** 3 + 5 * a_s,
target_fn_label="$T_5(a)=16 a^5-20 a^3 + 5 a$",
)
# these are the same as in the Martyn et al paper
wx_phis = pyqsp.phases.FPSearch().generate(10, 0.5)
experiment(
title="FPSearch(10,0.5)",
coeffs=to_r_z_from_wx(wx_phis),
npoints=100,
filename="fp_amp_fpsearch_10_0.5.png",
)
| 2.484375 | 2 |
plugins/site_screenshot.py | dytplay/darkvkbot | 0 | 12788414 | import aiohttp
from plugin_system import Plugin
plugin = Plugin("Скриншот любого сайта",
usage=["скрин [адрес сайта] - сделать скриншот сайта [адрес сайта]"])
# Желательно первой командой указывать основную (она будет в списке команд)
@plugin.on_command('скрин')
async def screen(msg, args):
if not args:
return msg.answer('Вы не указали сайт!')
async with aiohttp.ClientSession() as sess:
async with sess.get("http://mini.s-shot.ru/1024x768/1024/png/?" + args.pop()) as resp:
result = await msg.vk.upload_photo(await resp.read())
return await msg.answer('Держи', attachment=str(result))
| 2.59375 | 3 |
tests/right_tests/applier_tests/test_curry.py | lycantropos/lz | 7 | 12788415 | import pytest
from hypothesis import given
from lz import right
from lz.functional import curry
from tests import strategies
from tests.hints import (FunctionCall,
PartitionedFunctionCall)
@given(strategies.partitioned_transparent_functions_calls)
def test_basic(partitioned_function_call: PartitionedFunctionCall) -> None:
(function,
(first_args_part, second_args_part),
(first_kwargs_part, second_kwargs_part)) = partitioned_function_call
applied = right.applier(function,
*second_args_part,
**first_kwargs_part)
result = curry(applied)
assert (result(*first_args_part, **second_kwargs_part)
== function(*first_args_part, *second_args_part,
**first_kwargs_part, **second_kwargs_part))
@given(strategies.non_variadic_transparent_functions_calls_with_invalid_args)
def test_invalid_args(function_call: FunctionCall) -> None:
function, invalid_args, kwargs = function_call
applied = right.applier(function, *invalid_args, **kwargs)
with pytest.raises(TypeError):
curry(applied)
@given(strategies.non_variadic_transparent_functions_calls_with_invalid_kwargs)
def test_invalid_kwargs(function_call: FunctionCall) -> None:
function, args, invalid_kwargs = function_call
applied = right.applier(function, *args, **invalid_kwargs)
with pytest.raises(TypeError):
curry(applied)
| 2.28125 | 2 |
app/app/tests.py | snmirdamadi/kitchen-recipe | 0 | 12788416 | from django.test import TestCase
from app.calc import add, subtract
class CalcTest(TestCase):
    def test_add_numbers(self):
"""Test that numbers are added together"""
self.assertEqual(add(3,8), 11)
def test_subtract_numbers(self):
"""Test That numbers are subtracted"""
self.assertEqual(subtract(5, 11), 6)
| 2.9375 | 3 |
functions.py | karipov/learn-to-decimals | 0 | 12788417 | <filename>functions.py
import random, decimal
import operator
ops = {"+": operator.add, "-": operator.sub, "/": operator.truediv,
"*": operator.mul} # this is so that strings are converted to operators
score_counter = 0 # overall score of the user. Modified by counter(t_or_f)
answers_list = [] # data for sending it to the Results.txt file
# asks the user for his name, later to be added to Results.txt
def username():
name = input("Please enter your name: ")
return name
# generates a random number with with 2 d.p.
def random_number():
return float(random.randint(1, 10000)/10)
# generates a random operation in form of a string
def random_ops():
t = random.randint(1, 4)
if t == 1:
oper = "+"
elif t == 2:
oper = "-"
elif t == 3:
oper = "/"
elif t == 4:
oper = "*"
else:
pass
return oper
# changes the score counter according to the answer. Takes a True or False as an input
def counter(t_or_f):
global score_counter
if t_or_f == True:
score_counter += 1
else:
pass
# # supposed to send the result to the file
# def send_to_file(num, result):
# result_file = open("Results.txt", "w")
# sentence = "{}, {}".format(num, result)
# result_file.write(sentence)
# result_file.close()
# prints a problem that has to be solved
def new_problem():
num1 = random_number() # first random number
num2 = random_number() # second random number
calc = random_ops() # random operation
print(num1, calc, num2, "= ") # sticks everything together to form a random problem
# if a user does not enter anything
try:
answer = float(input("--> ")) # asks the user for an answer
except ValueError:
answer = 0
# round() is to round off the recurring decimals that may come up while dividing
if answer == round(ops[calc](num1, num2), 2): # ops[calc](num1, num2) is how calc, which is a string, is converted into an operation
counter(True)
elif answer != round(ops[calc](num1, num2), 2):
counter(False)
sentence = "{} {} {} = {}, score: {} \n".format(num1, calc, num2, answer, score_counter) # stciks togther the user's answer and score
global answers_list # modifying a list outside the function
    answers_list.append(sentence) # append the assembled sentence to the list, which will later be written to a file
| 4.125 | 4 |
xdp_ddos/xdp_ip_whitelist.py | Oliryc/monobpf | 0 | 12788418 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# xdp_ip_whitelist.py Drop packet coming from ips not in a whitelist
#
# Based on https://github.com/iovisor/bcc/blob/master/examples/networking/xdp/xdp_drop_count.py,
# Copyright (c) 2016 PLUMgrid
# Copyright (c) 2016 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License")
# See http://apache.org/licenses/LICENSE-2.0
from bcc import BPF
import pyroute2
import time
import sys
import socket, struct
# Like unblockedIp = ['10.244.3.24']
unblockedIp = [
"192.168.1.187", # Cyril’s computer
"192.168.1.98", # Lucian’s connector
"192.168.1.1", # Router
"192.168.1.150", # Router
"192.168.1.131", # ??
"192.168.1.68", # ??
]
debug = 1
flags = 0
def usage():
print("Usage: {0} [-S] <ifdev>".format(sys.argv[0]))
print(" -S: use skb mode\n")
print("e.g.: {0} eth0\n".format(sys.argv[0]))
exit(1)
if len(sys.argv) < 2 or len(sys.argv) > 3:
usage()
if len(sys.argv) == 2:
device = sys.argv[1]
if len(sys.argv) == 3:
if "-S" in sys.argv:
# XDP_FLAGS_SKB_MODE
flags |= 2 << 0
if "-S" == sys.argv[1]:
device = sys.argv[2]
else:
device = sys.argv[1]
mode = BPF.XDP
#mode = BPF.SCHED_CLS
if mode == BPF.XDP:
ret = "XDP_DROP"
ctxtype = "xdp_md"
else:
ret = "TC_ACT_SHOT"
ctxtype = "__sk_buff"
# load BPF program
bpf_src = ''
with open("xdp_ip_whitelist.bpf") as bpf_file:
bpf_src = bpf_file.read()
# build a list (not a lazy map) so it can both be joined and measured with len()
ip4array = [str(socket.htonl(struct.unpack("!L", socket.inet_aton(ip))[0]))
            for ip in unblockedIp]
bpf_src = bpf_src.replace("__IP4ARRAY__", ", ".join(ip4array))
bpf_src = bpf_src.replace("__IP4ARRAYSIZE__", str(len(ip4array)))
if debug:
print("C code of BPF program:")
print(bpf_src)
b = BPF(text = bpf_src,
cflags=["-w", "-DRETURNCODE=%s" % ret, "-DCTXTYPE=%s" % ctxtype])
fn = b.load_func("xdp_prog1", mode)
if mode == BPF.XDP:
print("XDP Mode")
b.attach_xdp(device, fn, flags)
else:
print("TC Fallback")
ip = pyroute2.IPRoute()
ipdb = pyroute2.IPDB(nl=ip)
idx = ipdb.interfaces[device].index
ip.tc("add", "clsact", idx)
ip.tc("add-filter", "bpf", idx, ":1", fd=fn.fd, name=fn.name,
parent="ffff:fff2", classid=1, direct_action=True)
dropcnt = b.get_table("dropcnt")
prev = [0] * 256
print("Accepting packets only from the following IP addresses {}, hit CTRL+C to stop".format(unblockedIp))
while 1:
try:
time.sleep(1)
except KeyboardInterrupt:
print("Removing filter from device")
        break
if mode == BPF.XDP:
b.remove_xdp(device, flags)
else:
ip.tc("del", "clsact", idx)
ipdb.release()
| 2.21875 | 2 |
sockets/tcp_server.py | zhaoyu69/python3-learning | 1 | 12788419 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# TCP
import socket
# Client
# # create
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#
# # connect
# s.connect(('www.sina.com.cn', 80))
#
# # AF_INET IPV4
# # AF_INET6 IPV6
# # SOCK_STREAM uses the stream-oriented TCP protocol
# # connect takes a tuple containing the IP address and port
#
# # send
# s.send(b'GET / HTTP/1.1\r\nHost: www.sina.com.cn\r\nConnection: close\r\n\r\n')
#
# # receive
# buffer = []
# while True:
# # 每次最多接收1k字节:
# d = s.recv(1024)
# if d:
# buffer.append(d)
# else:
# break
# data = b''.join(buffer)
#
# # close
# s.close()
#
# # handle data to file
# header, html = data.split(b'\r\n\r\n', 1)
# print(header.decode('utf-8'))
# with open('sina.html', 'wb') as f:
# f.write(html)
# Server
# create
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind
s.bind(('127.0.0.1', 9999))
# listen
s.listen(5)
print('Waiting for connection...')
# accept
import threading, time
def tcplink(sock, addr):
print('Accept new connection from %s:%s...' % addr)
sock.send(b'Welcome!')
while True:
data = sock.recv(1024)
time.sleep(1)
if not data or data.decode('utf-8') == 'exit':
break
sock.send(('Hello, %s!' % data.decode('utf-8')).encode('utf-8'))
sock.close()
print('Connection from %s:%s closed.' % addr)
while True:
    # accept a new connection:
sock, addr = s.accept()
    # spawn a new thread to handle the TCP connection:
t = threading.Thread(target=tcplink, args=(sock, addr))
t.start()
| 3.40625 | 3 |
test_client.py | jbilskie/BME547_final | 0 | 12788420 | <gh_stars>0
# test_client.py
# Authors: <NAME>
# Last Modified: 4/24/19
from user import User
import numpy as np
import pytest
from image import read_img_as_b64
good_img1 = read_img_as_b64("test_client/test1.jpg")
good_img2 = read_img_as_b64("test_client/test2.png")
good_img3 = read_img_as_b64("test_client/test3.tiff")
bad_img1 = "239fbn3rb0tbh0r2hvr0bh"
bad_img2 = ""
bad_img3 = good_img1 + "ruin"
@pytest.mark.parametrize("file_list, exp_status_code",
[([["1image1", good_img1,
[False, False, True, True, False]],
["1image2", good_img2,
[False, True, True, True, False]],
["1image3", good_img3,
[False, False, False, True, False]]],
[200, 200, 200]),
(['junk'], [400]),
([[1, good_img1,
[False, False, True, True, False]],
["2image2", good_img2,
[False, True, True, True, False]],
["2image3", good_img3,
[False, False, False, True, False]]],
[400, 200, 200]),
([["3image1", bad_img1,
[False, False, True, True, False]],
["3image2", good_img2,
[False, True, True, True, False]],
["3image3", good_img3,
[False, False, False, True, False]]],
[400, 200, 200]),
([["4image1", good_img1,
[False, False, True, True, False]],
["4image2", bad_img2,
[False, True, True, True, False]],
["4image3", good_img3,
[False, False, False, True, False]]],
[200, 400, 200]),
([["5image1", good_img1,
[False, False, True, True, False]],
["5image2", good_img2,
[False, True, True, True, False]],
["5image3", bad_img3,
[False, False, False, True, False]]],
[200, 200, 400]),
([["6image1", good_img1,
[False, 1, True, True, False]],
["6image2", good_img2,
[False, True, True, True, False]],
["6image3", good_img3,
[False, False, False, True, False]]],
[400, 200, 200]),
([["7image1", good_img1,
[False, False, True, True, True, False]],
["7image2", good_img2,
[False, True, True, True, False]],
["7image3", good_img3,
[False, False, False, True, False]]],
[400, 200, 200]),
([["8image1", good_img1],
["8image2", good_img2,
[False, True, True, True, False]],
["8image3", good_img3,
[False, False, False, True, False]]],
[400, 200, 200]),
([["9image1", good_img1,
[False, False, False, False, False]]], [400])])
def test_upload_check_file(file_list, exp_status_code):
"""Tests check_file
    Tests whether a filelist contains the correct type and number
of elements for uploading multiple images.
Args:
file_list (list): UPLOAD list of files where each item in the list
is a list of the file's filename, b64 image, and an array of
what processing steps should be done
Example: file_list = [file1, file2]
file1 = ["image1", b64_image1,
[False, False, True, True, False]]
file2 = ["image1", b64_image1,
[True, True, False, False, True]]
Each processing steps array has a True if that process is
desired and False if not. In this example, image1 desires to
perform contrast stretching and log compression. Likewise,
image2 desires the original and to perform histogram
equalization and reverse video.
exp_status_code (int): status code either 200 or 400
Returns:
none
"""
from client import check_file
status = {}
status['code'] = []
for file in file_list:
i_status = check_file(file, "upload")
status['code'].append(i_status['code'])
assert status['code'] == exp_status_code
@pytest.mark.parametrize("file_list, exp_status_code",
[([["1image1", ".jpg",
[False, False, True, True, False]],
["1image2", ".png",
[False, True, True, True, False]],
["1image3", ".tiff",
[False, False, False, True, False]]],
[200, 200, 200]),
(['junk'], [400]),
([[1, ".jpg",
[False, False, True, True, False]],
["2image2", ".png",
[False, True, True, True, False]],
["2image3", ".tiff",
[False, False, False, True, False]]],
[400, 200, 200]),
([["3image1", 10,
[False, False, True, True, False]],
["3image2", ".png",
[False, True, True, True, False]],
["3image3", ".tiff",
[False, False, False, True, False]]],
[400, 200, 200]),
([["4image1", ".jpg",
[False, False, True, True, False]],
["4image2", "png",
[False, True, True, True, False]],
["4image3", ".tiff",
[False, False, False, True, False]]],
[200, 400, 200]),
([["5image1", ".jpg",
[False, False, True, True, False]],
["5image2", ".png",
[False, True, True, True, False]],
["5image3", ".gif",
[False, False, False, True, False]]],
[200, 200, 400]),
([["6image1", ".jpg",
[False, 1, True, True, False]],
["6image2", ".png",
[False, True, True, True, False]],
["6image3", ".tiff",
[False, False, False, True, False]]],
[400, 200, 200]),
([["7image1", ".jpg",
[False, False, True, True, True, False]],
["7image2", ".png",
[False, True, True, True, False]],
["7image3", ".tiff",
[False, False, False, True, False]]],
[400, 200, 200]),
([["8image1", ".jpg"],
["8image2", ".png",
[False, True, True, True, False]],
["8image3", ".tiff",
[False, False, False, True, False]]],
[400, 200, 200]),
([["9image1", ".jpg",
[False, False, False, False, False]]], [400])])
def test_download_check_file(file_list, exp_status_code):
"""Tests check_file
    Tests whether a filelist contains the correct type and number
of elements for downloading multiple images.
Args:
file_list (list): DOWNLOAD list of files where each item in the
list is a list of the file's filename, image download type, and
an array of what processing steps should be downloaded
Example: file_list = [file1, file2]
file1 = ["image1", ".jpg",
[False, False, True, True, False]]
file2 = ["image1", ".tiff",
[True, True, False, False, True]]
Each processing steps array has a 1 if that process is
desired and 0 if not. In this example, image1 desires to
perform contrast stretching and log compression. Likewise,
image2 desires the original and to perform histogram
equalization and reverse video.
exp_status_code (int): status code either 200 or 400
Returns:
none
"""
from client import check_file
status = {}
status['code'] = []
for file in file_list:
i_status = check_file(file, "download")
status['code'].append(i_status['code'])
assert status['code'] == exp_status_code
# @pytest.mark.parametrize("zip_file, zip_path, exp_success",
# # Example of Folder with Pictures
# [('test_client_cp/Example1.zip',
# 'test_client_cp/Example1/', True),
# # Example of Empty Folder
# ('test_client_cp/Example2.zip',
# 'test_client_cp/Example2/', True),
# # Example of Pictures in Folder in Provided Folder
# ('test_client_cp/Example3.zip',
# 'test_client_cp/', False),
# # Example of Existing Zip Folder
# ('test_client_cp/Example4.zip',
# 'test_client_cp/Example4/', True)])
# def test_zipdir(zip_file, zip_path, exp_success):
# """Tests zip_dir
#
# Tests whether this function puts all the images in an existing folder
# into a zipped folder.
#
# Code outside of the function already makes sure the path is valid so
# that isn't tested here.
#
# Test includes the code that creates the zip folder.
#
# Args:
# zip_file (str): name of zip folder to be created
# zip_path (str): location of where the pictures to be zipped are
# located
# exp_success (bool): expected success
#
# Returns:
# none
# """
# import zipfile
# import os
# from client import zipdir
# import shutil
#
# fail = False
# cwd = os.getcwd()
# try:
# shutil.rmtree('test_client_cp/')
# except:
# pass
# shutil.copytree('test_client/', 'test_client_cp/')
# zipf = zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED)
# success = zipdir(zip_path, zipf)
# zipf.close()
# os.chdir(cwd)
# shutil.rmtree('test_client_cp/')
#
# assert success == exp_success
@pytest.mark.parametrize("proc_steps, exp_str",
[([True, False, True, True, False], '10110'),
([True, True, True, True, True], '11111'),
([False, False, False, True, False], '00010'),
([True, False, True, False, False], '10100'),
([False, False, False, False, False], '00000')])
def test_proc_string(proc_steps, exp_str):
"""Tests proc_string
Tests whether this function creates the correct string from an array of
five Boolean terms.
Code outside of the function already makes sure array is formatted
correctly so that isn't tested here.
Args:
proc_steps (list): list of 5 Booleans
Returns:
proc_ext (str): string of 1's and 0's
"""
from client import proc_string
proc_ext = proc_string(proc_steps)
assert proc_ext == exp_str
| 2.15625 | 2 |
app/plugin.dbmc/resources/lib/accountsettings.py | TidalPaladin/Superliminal-resin | 0 | 12788421 | <filename>app/plugin.dbmc/resources/lib/accountsettings.py
#/*
# * Copyright (C) 2013 <NAME>
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import xbmcvfs
import shutil
import os
import pickle
from resources.lib.utils import *
class AccountSettings(object):
'''
Class which loads and saves all the account settings,
for easy access to the account settings
'''
def __init__(self, account_name):
if isinstance (account_name,str):
self.account_name = account_name.decode("utf-8")
else:
self.account_name = account_name
self.access_token = u''
self.passcode = ''
self.passcodetimeout = 30
self.session_id = ''
self.synchronisation = False
self.syncfreq = 5
self.syncpath = u''
self.remotepath = u''
dataPath = xbmc.translatePath( ADDON.getAddonInfo('profile') ).decode("utf-8")
self.account_dir = os.path.normpath(dataPath + '/accounts/' + self.account_name) + os.sep #add os seperator because it is a dir
#read from location if present
if xbmcvfs.exists( self.account_dir.encode("utf-8") ):
self.load()
#Don't use the stored account_dir
self.account_dir = os.path.normpath(dataPath + '/accounts/' + self.account_name) + os.sep #add os seperator because it is a dir
else:
log_debug('Account (%s) doesn\'t exist yet' % (self.account_name) )
def load(self):
log_debug('Loading account settings: %s' % (self.account_name) )
settings_file = os.path.normpath(self.account_dir + 'settings')
try:
with open(settings_file, 'rb') as file_obj:
tmp_dict = pickle.load(file_obj)
except Exception as exc:
log_error('Failed to load the settings: %s' % (str(exc)) )
else:
self.__dict__.update(tmp_dict)
#correct the items; make sure that they are unicode...)
if isinstance (self.account_name,str):
self.account_name = self.account_name.decode("utf-8")
if isinstance (self.syncpath,str):
self.syncpath = self.syncpath.decode("utf-8")
if isinstance (self.remotepath,str):
self.remotepath = self.remotepath.decode("utf-8")
def save(self):
log_debug('Save account settings: %s' % (self.account_name) )
#check if the account directory is present, create otherwise
if not xbmcvfs.exists( self.account_dir.encode("utf-8") ):
xbmcvfs.mkdirs( self.account_dir.encode("utf-8") )
#Save...
settings_file = os.path.normpath(self.account_dir + u'settings')
try:
with open(settings_file, 'wb') as file_obj:
pickle.dump(self.__dict__, file_obj)
except Exception as exc:
log_error('Failed saving the settings: %s' % (str(exc)) )
def remove(self):
log_debug('Remove account folder: %s' % (self.account_dir) )
shutil.rmtree( self.account_dir )
#remove cache folder
shutil.rmtree( get_cache_path(self.account_name) )
#remove synced data is done in the DropboxSynchronizer!
| 1.789063 | 2 |
InjectionLog/migrations/0001_initial.py | JKesslerPhD/FIPInjectionLogger | 1 | 12788422 | # Generated by Django 3.0.6 on 2020-05-25 00:02
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Cats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('birthday', models.DateField()),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='GSBrand',
fields=[
('brand', models.CharField(max_length=100, primary_key=True, serialize=False)),
('concentration', models.DecimalField(decimal_places=2, max_digits=2)),
],
),
migrations.CreateModel(
name='WarriorAdmin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('warrior_admin', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='UserExtension',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('userid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('warrior_admin', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='InjectionLog.WarriorAdmin')),
],
),
migrations.CreateModel(
name='InjectionLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_added', models.DateField(default=datetime.date.today, editable=False)),
('cat_weight', models.DecimalField(decimal_places=2, max_digits=2)),
('injection_time', models.TimeField()),
('injection_amount', models.DecimalField(decimal_places=1, max_digits=2)),
('cat_behavior_today', models.IntegerField(default=3)),
('injection_notes', models.TextField(null=True)),
('gaba_dose', models.IntegerField(null=True)),
('other_notes', models.TextField(null=True)),
('cat_name', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='InjectionLog.Cats')),
('gs_brand', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='InjectionLog.GSBrand')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.765625 | 2 |
class14.py | itsforbasu/Python_class_files | 1 | 12788423 | def display(*args):
for x in args:
print(x)
display("Sam")
display((2,4,5,6))
display(23)
def sum( *args):
total=0
print(len(args))
for x in args:
total += x
return total
print(sum(23))
print(sum(23,34,12))
print(sum(23,34,12,45,65,89))
def average( *args):
total= count=0
print(len(args))
for x in args:
total += x
count += 1
return total/count
print(average(23))
print(average(23,34,12))
print(average(23,34,12,45,65,89))
def displayX( **kwargs):
for k, v in kwargs.items():
print(k)
print(v)
# displayX(23)
# displayX({"Name" = 'Sam', 'Age'=13})
displayX(Name='Sam')
def order(x,y=0,*args, **kwargs):
print("I am require:")
print(x)
print("I am default:")
print(y)
print("I am *args:")
print(args)
print("I am **kwargs:")
print(kwargs)
order(34,12,23, Name='sam', Age=45)
name = 'Sam'
def disAge():
age = 14
print(name, age)
def disAddress():
address = 'Amarsingh'
print(name, address)
# print(name, age)
disAge()
disAddress()
# print(age)
print(name)
def disName():
# global name
name = 'Sun'
print(name)
disName()
print(name)
# Single line statements list comprehensions and function
std = 13
kim = "Sandip" if std>13 else 45
kim
std = {'Name':'Sam', 'Age':13}
kim = [k for k, v in std.items()]
kim
data = list(range(1,21))
data2 = [x**2 for x in data if x %2 == 0]
data2
rim = [k for k, v in std.items()] if len(std) >1 else [2,3,4]
rim
# lambda function
fun1 = lambda x: x+1
print(fun1(3))
#Generator and Iterator
def gen(n=1):
"Generates number from 1 to given number"
assert (type(n) == int and n >=1 ), "The input should be int and greater than 1"
x = 1
while x <= n:
yield x
x+=1
# for i in gen(2):
# print(i)
num = [i for i in gen(2)]
print(num)
| 3.828125 | 4 |
morm/exceptions.py | neurobin/python-morm | 4 | 12788424 | <filename>morm/exceptions.py
"""Exceptions.
"""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright © <NAME> <https://github.com/neurobin/>'
__license__ = '[BSD](http://www.opensource.org/licenses/bsd-license.php)'
__version__ = '0.0.1'
class ItemDoesNotExistError(Exception): pass
class TransactionError(Exception): pass
class MigrationError(Exception): pass
class MigrationModelNotAllowedError(Exception): pass
class UnsupportedError(Exception): pass
| 1.96875 | 2 |
gettingdata/PrintRegexLinesOfFile.py | arunma/Python_DataScience | 1 | 12788425 | <gh_stars>1-10
import sys,re
file_name=sys.argv[1]
with open (file_name, 'r') as f:
for line in f:
if re.match("^#", line):
print(line)
#python PrintRegexLinesOfFile.py RegexMatch.py
| 2.9375 | 3 |
python/Array/152_maximum_product_subarray.py | Jan-zou/LeetCode | 0 | 12788426 | <filename>python/Array/152_maximum_product_subarray.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Description:
Find the contiguous subarray within an array (containing at least one number) which has the largest product.
For example, given the array [2,3,-2,4],
the contiguous subarray [2,3] has the largest product = 6.
Tags: Array, Dynamic Programming
'''
class Solution(object):
# O(n) runtime; O(1) space
def maxProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
local_min, local_max, global_max = 1, 1, float("-inf")
for i in nums:
local_max, local_min = max(i, i*local_max, i*local_min), min(i, i*local_max, i*local_min)
global_max = max(local_max, global_max)
return global_max
if __name__ == '__main__':
print Solution().maxProduct([2,3,-2,4])
| 4.125 | 4 |
benchmarks.py | xnupanic/scikit-perform | 0 | 12788427 | <reponame>xnupanic/scikit-perform
"""
Copyright 2021 <NAME>
This Source Code Form is subject to the terms of the BSD-2-Clause license.
If a copy of the BSD-2-Clause license was not distributed with this
file, You can obtain one at https://opensource.org/licenses/BSD-2-Clause.
"""
import lzma
import math
import hashlib
import functools
from collections import ChainMap
from functools import partial
import xml.etree.ElementTree as ET
from io import StringIO
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn import ensemble
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.model_selection import GridSearchCV
NASA_DATA = 'https://aiweb.cs.washington.edu/research/projects/xmltk/xmldata/data/nasa/nasa.xml.gz'
HAMLET = 'https://gist.githubusercontent.com/provpup/2fc41686eab7400b796b/raw/b575bd01a58494dfddc1d6429ef0167e709abf9b/hamlet.txt'
ENGLISH_WORDS = 'https://raw.githubusercontent.com/dwyl/english-words/master/words_alpha.txt'
SHUTTLE_DATA = 'https://archive.ics.uci.edu/ml/machine-learning-databases/statlog/shuttle/shuttle.tst'
EL_NINO = 'https://archive.ics.uci.edu/ml/machine-learning-databases/el_nino-mld/tao-all2.dat.gz'
PROTEIN_SEQUENCE = 'https://aiweb.cs.washington.edu/research/projects/xmltk/xmldata/data/SwissProt/SwissProt.xml.gz'
def with_data(**kwargs):
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
wrapper._data_urls = kwargs
return wrapper
return decorator
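# Illustrative sketch (an assumption, not scikit-perform's actual harness): a
# runner could read `_data_urls` off a benchmark, fetch each URL once, and pass
# the downloaded payloads in as keyword arguments. `fetch` is a hypothetical
# callable (e.g. a cached HTTP download returning str/bytes as the benchmark expects).
def run_with_downloaded_data(benchmark, ncores, map_fn, fetch):
    data = {name: fetch(url) for name, url in getattr(benchmark, "_data_urls", {}).items()}
    return benchmark(ncores, map_fn, **data)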
@with_data(hamlet=HAMLET, words=ENGLISH_WORDS)
def hamlet_word_count(ncores, map, hamlet, words):
words = words.split()
total_words = len(words)
job_words = math.floor(total_words / ncores)
slices = []
for core in range(ncores):
start = core * job_words
if core + 1 == ncores:
            end = total_words  # include the final word; total_words - 1 would drop it
else:
end = (core + 1) * job_words
slices.append(words[start:end])
count_partial = partial(count_occurences, hamlet)
disjoint_word_count = map(count_partial, slices)
word_count = dict(ChainMap(*disjoint_word_count))
return word_count
def count_occurences(text, words):
return {word: text.count(word) for word in words}
@with_data(test_data=NASA_DATA)
def xml_parsing(_1, _2, test_data):
ET.fromstring(test_data)
@with_data(test_data=NASA_DATA)
def lzma_compression(_1, _2, test_data):
lzma.compress(test_data)
@with_data(test_data=PROTEIN_SEQUENCE)
def sha512(_1, _2, test_data):
m = hashlib.sha3_512()
m.update(test_data)
m.digest()
@with_data(shuttle_data=SHUTTLE_DATA)
def support_vector_machine(ncores, _, shuttle_data):
param_grid = [
{'model__C': [1, 10, 100, 1000], 'model__kernel': ['linear']},
{'model__C': [1, 10, 100, 1000], 'model__gamma': [0.001, 0.0001], 'model__kernel': ['rbf']},
]
inputs, targets = parse_shuttle_data(shuttle_data)
pipeline = Pipeline(steps=[('scale', StandardScaler()), ('model', svm.SVC())])
estimator = GridSearchCV(pipeline, param_grid, cv=2, n_jobs=ncores)
estimator.fit(inputs, targets)
@with_data(shuttle_data=SHUTTLE_DATA)
def random_forest(ncores, _, shuttle_data):
param_grid = [
{
'criterion': ['gini', 'entropy'],
'min_samples_split': list(range(2, 11))
}
]
inputs, targets = parse_shuttle_data(shuttle_data)
estimator = GridSearchCV(ensemble.RandomForestClassifier(n_estimators=500),
param_grid,
cv=2,
n_jobs=ncores)
estimator.fit(inputs, targets)
@with_data(shuttle_data=SHUTTLE_DATA)
def locally_linear_embedding(ncores, _, shuttle_data):
inputs, _ = parse_shuttle_data(shuttle_data)
embedding = LocallyLinearEmbedding(eigen_solver='dense', n_jobs=ncores)
embedding.fit_transform(inputs[:4000])
def parse_shuttle_data(shuttle_data):
shuttle_data = StringIO(shuttle_data)
shuttle_df = pd.read_csv(shuttle_data, sep=' ')
shuttle_array = shuttle_df.to_numpy()
inputs = shuttle_array[:, [0, -2]]
targets = shuttle_array[:, -1]
return inputs, targets
| 1.898438 | 2 |
PowerPoint/Slide.py | Cow-Fu/PowerPointCat | 0 | 12788428 |
class Slide:
    def getTitle(self):
        return self.title
    def setTitle(self, title):
        self.title = title
    def getContent(self):
        return self.content
def setContent(self, content):
self.content = content
def getMarkup(self):
return self.markup
    def setMarkup(self, markup):
        self.markup = markup
def __init__(self, title=None, content=None, markup=None):
self.title = title
self.content = content
self.markup = markup
| 2.875 | 3 |
desafio15.py | lucasmc64/Curso_de_Python_CeV | 1 | 12788429 | <reponame>lucasmc64/Curso_de_Python_CeV
print('{} DESAFIO 15 {}'.format('='*10, '='*10))
dias = int(input('Por quantos dias o carro foi alugado? '))
km = float(input('Quantos kilômetros foram rodados? '))
pdias = dias * 60
pkm = km * 0.15
print('O total a pagar é de R${:.2f}!'.format(pdias + pkm))
| 3.734375 | 4 |
src/apply_fixes/main.py | martis42/depend_on_what_you_use | 8 | 12788430 | import json
import subprocess
import sys
from argparse import ArgumentParser
from os import environ
from pathlib import Path
from typing import Any, List
# Bazel sets this environment for 'bazel run' to document the workspace root
WORKSPACE_ENV_VAR = "BUILD_WORKSPACE_DIRECTORY"
def cli():
parser = ArgumentParser()
parser.add_argument(
"--workspace",
metavar="PATH",
help="""
Workspace for which DWYU reports are gathered and fixes are applied to the source code. If no dedicated
workspace is provided, we assume we are running from within the workspace for which the DWYU reports have been
generated and determine the workspace root automatically.
By default the Bazel output directory containing the DWYU report files is deduced by following the 'bazel-bin'
convenience symlink.""",
)
parser.add_argument(
"--use-bazel-info",
const="fastbuild",
choices=["dbg", "fastbuild", "opt"],
nargs="?",
help="""
Don't follow the convenience symlinks to reach the Bazel output directory containing the DWYU reports. Instead,
use 'bazel info' to deduce the output directory.
This option accepts an optional argument specifying the compilation mode which was used to generate the DWYU
report files.
Using this option is recommended if the convenience symlinks do not exist, don't follow the default
naming scheme or do not point to the Bazel output directory containing the DWYU reports.""",
)
parser.add_argument(
"--bazel-bin",
metavar="PATH",
help="""
Path to the bazel-bin directory inside which the DWYU reports are located.
Using this option is recommended if neither the convenience symlinks nor the 'bazel info' command are suited to
deduce the Bazel output directory containing the DWYU report files.""",
)
parser.add_argument(
"--buildozer",
metavar="PATH",
help="""
buildozer binary which shall be used by this script. If none is provided, it is expected to find buildozer on
PATH.""",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="Don't apply fixes. Report the buildozer commands and print the adapted BUILD files to stdout.",
)
parser.add_argument("--verbose", action="store_true", help="Announce intermediate steps.")
return parser.parse_args()
def get_workspace(main_args: Any) -> Path:
if main_args.workspace:
return Path(main_args.workspace)
workspace_root = environ.get(WORKSPACE_ENV_VAR)
if not workspace_root:
print(
"ERROR:"
f" No workspace was explicitly provided and environment variable '{WORKSPACE_ENV_VAR}' is not available."
)
        sys.exit(1)
    return Path(workspace_root)
def get_bazel_bin_dir(main_args: Any, workspace_root: Path) -> Path:
if main_args.bazel_bin:
return Path(main_args.bazel_bin)
if main_args.use_bazel_info:
process = subprocess.run(
["bazel", "info", f"--compilation_mode={main_args.use_bazel_info}", "bazel-bin"],
cwd=workspace_root,
check=True,
encoding="utf-8",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return Path(process.stdout.strip())
bazel_bin_link = workspace_root / "bazel-bin"
if not bazel_bin_link.is_symlink():
print(f"ERROR: convenience symlink '{bazel_bin_link}' does not exist or is not a symlink.")
sys.exit(1)
return bazel_bin_link.resolve()
def gather_reports(bazel_bin: Path) -> List[Path]:
return list(bazel_bin.glob("**/*_dwyu_report.json"))
def make_base_cmd(buildozer: str, dry: bool) -> List[str]:
cmd = [buildozer]
if dry:
cmd.append("-stdout")
return cmd
def perform_fixes(workspace: Path, report: Path, buildozer: str, dry: bool = False, verbose=False):
with open(report, encoding="utf-8") as report_in:
content = json.load(report_in)
target = content["analyzed_target"]
unused_deps = content["unused_dependencies"]
base_cmd = make_base_cmd(buildozer=buildozer, dry=dry)
if unused_deps:
deps_str = " ".join(unused_deps)
cmd = base_cmd + [f"remove deps {deps_str}", target]
if dry or verbose:
print(f"Buildozer command: {cmd}")
subprocess.run(cmd, cwd=workspace, check=True)
def main(args: Any) -> int:
"""
    This script expects that the user has invoked DWYU in the given workspace and by doing so generated DWYU report
files in the output path.
The script expects "bazel" to be available on PATH.
"""
buildozer = args.buildozer if args.buildozer else "buildozer"
workspace = get_workspace(args)
if args.verbose:
print(f"Workspace: '{workspace}'")
bin_dir = get_bazel_bin_dir(main_args=args, workspace_root=workspace)
if args.verbose:
print(f"Bazel-bin directory: '{bin_dir}'")
reports = gather_reports(bin_dir)
if not reports:
print("ERROR: Did not find any DWYU report files.")
print("Did you forget to run DWYU beforehand?")
print(
"By default this tool looks for DWYU report files in the output directory for a 'fastbuild' DWYU execution."
" If you want to use another output directory, have a look at the apply_fixes CLI options via '--help'."
)
return 1
for report in reports:
if args.verbose:
print(f"Report File '{report}'")
perform_fixes(workspace=workspace, report=report, buildozer=buildozer, dry=args.dry_run, verbose=args.verbose)
return 0
if __name__ == "__main__":
cli_args = cli()
sys.exit(main(cli_args))
| 2.671875 | 3 |
day18/python/jamhocken/solution.py | jamhocken/aoc-2020 | 16 | 12788431 | import math
def process_input(file_contents):
lines_stripped = [line.strip() for line in file_contents]
lines_stripped = [line.replace(" ","") for line in lines_stripped]
return lines_stripped
def find_opening_par(input_string):
position = input_string.rfind(")")
counter = 1
while counter > 0:
position -= 1
if input_string[position] == "(":
counter -= 1
elif input_string[position] == ")":
counter += 1
return position
def do_math(input_string):
j = len(input_string) - 1
if input_string[j] == ")":
opening_par = find_opening_par(input_string)
if input_string[opening_par-1] == "*":
result = do_math(input_string[opening_par+1:j])*do_math(input_string[:opening_par-1])
elif input_string[opening_par-1] == "+":
result = do_math(input_string[opening_par+1:j])+do_math(input_string[:opening_par-1])
else:
return do_math(input_string[opening_par+1:j])
else:
number = int(input_string[j])
if j>0:
if input_string[j-1] == "*":
result = number*do_math(input_string[:j-1])
elif input_string[j-1] == "+":
result = number+do_math(input_string[:j-1])
else:
result = number
return result
def do_math2(input_string):
last_par = input_string.rfind(")")
while last_par != -1:
opening_par = find_opening_par(input_string)
input_string = input_string.replace(input_string[opening_par:last_par+1], str(do_math2(input_string[opening_par+1:last_par])))
last_par = input_string.rfind(")")
fragments = input_string.split("*")
results = [sum([int(number) for number in fragment.split("+")]) for fragment in fragments]
return math.prod(results)
def main():
with open("input.txt",'r') as code_file:
all_code_file = code_file.readlines()
operations_list = process_input(all_code_file)
math_results = [do_math(operation) for operation in operations_list]
print("The sum of the answers to all problems in the initial homework is", sum(math_results))
math_results = [do_math2(operation) for operation in operations_list]
print("The sum of the answers to all problems in the advanced math homework is", sum(math_results))
main()
| 3.59375 | 4 |
campaign/migrations/0011_merge_0010_alter_donor_id_0010_donor_display_name.py | Aleccc/gtcrew | 0 | 12788432 | <reponame>Aleccc/gtcrew
# Generated by Django 3.2.5 on 2021-07-24 16:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('campaign', '0010_alter_donor_id'),
('campaign', '0010_donor_display_name'),
]
operations = [
]
| 1.257813 | 1 |
findMissingNum.py | ezquire/python-challenges | 0 | 12788433 | def findMissing(arr, start, end):
ans = ""
if start > end:
return "None"
if start > arr[-1] or end < arr[0]:
ans += str(start) + " - " + str(end)
return ans
else:
i = 0
if start < arr[0]: # capture any range in the beginning
ans += str(start) + " - " + str(arr[0] - 1)
start = arr[0]
i += 1
while arr[i] <= start: # move i to one past the start
i += 1
while i < len(arr) and arr[i] <= end:
if abs(arr[i] - start) == 2:
if len(ans) > 0:
ans += ", "
ans += str(start + 1)
elif abs(arr[i] - start) > 2:
if len(ans) > 0:
ans += ", "
ans += str(start + 1) + " - " + str(arr[i] - 1)
start = arr[i]
i += 1
if end > start: # capture any range at the end
if len(ans) > 0:
ans += ", "
ans += str(start + 1) + " - " + str(end)
return ans
# s e
arr = [1, 3, 5, 7, 8, 9, 13]
# i
print(findMissing(arr, 20, 14)) # start > end Output: None
print(findMissing(arr, 14, 20)) # start and end out of bounds at the end Output: 14 - 20
print(findMissing(arr, -20, 0)) # start and end out of bounds at the beginning Output: -20 - 0
print(findMissing(arr, -4, 5)) # start out of bounds, end in bounds Output: -4 - 0, 2, 4
print(findMissing(arr, 5, 20)) # start in bounds, end out of bounds Output: 6, 10 - 12, 14 - 20
print(findMissing(arr, 5, 12)) # start and end in bounds Output: 6, 10 - 12
print(findMissing(arr, -5, 20)) # start and end out of bounds Output: -5 - 0, 2, 4, 6, 10 - 12, 14 - 20
| 3.640625 | 4 |
pyknp_eventgraph/base_phrase.py | ku-nlp/pyknp-eventgraph | 7 | 12788434 | import collections
from typing import TYPE_CHECKING, List, NoReturn, Optional, Tuple, Union
from pyknp import Morpheme, Tag
from pyknp_eventgraph.builder import Builder
from pyknp_eventgraph.component import Component
from pyknp_eventgraph.helper import PAS_ORDER, convert_katakana_to_hiragana, get_parallel_tags
from pyknp_eventgraph.relation import filter_relations
if TYPE_CHECKING:
from pyknp_eventgraph.argument import Argument
from pyknp_eventgraph.event import Event
from pyknp_eventgraph.predicate import Predicate
class BasePhrase(Component):
"""A wrapper of :class:`pyknp.knp.tag.Tag`, which allow exophora to be a base phrase.
    BasePhrase is a bidirectional linked list; each base phrase has a parent and children.
Attributes:
event (Event): An event that has this base phrase.
tag (Tag, optional): A tag.
ssid (int): A serial sentence ID.
bid (int): A serial bunsetsu ID.
tid (int): A serial tag ID.
is_child (bool): If true, this base phrase is a child of a head base phrase.
exophora (str): An exophora.
        omitted_case (str): An omitted case.
parent (BasePhrase, optional): A parent base phrase.
children (List[BasePhrase]): A list of child base phrases.
"""
def __init__(
self,
event: "Event",
tag: Optional[Tag],
ssid: int,
bid: int,
tid: int,
is_child: bool = False,
exophora: str = "",
omitted_case: str = "",
):
self.event = event
self.tag: Optional[Tag] = tag
self.ssid = ssid
self.bid = bid
self.tid = tid
self.is_child = is_child
self.exophora = exophora
self.omitted_case = omitted_case
self.parent: Optional["BasePhrase"] = None
self.children: List["BasePhrase"] = []
self._surf = None
def __hash__(self):
return hash(self.key)
def __eq__(self, other: "BasePhrase"):
assert isinstance(other, BasePhrase)
return self.key == other.key
def __lt__(self, other: "BasePhrase"):
assert isinstance(other, BasePhrase)
return self.key < other.key
@property
def morphemes(self) -> List[Union[str, Morpheme]]:
mrphs = []
if self.omitted_case:
if self.exophora:
mrphs.append(self.exophora)
else:
exists_content_word = False
for mrph in self.tag.mrph_list():
is_content_word = mrph.hinsi not in {"助詞", "特殊", "判定詞"}
if not is_content_word and exists_content_word:
break
exists_content_word = exists_content_word or is_content_word
mrphs.append(mrph)
mrphs.append(self.omitted_case)
else:
mrphs.extend(list(self.tag.mrph_list()))
return mrphs
@property
def surf(self) -> str:
"""A surface string."""
if self._surf is None:
morphemes = self.morphemes
if self.omitted_case:
bases, case = morphemes[:-1], morphemes[-1]
base = "".join(base if isinstance(base, str) else base.midasi for base in bases)
case = convert_katakana_to_hiragana(case)
self._surf = f"[{base}{case}]"
else:
self._surf = "".join(mrph.midasi for mrph in morphemes)
return self._surf
@property
def key(self) -> Tuple[int, int, int, int]:
"""A key used for sorting."""
return PAS_ORDER.get(self.omitted_case, 99), self.ssid, self.bid, self.tid
@property
def is_event_head(self) -> bool:
"""True if this base phrase is the head of an event."""
return bool(self.tag and any("節-主辞" in tag.features for tag in [self.tag] + get_parallel_tags(self.tag)))
@property
def is_event_end(self) -> bool:
"""True if this base phrase is the end of an event."""
return bool(self.tag and any("節-区切" in tag.features for tag in [self.tag] + get_parallel_tags(self.tag)))
@property
def adnominal_events(self) -> List["Event"]:
"""A list of events modifying this predicate (adnominal)."""
if self.omitted_case:
return []
else:
return [r.modifier for r in filter_relations(self.event.incoming_relations, ["連体修飾"], [self.tid])]
@property
def sentential_complement_events(self) -> List["Event"]:
"""A list of events modifying this predicate (sentential complement)."""
if self.omitted_case:
return []
else:
return [r.modifier for r in filter_relations(self.event.incoming_relations, ["補文"], [self.tid])]
@property
def root(self) -> "BasePhrase":
"""Return the root of this base phrase."""
root_bp = self
while root_bp.parent:
root_bp = root_bp.parent
return root_bp
def to_list(self) -> List["BasePhrase"]:
"""Expand to a list."""
return sorted(self.root.modifiers(include_self=True))
def modifiees(self, include_self: bool = False) -> List["BasePhrase"]:
"""Return a list of base phrases modified by this base phrase.
Args:
include_self: If true, include this base phrase to the return.
"""
modifiee_bps = [self] if include_self else []
def add_modifiee(bp: BasePhrase):
if bp.parent:
modifiee_bps.append(bp.parent)
add_modifiee(bp.parent)
add_modifiee(self)
return modifiee_bps
def modifiers(self, include_self: bool = False) -> List["BasePhrase"]:
"""Return a list of base phrases modifying this base phrase.
Args:
include_self: If true, include this base phrase to the return.
"""
modifier_bps = [self] if include_self else []
def add_modifier(bp: BasePhrase):
for child_bp in bp.children:
modifier_bps.append(child_bp)
add_modifier(child_bp)
add_modifier(self)
return sorted(modifier_bps)
def to_dict(self) -> dict:
"""Convert this object into a dictionary."""
return dict(ssid=self.ssid, bid=self.bid, tid=self.tid, surf=self.surf)
def to_string(self) -> str:
"""Convert this object into a string."""
return f"<BasePhrase, ssid: {self.ssid}, bid: {self.bid}, tid: {self.tid}, surf: {self.surf}>"
class BasePhraseBuilder(Builder):
@classmethod
def build(cls, event: "Event"):
# Greedily dispatch base phrases to arguments.
argument_head_bps: List[BasePhrase] = []
for args in event.pas.arguments.values():
for arg in args:
head = cls._dispatch_head_base_phrase_to_argument(arg)
argument_head_bps.append(head)
if head.parent:
argument_head_bps.append(head.parent)
# Resolve duplication.
cls._resolve_duplication(argument_head_bps)
# Dispatch base phrases to a predicate.
cls._dispatch_head_base_phrase_to_predicate(event.pas.predicate, sentinels=argument_head_bps)
@classmethod
def _dispatch_head_base_phrase_to_argument(cls, argument: "Argument") -> BasePhrase:
event = argument.pas.event
ssid = argument.pas.ssid - argument.arg.sdist
tid = argument.arg.tid
bid = Builder.stid_bid_map.get((ssid, tid), -1)
tag = Builder.stid_tag_map.get((ssid, tid), None)
if argument.arg.flag == "E": # exophora
head_bp = BasePhrase(event, None, ssid, bid, tid, exophora=argument.arg.midasi, omitted_case=argument.case)
elif argument.arg.flag == "O": # zero anaphora
head_bp = BasePhrase(event, tag, ssid, bid, tid, omitted_case=argument.case)
else:
head_bp = BasePhrase(event, tag, ssid, bid, tid)
cls._add_children(head_bp, ssid)
cls._add_compound_phrase_component(head_bp, ssid)
argument.head_base_phrase = head_bp
return head_bp
@classmethod
def _dispatch_head_base_phrase_to_predicate(cls, predicate: "Predicate", sentinels: List[BasePhrase]) -> BasePhrase:
event = predicate.pas.event
ssid = predicate.pas.event.ssid
tid = predicate.head.tag_id
bid = Builder.stid_bid_map.get((ssid, tid), -1)
tag = Builder.stid_tag_map.get((ssid, tid), None)
head_bp = BasePhrase(event, tag, ssid, bid, tid)
cls._add_children(head_bp, ssid, sentinels=sentinels)
if predicate.pas.event.head != predicate.pas.event.end:
next_tid = predicate.pas.event.end.tag_id
next_bid = Builder.stid_bid_map.get((ssid, next_tid), -1)
head_parent_bp = BasePhrase(event, predicate.pas.event.end, ssid, next_bid, next_tid)
cls._add_children(head_parent_bp, ssid, sentinels=sentinels + [head_bp])
cls._add_compound_phrase_component(head_parent_bp, ssid)
head_bp.parent = head_parent_bp
head_parent_bp.children.append(head_bp)
predicate.head_base_phrase = head_bp
return head_bp
@classmethod
def _add_compound_phrase_component(cls, bp: BasePhrase, ssid: int) -> NoReturn:
next_tag = Builder.stid_tag_map.get((ssid, bp.tag.tag_id + 1), None)
if next_tag and "複合辞" in next_tag.features and "補文ト" not in next_tag.features:
next_tid = bp.tag.tag_id + 1
next_bid = Builder.stid_bid_map.get((ssid, next_tid), -1)
parent_bp = BasePhrase(bp.event, next_tag, ssid, next_bid, next_tid)
cls._add_children(parent_bp, ssid, sentinels=[bp])
cls._add_compound_phrase_component(parent_bp, ssid)
bp.parent = parent_bp
parent_bp.children.append(bp)
@classmethod
def _add_children(cls, parent_bp: BasePhrase, ssid: int, sentinels: List[BasePhrase] = None) -> NoReturn:
sentinel_tags = {sentinel.tag for sentinel in sentinels} if sentinels else {}
for child_tag in parent_bp.tag.children: # type: Tag
if child_tag in sentinel_tags or "節-主辞" in child_tag.features or "節-区切" in child_tag.features:
continue
tid = child_tag.tag_id
bid = Builder.stid_bid_map.get((ssid, tid), -1)
child_bp = BasePhrase(parent_bp.event, child_tag, ssid, bid, tid, is_child=True)
cls._add_children(child_bp, ssid, sentinels)
child_bp.parent = parent_bp
parent_bp.children.append(child_bp)
@classmethod
def _resolve_duplication(cls, head_bps: List[BasePhrase]) -> NoReturn:
keys = {head_bp.key[1:] for head_bp in head_bps} # head_bp.key[0] is the case id.
def resolver(children: List[BasePhrase]) -> NoReturn:
for i in reversed(range(len(children))):
child_bp = children[i]
if child_bp.omitted_case:
continue
if child_bp.key[1:] in keys:
children.pop(i)
else:
resolver(child_bp.children)
for head in head_bps:
resolver(head.children)
| 2.25 | 2 |
ape_fantom/ecosystem.py | ApeWorX/ape-fantom | 0 | 12788435 | from ape.api.config import PluginConfig
from ape.api.networks import LOCAL_NETWORK_NAME
from ape_ethereum.ecosystem import Ethereum, NetworkConfig
NETWORKS = {
# chain_id, network_id
"opera": (250, 250),
"testnet": (4002, 4002),
}
class FantomConfig(PluginConfig):
opera: NetworkConfig = NetworkConfig(required_confirmations=1, block_time=1) # type: ignore
testnet: NetworkConfig = NetworkConfig(required_confirmations=1, block_time=1) # type: ignore
local: NetworkConfig = NetworkConfig(default_provider="test") # type: ignore
default_network: str = LOCAL_NETWORK_NAME
class Fantom(Ethereum):
@property
def config(self) -> FantomConfig: # type: ignore
return self.config_manager.get_config("fantom") # type: ignore
| 2.234375 | 2 |
setup.py | arthexis/sigils | 1 | 12788436 | from setuptools import setup
from os import path
base_dir = path.abspath(path.dirname(__file__))
with open(path.join(base_dir, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='sigils',
version='0.0.5',
description='Extract, resolve and replace [SIGILS] embedded in text.',
long_description=long_description,
long_description_content_type='text/x-rst',
url='http://github.com/arthexis/sigils',
download_url='https://github.com/arthexis/sigils/archive/v0.0.5.tar.gz',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
keywords=["UTILS", "SIGIL", "STRING", "TEXT"],
packages=['sigils'],
zip_safe=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'Topic :: Text Processing',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
],
install_requires=[
'lark-parser',
'lru-dict'
],
extras_require={
'django': [
'django',
],
'dev': [
'pytest',
'black',
'pytest-cov',
]
}
)
| 1.296875 | 1 |
experiments/smk.py | magnusross/gpcm | 2 | 12788437 | import lab as B
from stheno import EQ, GP, Delta, Measure
from gpcm.experiment import run, setup
args, wd = setup("smk")
# Setup experiment.
n = 801 # Need to add the last point for the call to `linspace`.
noise = 1.0
t = B.linspace(-44, 44, n)
t_plot = B.linspace(0, 10, 500)
# Setup true model and GPCM models.
kernel = EQ().stretch(1) * (lambda x: B.cos(2 * B.pi * x))
kernel = kernel + EQ().stretch(1) * (lambda x: B.sin(2 * B.pi * x))
window = 4
scale = 0.5
n_u = 40
n_z = 88
# Sample data.
m = Measure()
gp_f = GP(kernel, measure=m)
gp_y = gp_f + GP(noise * Delta(), measure=m)
truth, y = map(B.flatten, m.sample(gp_f(t_plot), gp_y(t)))
# Remove region [-8.8, 8.8].
inds = ~((t >= -8.8) & (t <= 8.8))
t = t[inds]
y = y[inds]
def comparative_kernel(vs_):
return vs_.pos(1) * EQ().stretch(vs_.pos(1.0)) + vs_.pos(noise) * Delta()
run(
args=args,
wd=wd,
noise=noise,
window=window,
scale=scale,
t=t,
y=y,
n_u=n_u,
n_z=n_z,
true_kernel=kernel,
true_noisy_kernel=kernel + noise * Delta(),
comparative_kernel=comparative_kernel,
t_plot=t_plot,
truth=(t_plot, truth),
x_range={"psd": (0, 3)},
y_range={"kernel": (-1.5, 1.5), "psd": (-100, 10)},
)
| 1.96875 | 2 |
squiggler/hmm.py | JohnUrban/squiggler | 1 | 12788438 | import numpy as np
##from sklearn import hmm
from scipy.stats import norm
import nwalign as nw
from collections import defaultdict
import itertools
import time
from model_tools import get_stored_model, read_model_f5, read_model_tsv
onemers = [''.join(e) for e in itertools.product("ACGT")]
dimers = [''.join(e) for e in itertools.product("ACGT","ACGT")]
trimers = [''.join(e) for e in itertools.product("ACGT","ACGT","ACGT")]
fourmers = [''.join(e) for e in itertools.product("ACGT","ACGT","ACGT","ACGT")]
fivemers = [''.join(e) for e in itertools.product("ACGT","ACGT","ACGT","ACGT","ACGT")]
i=np.matrix([0.25,0.25,0.25,0.25])
e=np.matrix([[10,40,70,100],[np.sqrt(5),np.sqrt(5),np.sqrt(5),np.sqrt(5)]])
t=np.matrix([[0.1,0.2,0.3,0.4],[0.4,0.3,0.2,0.1],[0.25,0.25,0.15,0.35], [0.3,0.2,0.3,0.2]])
## TODO: transition estimation using Bayesian updating.
## Make a program that adjusts the transition probabilities according to trusted data,
## e.g. go through Illumina reads starting with uniform priors and update the transitions after seeing data.
## It could update BOTH 1-move and 2-move (and K-move) transitions;
## 0-move transitions would have to be estimated with Baum-Welch.
## A rough sketch of the counting approach is given below.
## TODO: handle long sequences faster --
## break sequences into chunks, calculate the Viterbi matrix on each chunk in parallel,
## then combine the per-chunk results in a way that still gives the correct answer (is this possible?).
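## --- illustrative sketch (added; not original squiggler code) ----------------
## One simple way to realise the Bayesian updating noted above: keep a matrix of
## Dirichlet pseudocounts indexed by (previous state, next state), add observed
## transitions from trusted state paths, and renormalise each row to get the
## posterior-mean transition probabilities. Function names here are hypothetical.
def update_transition_counts(counts, statepath):
    ## counts: (num_states, num_states) ndarray of pseudocounts (e.g. np.ones((n, n)))
    for prev, curr in zip(statepath[:-1], statepath[1:]):
        counts[prev, curr] += 1
    return counts
def transition_probs_from_counts(counts):
    ## posterior mean of each row under a Dirichlet prior = normalised counts
    return counts / counts.sum(axis=1, keepdims=True)
## -----------------------------------------------------------------------------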
class HMM(object):
pass
def generate_statepath(tran_probs, initial_probs, states, length=10):
## t, e, and i are np.matrix objects
numstates = len(states)
statenums = range(numstates)
current = np.random.choice(statenums, p=initial_probs)
statePath = [current]
length -= 1
while length > 0:
upcoming = np.random.choice(statenums, p=tran_probs[current])
current = upcoming
statePath.append(current)
length -= 1
return statePath
def generate_emissions_from_statepath(emission_probs, statepath):
means = emission_probs[0,statepath]
stdevs = emission_probs[1,statepath]
emits = np.random.normal(means, stdevs)
return emits
def generate_statepath_and_emissions(emission_probs, tran_probs, initial_probs, states, length=10):
statenums = range(len(states))
current = int(np.random.choice(statenums, size=1, p=initial_probs))
statepath = [current]
## print type(statepath)
emitted_data = [np.random.normal(emission_probs[0,current], emission_probs[1,current])]
length = length-1
while length > 0:
upcoming = int(np.random.choice(statenums, size=1, p=tran_probs[current,:]))
current = upcoming
## print current, upcoming
statepath.append(current)
emitted_data.append(np.random.normal(emission_probs[0,current], emission_probs[1,current]))
length = length-1
return statepath, emitted_data
def generate_emissions_twoemits():
pass
def forward(emission_probs, tran_probs, initial_probs, states, emitted_data, num_states = None, num_emits=None):
## t, e, and i are np.matrix objects
if num_states == None:
num_states = len(states)
if num_emits == None:
num_emits = len(emitted_data)
ep = norm(emission_probs[0,:], emission_probs[1,:])
Forward = np.zeros([num_states,num_emits])
scalefactors = np.zeros([2,num_emits])
#initial
Forward[:, 0] = np.multiply(initial_probs,ep.pdf(emitted_data[0]))
## scale to prevent underflow -- keep track of scaling
scalefactors[0,0] = sum(Forward[:,0])
scalefactors[1,0] = np.log(scalefactors[0,0])
Forward[:,0] = Forward[:,0]/scalefactors[0,0]
## iterate
for k in range(1, num_emits):
emit = ep.pdf(emitted_data[k])
Forward[:,k] = np.multiply(emit, np.dot(Forward[:,k-1],tran_probs))
scalefactors[0,k] = sum(Forward[:,k])
scalefactors[1,k] = np.log(scalefactors[0,k]) + scalefactors[1,k-1]
Forward[:,k] = Forward[:,k]/scalefactors[0,k]
return Forward, scalefactors
def backward(emission_probs, tran_probs, initial_probs, states, emitted_data, num_states = None, num_emits=None):
## t, e, and i are np.matrix objects
if num_states == None:
num_states = len(states)
if num_emits == None:
num_emits = len(emitted_data)
ep = norm(emission_probs[0,:], emission_probs[1,:])
Backward = np.zeros([num_states,num_emits])
scalefactors = np.zeros([2,num_emits])
end = num_emits - 1
#initial
Backward[:, end] = 1
## scale to prevent underflow -- keep track of scaling
scalefactors[0,end] = sum(Backward[:,end])
scalefactors[1,end] = np.log(scalefactors[0,end])
Backward[:,end] = Backward[:,end]/scalefactors[0,end]
## iterate
for k in range(end-1, -1, -1):
emit = ep.pdf(emitted_data[k+1])
a = np.multiply(Backward[:,k+1], emit).transpose()
Backward [:,k] = np.dot(tran_probs, a).transpose()
scalefactors[0,k] = sum(Backward[:,k])
scalefactors[1,k] = np.log(scalefactors[0,k]) + scalefactors[1,k+1]
Backward[:,k] = Backward[:,k]/scalefactors[0,k]
return Backward, scalefactors
def posterior_decoding(Forward, F_scales, Backward, B_scales, states):#, getseqfxn=get.sequence):
##F and B are scaled long seq matrices -- the scales are scalefactors that come with them out of long fxns
num_states = len(states)
num_emits = np.shape(Forward)[1]
posterior_path = np.zeros([num_emits], dtype=int)
## logprobs = np.zeros([1,num_emits])
for i in range(num_emits):
fb = Forward[:,i]*Backward[:,i]
max_state = int(fb.argmax())
posterior_path[i] = max_state
# logprobs[i] = np.exp(F_scales[i])*np.exp(B_scales[i])*max(fb)
return posterior_path #, logprobs
def max_and_index(x):
i=x.argmax()
m=x[i]
return i,m
##MAKE FASTER
def viterbi(emission_probs, tran_probs, initial_probs, states, emitted_data, num_states = None, num_emits=None, logprobs=False):
np.seterr(divide='ignore')
if not num_states:
num_states = len(states)
if not num_emits:
num_emits = len(emitted_data)
if not logprobs:
initial_probs = np.log(initial_probs)
tran_probs = np.log(tran_probs)
ep = norm(emission_probs[0,:], emission_probs[1,:])
pointer = np.zeros([num_emits, num_states])
Viterbi = np.zeros([num_states, num_emits])
## need to add log_probs instead of multiply probs to prevent underflow
Viterbi[:,0] = initial_probs + ep.logpdf(emitted_data[0])
pointer[0,:] = 1
for j in range(1,num_emits):
selection = Viterbi[:,j-1] + tran_probs.transpose()
maxstates = np.apply_along_axis(max_and_index, 1, selection)
Viterbi[:,j] = ep.logpdf(emitted_data[j]) + maxstates[:,1]
pointer[j,:] = maxstates[:,0]
end = num_emits - 1
#path init
viterbi_path = np.zeros(num_emits).astype(int)
viterbi_path[end] = Viterbi[:,end].argmax()
#prob
viterbi_prob = Viterbi[viterbi_path[end], end]
#path iter
for j in range(end,0,-1):
viterbi_path[j-1] = pointer[j,viterbi_path[j]]
return viterbi_path, viterbi_prob
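## Minimal usage sketch (illustrative only, not part of the original code):
## decode a toy 2-state HMM with Gaussian emissions. The numbers are made up;
## the first emission row holds per-state means, the second per-state std devs,
## matching the layout viterbi() expects. Assumes numpy as np.
def _viterbi_toy_example():
    toy_emis = np.array([[0.0, 5.0],
                         [1.0, 1.0]])
    toy_tran = np.array([[0.9, 0.1],
                         [0.1, 0.9]])
    toy_init = np.array([0.5, 0.5])
    toy_emitted = [0.2, -0.1, 4.8, 5.3]
    ## expected decoded path for these observations: [0, 0, 1, 1]
    return viterbi(toy_emis, toy_tran, toy_init, [0, 1], toy_emitted)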
##############################################################################
'''two emits viterbi'''
##############################################################################
def viterbi2(emission_probs, emission_probs2, tran_probs, initial_probs, states, emitted_data, emitted_data2, num_states = None, num_emits=None, logprobs=False):
np.seterr(divide='ignore')
if not num_states:
num_states = len(states)
if not num_emits:
num_emits = len(emitted_data)
if not logprobs:
initial_probs = np.log(initial_probs)
tran_probs = np.log(tran_probs)
ep1 = norm(emission_probs[0,:], emission_probs[1,:])
ep2 = norm(emission_probs2[0,:], emission_probs2[1,:])
pointer = np.zeros([num_emits, num_states])
Viterbi = np.zeros([num_states, num_emits])
## need to add log_probs instead of multiply probs to prevent underflow
Viterbi[:,0] = initial_probs + ep1.logpdf(emitted_data[0]) + ep2.logpdf(emitted_data2[0])
pointer[0,:] = 1
for j in range(1,num_emits):
selection = Viterbi[:,j-1] + tran_probs.transpose()
maxstates = np.apply_along_axis(max_and_index, 1, selection)
Viterbi[:,j] = ep1.logpdf(emitted_data[j]) + ep2.logpdf(emitted_data2[j]) + maxstates[:,1]
pointer[j,:] = maxstates[:,0]
end = num_emits - 1
#path init
viterbi_path = np.zeros(num_emits).astype(int)
viterbi_path[end] = Viterbi[:,end].argmax()
#prob
viterbi_prob = Viterbi[viterbi_path[end], end]
#path iter
for j in range(end,0,-1):
viterbi_path[j-1] = pointer[j,viterbi_path[j]]
return viterbi_path, viterbi_prob
##############################################################################
def baumwelch():
pass
def prob_data(Forward, scalefactors, num_emits=None):
    if num_emits is None:
end = np.shape(Forward)[1]-1
else:
end = num_emits-1
return sum(Forward[:,end])*np.exp(scalefactors[1,end])
def compare_statepath(sp1,sp2):
try:
ident = sum(sp1 == sp2)
total = len(sp1)
except:
return
edit_dist = total - ident
return edit_dist, ident, 100.0*ident/total
def nwalign(s1,s2):
return nw.global_align(s1,s2)
def edit_dist(s1,s2,length=None):
'''Assumes length s1 == length s2 '''
    if length is None:
length = len(s1)
dist = 0
for i in range(length):
dist += s1[i] != s2[i]
return dist
def pct_id(length,distance):
'''Assumes dist <= length '''
return 100.0*(length-distance)/length
def compare_seq_nwa(s1,s2):
s1, s2 = nwalign(s1,s2)
length = len(s1)
dist = edit_dist(s1,s2,length)
return dist, pct_id(length,dist)
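## Worked example (illustrative): two aligned 8-base sequences differing at two
## positions give edit_dist("ACGTACGT", "ACGAACGA") == 2 and
## pct_id(8, 2) == 100.0*(8-2)/8 == 75.0.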
def combine_2_seq(s1,s2, length=None):
'''Assumes length s1 == length s2 '''
s1,s2 = nwalign(s1,s2)
    if length is None:
length = len(s1)
editdist = 0
combinedseq = ''
for i in range(length):
if s1[i] == s2[i]:
combinedseq += s1[i]
elif s1[i] == "-":
editdist += 1
combinedseq += s2[i]
elif s2[i] == "-":
editdist += 1
combinedseq += s1[i]
        else: ## mismatch -- arbitrarily keep the base from the first sequence (s1)
editdist += 1
combinedseq += s1[i]
return combinedseq, editdist
def complement(DNAstring):
DNAstring = DNAstring.upper()
compString = ''
complement = {'A':'T', 'C':'G', 'G':'C', 'T':'A', 'N':'N'}
for base in DNAstring:
compString = compString + complement[base]
return compString
def reverseComplement(DNAstring):
return complement(DNAstring[-1::-1])
def reverse_seq(DNAstring):
return DNAstring[-1::-1]
def get_2D_seq(t,c):
c = complement(c)
return combine_2_seq(t,c)
def generate_kmer_initial_probs(states,uniform=True):
num_states = len(states)
if uniform:
initial = np.array([1.0/num_states]*num_states)
else:
initial = np.random.poisson(lam=10.0, size=num_states)
initial = initial/sum(initial)
return initial
def generate_random_kmer_transition_probs(states, allow_gaps=True, unif=False):
## if allow_gaps = False, assumes each k-mer has another kmer overlapped by k-1
## can set nonzero.trans to any vector -- for DNA length 4
k = len(states[0])
if k == 1:
pass
elif k == 2:
pass
else:
num_states = len(states)
tran_probs = np.zeros([num_states,num_states])
# make prefix-suffix dict -- overlaps of k-1 and k-2
prefix = defaultdict(list)
for i in range(num_states):
pref = states[i][:k-1]
prefix[pref].append(i)
pref = states[i][:k-2]
prefix[pref].append(i)
## create transition probs -- can soft code the sampling parameters later if want
for i in range(num_states):
## overlap by k-1 (move = 1)
current_suffix = states[i][1:k]
if unif:
trans = np.array([365,365,365,365])
else:
trans = np.random.poisson(lam=365.0,size=4)
t = 0
for j in prefix[current_suffix]:
tran_probs[i,j] = trans[t]
t += 1
if allow_gaps:
## overlap by k-2 (move = 2) -- add additional counts
current_suffix = states[i][2:]
if unif:
trans = np.array([1]*16)
else:
trans = np.random.poisson(lam=4.0, size=16)
t = 0
for j in prefix[current_suffix]:
tran_probs[i,j] = tran_probs[i,j] + trans[t]
t += 1
## stay in place: add additional probability to staying in place (move = 0)
current_suffix = states[i]
if unif:
trans = np.array([3])
else:
trans = np.random.poisson(lam=20.0, size=1)
tran_probs[i,i] = tran_probs[i,i] + trans
## normalize all counts by sum to create probs that sum to 1
tran_probs[i,:] = tran_probs[i,:]/sum(tran_probs[i,:])
return tran_probs
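## Sanity-check sketch (illustrative, not part of the original code): for kmer
## states of length >= 3, every row of the generated transition matrix should
## sum to 1. `states` is assumed to be a kmer list such as the trimers/fivemers
## defined elsewhere in this module.
def _transition_rows_sum_to_one(states):
    tp = generate_random_kmer_transition_probs(states)
    return np.allclose(tp.sum(axis=1), 1.0)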
### Allow higher gaps
##def generate_random_kmer_transition_probs(states, unif=False):
## ## if allow_gaps = False, assumes each k-mer has another kmer overlapped by k-1
## ## can set nonzero.trans to any vector -- for DNA length 4
## k = len(states[0])
## if k == 1:
## pass
## elif k == 2:
## pass
## else:
## num_states = len(states)
## tran_probs = np.zeros([num_states,num_states])
##
## # make prefix-suffix dict -- overlaps of k-1 and k-2
## prefix = defaultdict(list)
## for i in range(num_states):
## pref = states[i][:k-1]
## prefix[pref].append(i)
## pref = states[i][:k-2]
## prefix[pref].append(i)
## pref = states[i][:k-3]
## prefix[pref].append(i)
## pref = states[i][:k-4]
## prefix[pref].append(i)
##
## ## create transition probs -- can soft code the sampling parameters later if want
## for i in range(num_states):
## ## overlap by k-1 (move = 1)
## current_suffix = states[i][1:k]
## if unif:
## tran_probs[i,prefix[current_suffix]] += 365
## else:
## trans = np.random.poisson(lam=365.0,size=4)
## t = 0
## for j in prefix[current_suffix]:
## tran_probs[i,j] += trans[t]
## t += 1
##
## ## overlap by k-2 (move = 2) -- add additional counts
## current_suffix = states[i][2:]
## if unif:
## tran_probs[i,prefix[current_suffix]] += 1
## else:
## trans = np.random.poisson(lam=4.0, size=16)
## t = 0
## for j in prefix[current_suffix]:
## tran_probs[i,j] += tran_probs[i,j] + trans[t]
## t += 1
##
## ## overlap by k-3 (move = 3)
## current_suffix = states[i][3:]
## if unif:
## tran_probs[i,prefix[current_suffix]] += 0.5
## else:
## trans = np.random.poisson(lam=2.0, size=64)
## t = 0
## for j in prefix[current_suffix]:
## tran_probs[i,j] += tran_probs[i,j] + trans[t]
## t += 1
##
## ## overlap by k-4 (move = 3)
## current_suffix = states[i][4:]
## if unif:
## tran_probs[i,prefix[current_suffix]] += 0.25
## else:
## trans = np.random.poisson(lam=4.0, size=256)
## t = 0
## for j in prefix[current_suffix]:
## tran_probs[i,j] += tran_probs[i,j] + trans[t]
## t += 1
##
## ## no overlap (move = 5)
## tran_probs[i] += 0.1
##
## ## stay in place: add additional probability to staying in place (move = 0)
## current_suffix = states[i]
## if unif:
## tran_probs[i,i] += 3
## else:
## tran_probs[i,i] = tran_probs[i,i] + np.random.poisson(lam=20.0, size=1)
##
## ## normalize all counts by sum to create probs that sum to 1
## tran_probs[i,:] = tran_probs[i,:]/sum(tran_probs[i,:])
##
## return tran_probs
def generate_kmer_emission_probs(states, level=True):
## generates either level emissions or sd emissions
# mu.mean and sigma.mean are the mean and std dev of the r7.3 state level means to be used to generate emission means
# mu.sd, sigma.sd -- same for std devs of signals
if level:
mu_mean = 65.57454
sigma_mean = 6.497453
mu_sd = 1.163836
sigma_sd = 0.4116285
else: ## sd emission
mu_mean = 1.37316
sigma_mean = 0.3144043
mu_sd = 0.1761904
sigma_sd = 0.06263217
num_states = len(states)
emissions = np.zeros([2,num_states])
for i in range(num_states):
emissions[0,i] = np.random.normal(mu_mean, sigma_mean)
emissions[1,i] = abs(np.random.normal(mu_sd,sigma_sd))
return emissions
##def get_emiss_probs_from_model(model, twoemits=False):
## ''' model is object returned from get_stored_model() in model_tools '''
## states = sorted(model[1].keys())
## num_states = len(states)
## t_emissions = np.zeros([2,num_states])
## c_emissions = np.zeros([2,num_states])
## for i in range(num_states):
## t_emissions[0,i] = model[1][states[i]][0]
## t_emissions[1,i] = model[1][states[i]][1]
## c_emissions[0,i] = model[2][states[i]][0]
## c_emissions[1,i] = model[2][states[i]][1]
## return t_emissions, c_emissions
def get_emiss_probs_from_model(model, twoemits=False):
''' model is object returned from get_stored_model() in model_tools '''
states = sorted(model[1].keys())
num_states = len(states)
t_emissions = np.zeros([2,num_states])
c_emissions = np.zeros([2,num_states])
if twoemits:
t_emissions2 = np.zeros([2,num_states])
c_emissions2 = np.zeros([2,num_states])
for i in range(num_states):
t_emissions[0,i] = model[1][states[i]][0]
t_emissions[1,i] = model[1][states[i]][1]
c_emissions[0,i] = model[2][states[i]][0]
c_emissions[1,i] = model[2][states[i]][1]
if twoemits:
t_emissions2[0,i] = model[1][states[i]][2]
t_emissions2[1,i] = model[1][states[i]][3]
c_emissions2[0,i] = model[2][states[i]][2]
c_emissions2[1,i] = model[2][states[i]][3]
if twoemits:
return t_emissions, c_emissions, t_emissions2, c_emissions2
return t_emissions, c_emissions
def generate_kmer_transition_probs_withgaps():
pass
def get_sequence():
pass
##def get_sequence_withgaps(states, statepath, checkoverlap=True, posterior_decoded=False):
## ## states are some type of kmer
## ## statepath is vector of numbers (indexes)
## path_length = len(statepath)
## moves = [0]*path_length ## first move is 0
## k = len(states[0])
## end = k-1
## if k == 1 or k == 2:
## return "This currently only works with 3-mers as smallest kmer."
## else:
## #init
## seq = states[statepath[0]]
## moves[0] = 0
## #iter
## for i in range(1,path_length):
## lastSuffix = states[statepath[i-1]][1:]
## currentPrefix = states[statepath[i]][:k-1]
## if lastSuffix == currentPrefix:
## seq += states[statepath[i]][end]
## moves[i] = 1
## else:
## lastSuffix = states[statepath[i-1]][2:]
## currentPrefix = states[statepath[i]][:k-2]
## if lastSuffix == currentPrefix:
## seq += states[statepath[i]][end-1:]
## moves[i] = 2
## elif statepath[i-1] == statepath[i]:
## ## by checking same state last, only heteropolymers affected
## ## homopolymers would be caught in first condition
## moves[i] = 0
## ## nothing is added to sequence
## ## could make another fxn that just spits out events and states line by line like 'template events' in f5
## ## ELSE::: do what? ... in other one just added centroid seq regardless...
## elif posterior_decoded:
## seq += states[statepath[i]][end]
## moves[i] = -1
## ## -1 means it was an "illegal" move (move to a kmer that does not overlap by k-1 or k-2)
## ## it turns out that adding the base from the illegal move does not hurt the seq overall much
## return seq, moves
## reduce homo-5mer polmerization...
def get_sequence_withgaps(states, statepath, checkoverlap=True, posterior_decoded=False):
## states are some type of kmer
## statepath is vector of numbers (indexes)
path_length = len(statepath)
moves = [0]*path_length ## first move is 0
k = len(states[0])
end = k-1
if k == 1 or k == 2:
return "This currently only works with 3-mers as smallest kmer."
else:
#init
seq = states[statepath[0]]
moves[0] = 0
#iter
for i in range(1,path_length):
lastSuffix = states[statepath[i-1]][1:]
currentPrefix = states[statepath[i]][:k-1]
if statepath[i-1] == statepath[i]:
moves[i] = 0
elif lastSuffix == currentPrefix:
seq += states[statepath[i]][end]
moves[i] = 1
else:
lastSuffix = states[statepath[i-1]][2:]
currentPrefix = states[statepath[i]][:k-2]
if lastSuffix == currentPrefix:
seq += states[statepath[i]][end-1:]
moves[i] = 2
elif posterior_decoded:
seq += states[statepath[i]][end]
moves[i] = -1
## -1 means it was an "illegal" move (move to a kmer that does not overlap by k-1 or k-2)
## it turns out that adding the base from the illegal move does not hurt the seq overall much
return seq, moves
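## Worked example (illustrative): with 3-mer states, a statepath whose kmers are
## AAT -> ATG -> TGC (each overlapping the previous by k-1) decodes to the
## sequence "AATGC" with moves [0, 1, 1]; repeating a state adds no bases and
## records move 0, and an overlap of only k-2 appends two bases with move 2.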
### ALLOW higher gaps 3, 4, 5
##def get_sequence_withgaps(states, statepath, checkoverlap=True, posterior_decoded=False):
## ## states are some type of kmer
## ## statepath is vector of numbers (indexes)
## path_length = len(statepath)
## moves = [0]*path_length ## first move is 0
## k = len(states[0])
## end = k-1
## if k == 1 or k == 2:
## return "This currently only works with 3-mers as smallest kmer."
## else:
## #init
## seq = states[statepath[0]]
## moves[0] = 0
## #iter
## for i in range(1,path_length):
## lastSuffix = states[statepath[i-1]][1:]
## currentPrefix = states[statepath[i]][:k-1]
## if lastSuffix == currentPrefix:
## seq += states[statepath[i]][end]
## moves[i] = 1
## elif statepath[i-1] == statepath[i]:
## ## by checking same state last, only heteropolymers affected
## ## homopolymers would be caught in first condition
## moves[i] = 0
## ## nothing is added to sequence
## ## could make another fxn that just spits out events and states line by line like 'template events' in f5
##
## else:
## lastSuffix = states[statepath[i-1]][2:]
## currentPrefix = states[statepath[i]][:k-2]
## if lastSuffix == currentPrefix:
## seq += states[statepath[i]][end-1:]
## moves[i] = 2
## else:
## lastSuffix = states[statepath[i-1]][3:]
## currentPrefix = states[statepath[i]][:k-3]
## if lastSuffix == currentPrefix:
## seq += states[statepath[i]][end-2:]
## moves[i] = 3
## else:
## lastSuffix = states[statepath[i-1]][4:]
## currentPrefix = states[statepath[i]][:k-4]
## if lastSuffix == currentPrefix:
## seq += states[statepath[i]][end-3:]
## moves[i] = 4
## else:
## ## skip 5
## seq += states[statepath[i]][end-4:]
## moves[i] = 5
## ## ELSE::: do what? ... in other one just added centroid seq regardless...
#### elif posterior_decoded:
#### seq += states[statepath[i]][end]
#### moves[i] = -1
#### ## -1 means it was an "illegal" move (move to a kmer that does not overlap by k-1 or k-2)
## ## it turns out that adding the base from the illegal move does not hurt the seq overall much
## return seq, moves
def update_table_dict(d,l,keys, length=None):
    '''d is dict to update, l is list of values, keys are the column names
    assumes l and keys are of same length
    assumes keys and l are paired by shared index'''
if not length:
length = len(l)
for i in range(length):
d[keys[i]].append(l[i])
return d
def read_table(fh, keys, types=None):
'''fh is a file path to a tsv file. keys are column names.
    lengths of types and keys = number of columns in table
both keys and types should appear in same order as columns'''
length = len(keys)
if not types:
types = [str]*length
    ## print types
data = open(fh).readlines()
table = defaultdict(list)
for i in range(len(data)):
line = data[i].strip().split("\t")
line = [types[j](line[j]) for j in range(len(line))]
table = update_table_dict(table,line,keys, length)
return table
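## Usage sketch (illustrative): reading a two-column TSV of names and scores.
## The file path and column names below are hypothetical.
## table = read_table("scores.tsv", ["name", "score"], types=[str, float])
## table["score"] -> list of floats, one entry per row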
def read_model_file(model_file, variantcolumn=False):
if variantcolumn:
keys = ["kmer","variant","level_mean","level_stdv","sd_mean","sd_stdv","weight"]
types = [str] + [float]*6
else:
keys = ["kmer","level_mean","level_stdv","sd_mean","sd_stdv","weight"]
types = [str] + [float]*5
return read_table(model_file, keys, types)
def read_events_file(events_file, input_events=False):
''' file may contain input, template, or complement events '''
if input_events:
keys = ["mean", "stddev", "start", "length"]
types = [float]*4
else:
keys = ["mean", "stddev", "start", "length", "model_state", "model_level", "move", "p_model_state", "mp_state", "p_mp_state", "p_A", "p_C", "p_G", "p_T"]
types = [float]*4 + [str] + [float]*3 + [str] + [float]*5
return read_table(events_file, keys, types)
def lead_hmm(first_50_events):
## 15 states: 0:14, states 0:13 part of lead profile, state14 is end/template state
emit_probs = np.zeros([2,15])
## means
emit_probs[0,:] = [43.93368, 51.82074, 66.3531, 76.30256, 84.15992, 89.97542, 96.22626, 100.97302, 107.33552, 100.54961, 75.71837, 46.63833, 57.33411, 43.53527, 60.0]
## stdevs
emit_probs[1,:] = [2.097209, 3.526526, 2.809502, 1.954605, 1.857928, 1.793586, 1.163202, 1.120078, 2.364349, 2.866541, 13.945599, 1.991525, 16.866727, 2.678975, 5.0]
## initial probs - can start anywhere in profile, but mostly first 3 states
init_probs = np.array([0.4,0.3,0.2,0,0,0,0,0,0,0,0,0,0,0,0])+0.001
init_probs = init_probs/sum(init_probs)
## trans probs -- mostly trans to next state, but can skip states, also somewhat likely to stay in same state
tran_probs = np.zeros([15,15])
tran_probs[14,14] = 1.0
for i in range(14): tran_probs[i,i] = 0.3
for i in range(13): tran_probs[i,i+1] = 0.35
for i in range(12): tran_probs[i,i+2] = 0.2
for i in range(11): tran_probs[i,i+3] = 0.1
for i in range(10): tran_probs[i,i+4] = 0.001
for i in range(9): tran_probs[i,i+5] = 0.001
for i in range(8): tran_probs[i,i+6] = 0.001
for i in range(7): tran_probs[i,i+7] = 0.001
## for now only last 3 states transition to end state
tran_probs[11,14] = 0.05
tran_probs[12,14] = 0.1
tran_probs[13,14] = 0.2
## normalize all rows to 1
for i in range(14): tran_probs[i,:] = tran_probs[i,:]/sum(tran_probs[i,:])
## get viterbi path for lead adapter coordinates
vpath, vprob = viterbi(emission_probs = emit_probs, tran_probs = tran_probs, initial_probs = init_probs, states = range(15), emitted_data = first_50_events)
template_start = 0
try:
while vpath[template_start] != 14:
template_start += 1
except IndexError: ## if profile HMM does not find template start in 1st 50, then assume start is at 50
template_start = 50
return template_start, vpath
def hp_hmm(events,trim=5):
## state B,1,2,3,4,5,E = 7 states
emit_probs = np.zeros([2,7])
emit_probs[0,] = [65.0, 93.78638, 117.49618, 100.67429, 60.19801, 46.50402, 65.0]
emit_probs[1,] = [6.0, 6.787453, 8.665963, 4.354063, 6.305904, 1.931336, 6.0]
init_probs = np.array([0.7,0.2,0.1,0,0,0,0])
init_probs = init_probs/sum(init_probs)
tran_probs = np.zeros([7,7])
tran_probs[6,6] = 1.0
for i in range(7): tran_probs[i,i] = 0.3
for i in range(6): tran_probs[i,i+1] = 0.35
for i in range(5): tran_probs[i,i+2] = 0.2
for i in range(4): tran_probs[i,i+3] = 0.1
for i in range(3): tran_probs[i,i+4] = 0.001
for i in range(2): tran_probs[i,i+5] = 0.001
tran_probs[3,6] = 0.05
tran_probs[4,6] = 0.1
tran_probs[4,5] = 0.7 ## state 4 usually goes directly to 5 (occasionally 2 events, but have not seen more -- all other states tend to stay in-state longer)
for i in range(7): tran_probs[i,] = tran_probs[i,:]/sum(tran_probs[i,:])
vpath, vprob = viterbi(emission_probs = emit_probs, tran_probs = tran_probs, initial_probs = init_probs, states = range(7), emitted_data = events)
hpstart = 0
while vpath[hpstart] != 1:
hpstart += 1
hpend = len(vpath)-1
while vpath[hpend] != 5:
hpend -= 1
return hpstart-trim, hpend+trim, vpath
## To help with writing ####################################################################
#### This will help you learn how the functions are used.
##emis=generate_kmer_emission_probs(trimers)
##tran=generate_random_kmer_transition_probs(trimers)
##init=generate_kmer_initial_probs(trimers)
##sp=generate_statepath(tran,init,trimers)
##em=generate_emissions_from_statepath(emis,sp)
##f,fs=forward(emis, tran, init, trimers, em)
##b,bs=backward(emis, tran, init, trimers, em)
##vpath,vprob=viterbi(emis,tran,init,trimers,em)
##postpath=posterior_decoding(f,fs,b,bs,trimers)
##print "comparison, edit_dist, ident, pctID"
##print "posterior decoded path:", compare_statepath(sp,postpath)
##print "viterbi decoded path:", compare_statepath(sp,vpath)
##print "posterior vs. viterbi:", compare_statepath(postpath,vpath)
##aseq,amoves=get_sequence_withgaps(trimers,sp)
##vseq,vmoves=get_sequence_withgaps(trimers,vpath)
##pseq,pmoves=get_sequence_withgaps(trimers,postpath,posterior_decoded=True)
####print "ans-vs-p: edit dist, pctID =", compare_seq_nwa(vseq,sp)
####print "ans-vs-v: edit dist, pctID =", compare_seq_nwa(pseq,sp)
##print "a-vs-p: edit dist, pctID =", compare_seq_nwa(vseq,aseq)
##print "a-vs-v: edit dist, pctID =", compare_seq_nwa(pseq,aseq)
##print "v-vs-p: edit dist, pctID =", compare_seq_nwa(vseq,pseq)
##print "amoves:", amoves
##print "vmoves:", vmoves
##print "pmoves:", pmoves
### More realistic example -- read in...
def test_viterbi(states=trimers, length=10):
emis=generate_kmer_emission_probs(states)
tran=generate_random_kmer_transition_probs(states)
init=generate_kmer_initial_probs(states)
sp=generate_statepath(tran,init,states,length=length)
em=generate_emissions_from_statepath(emis,sp)
return viterbi(emis,tran,init,states,em)
def simulate(states=fivemers, length=10):
emis=generate_kmer_emission_probs(states)
tran=generate_random_kmer_transition_probs(states)
init=generate_kmer_initial_probs(states)
sp=generate_statepath(tran,init,states,length=length)
em=generate_emissions_from_statepath(emis,sp)
print "forward..."
start=time.time()
f,fs=forward(emis,tran,init,states,em)
end=time.time()
f1=end-start
print "...operation took ", f1, " seconds...."
print "backward..."
start=time.time()
b,bs=backward(emis,tran,init,states,em)
end=time.time()
b1=end-start
print "...operation took ", b1, " seconds...."
print "post..."
start=time.time()
postpath=posterior_decoding(f,fs,b,bs,states)
end=time.time()
print "...operation took ", end-start, " seconds...."
print "viterbi..."
start=time.time()
vpath,vprob=viterbi(emis,tran,init,states,em)
end=time.time()
v1=end-start
print "...operation took ", v1, " seconds...."
print ("").join([str(e) for e in ["...viterbi is ", v1/f1, "x and ", v1/b1, "x slower than F and B respectively"]])
print "posterior path vs known:", compare_statepath(sp,postpath)
print "viterbi path vs known:", compare_statepath(sp,vpath)
print "posterior vs viterbi:", compare_statepath(postpath,vpath)
def simulate_delete_me(states=trimers, length=10):
emis=generate_kmer_emission_probs(states)
tran=generate_random_kmer_transition_probs(states)
init=generate_kmer_initial_probs(states)
sp=generate_statepath(tran,init,states,length=length)
em=generate_emissions_from_statepath(emis,sp)
print "forward..."
start=time.time()
f,fs=forward(emis,tran,init,states,em)
end=time.time()
f1=end-start
print "...operation took ", f1, " seconds...."
print "backward..."
start=time.time()
b,bs=backward(emis,tran,init,states,em)
end=time.time()
b1=end-start
print "...operation took ", b1, " seconds...."
print "post..."
start=time.time()
postpath=posterior_decoding(f,fs,b,bs,states)
end=time.time()
print "...operation took ", end-start, " seconds...."
print "viterbi..."
start=time.time()
vpath,vprob=viterbi(emis,tran,init,states,em)
end=time.time()
v1=end-start
print "...operation took ", v1, " seconds...."
print "viterbi_fast..."
start=time.time()
v2path,v2pr=viterbi_fast(emis,tran,init,states,em)
end=time.time()
v2=end-start
print "...operation took ", v2, " seconds...."
print "...new viterbi ", v1/v2, "x faster than old one..."
print "...new viterbi is ", v2/f1, " x and ", v2/b1, "x slower than F and B respectively"
print "posterior path vs known:", compare_statepath(sp,postpath)
print "viterbi path vs known:", compare_statepath(sp,vpath)
print "viterbi_fast path vs known:", compare_statepath(sp,v2path)
print "posterior vs viterbi:", compare_statepath(postpath,vpath)
print "viterbi path vs viterbi fast path:", compare_statepath(vpath,v2path)
print "posterior vs viterbi fast:", compare_statepath(postpath,v2path)
| 1.960938 | 2 |
apps/postgres.py | abiolarasheed/fabobjects | 0 | 12788439 | # coding: utf-8
from __future__ import unicode_literals
import getpass
import os
import sys
from fabric.api import hide
from fabric.api import settings
from apps.base import BaseApp
from fabobjects.utils import server_host_manager, random_password
PG_VERSION = "9.5"
GIS_VERSION = "2.2"
HBA_TEXT = (
"local all postgres ident\n"
"host replication replicator 0.0.0.0/0 md5\n"
"local all all password\n"
"# # IPv4 local connections:\n"
"host all all 127.0.0.1/32 %(encrypt)s\n"
"# # IPv6 local connections:\n"
"host all all ::1/128 %(encrypt)s\n"
"# # IPv4 external\n"
"host all all 0.0.0.0/0 %(encrypt)s\n"
)
POSTGRES_CONFIG = {
"listen_addresses": "'*'",
"wal_level": "hot_standby",
"wal_keep_segments": "32",
"max_wal_senders": "5",
"archive_mode": "on",
}
class PostgresServer(BaseApp):
"""
    Set up a Postgres primary database server.
"""
def __init__(self, *args, **kwargs):
super(PostgresServer, self).__init__(*args, **kwargs)
self.name = "master_setup"
self.service_name = "postgresql"
self.db_pass = kwargs.get("db_pass", None) or self.get_db_pass()
self.db_name = kwargs.get("db_name", None)
self.db_user = kwargs.get("db_user", None)
self.replicator_pass = kwargs.get("replicator_pass", None)
self.service_port = kwargs.get("service_port", "5432")
self.db_version = kwargs.get("db_version", PG_VERSION)
self.gis_version = kwargs.get("gis_version", GIS_VERSION)
self.encrypt = kwargs.get("encrypt", "md5")
self.hba_text = kwargs.get("hba_text", HBA_TEXT)
self.postgres_config = kwargs.get("postgres_config", POSTGRES_CONFIG)
self.data_dir_default_base = kwargs.get("data_dir_default_base", "/var/pgsql")
self.binary_path = kwargs.get("binary_path", None)
self.version_directory_join = kwargs.get("version_directory_join", ".")
@server_host_manager
def deploy(
self,
db_version=PG_VERSION,
encrypt="md5",
gis_version=GIS_VERSION,
enable_postgis=False,
master=False,
db_name=None,
db_user=None,
        passwd=None,
):
"""
Install and configure postgres.
:param db_version: Postgres Version to install
:param string encrypt: Set encryption type for user password.
:param string gis_version: PostGis Version to install
:param bool enable_postgis: Configure PostGis if true
:param bool master: Set True if it's a master db of a cluster
        :param string db_name: Name of the database you are creating
:param db_user: Name of user for the db you are creating
:param string passwd: Password for the user you are creating
:return:
"""
db_version = db_version or self.db_version
gis_version = gis_version or self.gis_version
db_name = db_name or self.db_name
db_user = db_user or self.db_user
        passwd = passwd or self.db_pass
if all([db_version, gis_version]):
package = "postgresql-{0}-postgis-{1}".format(db_version, gis_version)
else:
package = "postgresql-{0}".format(db_version)
try:
self.install_package(package)
if enable_postgis:
try:
self.create_postgis_db(db=db_name)
except:
pass
self.create_db(dbname=db_name)
            self.create_db_user(user=db_user, passwd=passwd)
self.grant_permission(permission_type="All", db=db_name, role_name=db_user)
data_dir = self.__get_data_dir(db_version)
config_dir = self.__get_config_dir(db_version)
config = dict(self.postgres_config)
config["archive_command"] = "'cp {0} {1}/wal_archive/{2}'".format(
"%p", data_dir, "%f"
)
self.__setup_hba_config(config_dir, encrypt)
self.__setup_postgres_config(config_dir, config)
self.__setup_archive_dir(data_dir)
self.service_restart()
if master:
self.__create_replicator()
except:
return
@server_host_manager
def __get_db_version(self, db_version=None):
if not db_version or db_version is None:
db_version = self.db_version
return self.version_directory_join.join(db_version.split(".")[:2])
@server_host_manager
def turn_pg(self):
self.install_package("pgtune")
if self.is_package_installed("pgtune"):
config_file = "/etc/postgresql/{0}/main/postgresql.conf".format(
self.db_version
)
out_file = "postgresql.conf"
self.sudo("python pgtune -i {0} -o {1}".format(config_file, out_file))
self.sudo("cp {0} {1}.bak".format(config_file, config_file))
self.sudo("mv {0} {1}".format(out_file, config_file))
self.service_reload("postgresql")
else:
print("E: Unable to locate package pgtune")
@server_host_manager
def psql(self, sql, show=True, use_table=None):
""" Runs SQL against the project's database. """
if use_table and use_table is not None:
psql_ = "psql {0} -c".format(use_table)
else:
psql_ = "psql -c"
out = self.sudo("{0} '{1}' ".format(psql_, sql), user="postgres")
if show:
self.print_command(sql)
return out
@server_host_manager
def create_db(self, dbname=None):
command = "CREATE DATABASE {0}".format(dbname)
self.psql(command)
@server_host_manager
def create_db_user(self, user=None, passwd=None):
with settings(warn_only=True):
command = "CREATE USER {0}".format(user)
self.psql(command)
if passwd:
with hide("running", "stdout", "stderr"):
command = (
"ALTER USER {0} WITH ENCRYPTED "
"PASSWORD \"'{1}'\";".format(user, passwd)
)
self.psql(command)
@server_host_manager
def grant_permission(self, permission_type="All", db=None, role_name=None):
opts = dict(permission_type=permission_type, db=db, role_name=role_name)
command = "GRANT {permission_type} ON DATABASE {db} TO {role_name}".format(
**opts
)
self.psql(command)
@server_host_manager
def create_postgis_db(self, db=None):
with settings(warn_only=True):
self.psql("ALTER EXTENSION postgis UPDATE;")
if db is not None:
self.create_db(dbname=db)
self.psql("CREATE EXTENSION postgis;", use_table=db)
self.psql("CREATE EXTENSION postgis_topology;", use_table=db)
else:
self.psql("CREATE EXTENSION postgis;")
self.psql("CREATE EXTENSION postgis_topology;")
def __get_data_dir(self, db_version):
data_dir = "/var/lib/postgresql/{0}/main".format(db_version)
return data_dir
@property
def data_dir(self):
return self.__get_data_dir(self.__get_db_version())
def __get_config_dir(self, db_version):
data_dir = "/etc/postgresql/{0}/main".format(db_version)
return data_dir
@server_host_manager
def __setup_parameter(self, filename, **kwargs):
for key, value in kwargs.items():
origin = "#{0} =".format(key)
new = "{0} = {1}".format(key, value)
self.sudo('sed -i "/{0}/ c\{1}" {2}'.format(origin, new, filename))
@server_host_manager
def __setup_hba_config(self, config_dir, encrypt=None):
""" enable postgres access without password from localhost """
        if not encrypt or encrypt is None:
encrypt = self.encrypt
hba_conf = os.path.join(config_dir, "pg_hba.conf")
kwargs = {"encrypt": encrypt}
hba_text = self.hba_text % kwargs
if self.exists(hba_conf, use_sudo=True):
self.sudo("echo '{0}' > {1}".format(hba_text, hba_conf))
else:
print(
"Could not find file {0}. Please make sure postgresql was "
"installed and data dir was created correctly.".format(hba_conf)
)
sys.exit(1)
@server_host_manager
def __setup_postgres_config(self, config_dir, config):
postgres_conf = os.path.join(config_dir, "postgresql.conf")
if self.exists(postgres_conf, use_sudo=True):
self.__setup_parameter(postgres_conf, **config)
else:
print(
"Could not find file {0}. Please make sure postgresql was "
"installed and data dir was created correctly.".format(postgres_conf)
)
sys.exit(1)
@server_host_manager
def __setup_archive_dir(self, data_dir):
"""
Set up dir for continuous archiving.
:param data_dir:
:return:
"""
archive_dir = os.path.join(data_dir, "wal_archive")
self.sudo("mkdir -p {0}".format(archive_dir))
self.sudo("chown postgres:postgres {0}".format(archive_dir))
return archive_dir
@server_host_manager
def get_home_dir(self):
return self._get_home_dir(user="postgres")[0]
@server_host_manager
def __setup_ssh_key(self, pswd):
ssh_dir = os.path.join(self.get_home_dir(), ".ssh")
self.sudo("mkdir -p {0}".format(ssh_dir))
rsa = os.path.join(ssh_dir, "id_rsa")
if self.exists(rsa, use_sudo=True):
print("rsa key exists, skipping creating")
else:
with self.cd(ssh_dir):
command = "ssh-keygen -t rsa -b 4096 -f {0} -N {1}".format(rsa, pswd)
self.sudo("chown -R postgres:postgres {0}".format(ssh_dir))
self.sudo("chmod -R og-rwx {0}".format(ssh_dir))
self.sudo(command, user="postgres")
@server_host_manager
def add_user_2_db(self, db_user, db_pass):
command = "CREATE USER {0} WITH NOCREATEDB NOCREATEUSER ENCRYPTED PASSWORD '{1}' ".format(
db_user, db_pass
)
self.psql(command)
@server_host_manager
def __create_replicator(self, replicator_pass=None):
if replicator_pass is None and self.replicator_pass is None:
            self.replicator_pass = replicator_pass = random_password(12)
command = "CREATE USER replicator REPLICATION LOGIN ENCRYPTED PASSWORD \"'{0}'\"".format(
replicator_pass
)
self.psql(command)
history_file = os.path.join(self.get_home_dir(), ".psql_history")
if self.exists(history_file):
self.sudo("rm {0}".format(history_file))
return replicator_pass
else:
print("user replicator already exists, skipping creating user.")
return None
@server_host_manager
def get_db_pass(self):
""" Prompts for the database password if unknown. """
if not self.db_pass or self.db_pass is None:
            self.db_pass = getpass.getpass("Enter the database password: ")
return self.db_pass
@server_host_manager
def postgres_run(self, command):
""" Runs the given command as the postgres user. """
return self.run_as_user(command, user="postgres")
@server_host_manager
def clone_db(self, remotehost, remotehost_user, remote_db, local_host_user):
command = "pg_dump -C -h {0} -U {1} -d {2} | psql -h {4} -d {2} -U {3}".format(
remotehost, remotehost_user, remote_db, local_host_user, self.ip
)
self.sudo(command, user="postgres")
@server_host_manager
def backup(self, project_name, filename):
""" Backs up the database. """
return self.postgres_run("pg_dump -Fc {0} > {1}".format(project_name, filename))
@server_host_manager
def restore(self, project_name, filename):
""" Restores the database. """
return self.postgres_run(
"pg_restore -c -d {0} {1}".format(project_name, filename)
)
@server_host_manager
def set_daily_backup(self, password):
# configure daily dumps of all databases
self.sudo("mkdir -p /var/backups/postgresql")
self.echo(
"localhost:*:*:postgres:{0}".format(password),
to="/root/.pgpass",
use_sudo=True,
append=True,
)
self.sudo("chmod 600 /root/.pgpass")
self.echo(
"0 7 * * * pg_dumpall --username postgres --file /var/backups/postgresql/postgresql_$"
"(date +%%Y-%%m-%%d).dump",
to="/etc/cron.d/pg_dump",
)
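# Usage sketch for PostgresServer (illustrative only, not part of the original
# module): the constructor keywords shown are the ones read in __init__ above;
# any host/connection settings come from BaseApp and are omitted here.
#
#   pg = PostgresServer(db_name="appdb", db_user="appuser", db_pass="s3cret")
#   pg.deploy(db_version="9.5", enable_postgis=True, master=False)
#   pg.set_daily_backup(pg.db_pass)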
class PostgresServerReplica(PostgresServer):
""" Set up master-slave streaming replication: slave node """
name = "slave_setup"
postgres_config = {
"listen_addresses": "'*'",
"wal_level": "hot_standby",
"hot_standby": "on",
}
def __init__(self, *args, **kwargs):
super(PostgresServerReplica, self).__init__(*args, **kwargs)
self.service_name = "postgresql"
self.master_db = kwargs.get("master_db", None)
self.service_port = kwargs.get("service_port", "5432")
class PGBouncer(BaseApp):
"""
Set up PGBouncer on a database server
"""
name = "setup_pgbouncer"
config_dir = "/etc/pgbouncer"
config = {
"*": "host=127.0.0.1",
"logfile": "/var/log/pgbouncer/pgbouncer.log",
"pidfile": None,
"listen_addr": "*",
"listen_port": "6432",
"unix_socket_dir": "/tmp",
"auth_type": "md5",
"auth_file": "%s/userlist.txt" % config_dir,
"pool_mode": "session",
"admin_users": "postgres",
"stats_users": "postgres",
}
def __init__(self, *args, **kwargs):
super(PGBouncer, self).__init__(*args, **kwargs)
self.config["db_host"] = kwargs.get("db_host", "127.0.0.1")
| 1.90625 | 2 |
neural_transfer/config.py | deephdc/neural_transfer | 2 | 12788440 | # -*- coding: utf-8 -*-
"""
Module to define CONSTANTS used across the project
"""
import os
from webargs import fields, validate
from marshmallow import Schema, INCLUDE
# identify basedir for the package
BASE_DIR = os.path.dirname(os.path.normpath(os.path.dirname(__file__)))
# default location for input and output data, e.g. directories 'data' and 'models',
# is either set relative to the application path or via environment setting
IN_OUT_BASE_DIR = BASE_DIR
if 'APP_INPUT_OUTPUT_BASE_DIR' in os.environ:
env_in_out_base_dir = os.environ['APP_INPUT_OUTPUT_BASE_DIR']
if os.path.isdir(env_in_out_base_dir):
IN_OUT_BASE_DIR = env_in_out_base_dir
else:
msg = "[WARNING] \"APP_INPUT_OUTPUT_BASE_DIR=" + \
"{}\" is not a valid directory! ".format(env_in_out_base_dir) + \
"Using \"BASE_DIR={}\" instead.".format(BASE_DIR)
print(msg)
DATA_DIR = os.path.join(IN_OUT_BASE_DIR, 'data/')
IMG_STYLE_DIR = os.path.join(IN_OUT_BASE_DIR, 'neural_transfer/dataset/style_images')
MODEL_DIR = os.path.join(IN_OUT_BASE_DIR, 'models')
neural_RemoteSpace = 'rshare:/neural_transfer/'
neural_RemoteShare = 'https://nc.deep-hybrid-datacloud.eu/s/9Qp4mxNBaLKmqAQ/download?path=%2F&files='
REMOTE_IMG_DATA_DIR = os.path.join(neural_RemoteSpace, 'dataset/training_dataset/')
REMOTE_IMG_STYLE_DIR = os.path.join(neural_RemoteSpace, 'styles/')
REMOTE_MODELS_DIR = os.path.join(neural_RemoteSpace, 'models/')
# Input parameters for predict() (deepaas>=1.0.0)
class PredictArgsSchema(Schema):
class Meta:
unknown = INCLUDE # support 'full_paths' parameter
# full list of fields: https://marshmallow.readthedocs.io/en/stable/api_reference.html
# to be able to upload a file for prediction
img_content = fields.Field(
required=False,
missing=None,
type="file",
data_key="image_content",
location="form",
description="Image to be styled."
)
accept = fields.Str(
        required=False,
description="Returns the image with the new style or a pdf containing the 3 images.",
missing='image/png',
validate=validate.OneOf(['image/png', 'application/pdf']))
model_name = fields.Str(
required=False,
missing = "mosaic",
description="Name of the saved model. This module already comes with some styles, just write the name: 'mosaic', 'candy', 'rain_princess' or 'udnie'. You can see the styles in the dataset/style_images folder. Running 'get_metadata' return the list of models in the module."
)
# Input parameters for train() (deepaas>=1.0.0)
class TrainArgsSchema(Schema):
class Meta:
unknown = INCLUDE # support 'full_paths' parameter
model_name = fields.Str(
required=True,
description="Name of the style image e.g. 'name.jpg' in nextcloud. This will also be the name of the model."
)
upload_model = fields.Boolean(
required=False,
missing = 2,
description="Upload model to nextcloud."
)
epochs = fields.Int(
required=False,
missing = 2,
description="Number of training epochs."
)
learning_rate = fields.Float(
required=False,
missing = 0.003,
description="Learning rate."
)
batch_size = fields.Int(
required=False,
missing = 4,
description="Batch size for training."
)
content_weight = fields.Float(
required=False,
missing = 1e5,
description="Weight for content-loss."
)
style_weight = fields.Float(
required=False,
missing = 1e10,
description="Number of iterations on the network to compute the gradients."
)
size_train_img = fields.Int(
required=False,
missing = 256,
description="Size of training images, default is 256 X 256"
)
log_interval = fields.Int(
required=False,
missing = 200,
description="Number of images after which the training loss is logged."
)
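# Illustrative sketch (not part of the original module): validating a raw
# payload against the schema above. The payload values are hypothetical and
# unknown keys are kept because of Meta.unknown = INCLUDE.
def _example_parse_train_args():
    schema = TrainArgsSchema()
    payload = {"model_name": "my_style.jpg", "epochs": 2, "batch_size": 4}
    return schema.load(payload)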
| 1.898438 | 2 |
rvpvp/isa/rvv/vmfxx_vf.py | ultrafive/riscv-pvp | 5 | 12788441 | from ...isa.inst import *
import numpy as np
class Vmfeq_vf(Inst):
name = 'vmfeq.vf'
def golden(self):
if 'vs2' in self:
result = np.unpackbits( self['orig'], bitorder='little' )
if 'vstart' in self:
vstart = self['vstart']
else:
vstart = 0
if 'mask' in self:
mask = np.unpackbits(self['mask'], bitorder='little')[vstart: self['vl']]
else:
if self['vl'] >= vstart:
mask = np.ones( self['vl'] - vstart, dtype = np.uint8 )
for no in range(vstart, self['vl']):
if mask[ no - vstart ] == 1:
result[ no ] = self['rs1'] == self['vs2'][no]
result = np.packbits( result, bitorder='little' )
return result
else:
return 0
class Vmfne_vf(Inst):
name = 'vmfne.vf'
def golden(self):
if 'vs2' in self:
result = np.unpackbits( self['orig'], bitorder='little' )
if 'vstart' in self:
vstart = self['vstart']
else:
vstart = 0
if 'mask' in self:
mask = np.unpackbits(self['mask'], bitorder='little')[vstart: self['vl']]
else:
if self['vl'] >= vstart:
mask = np.ones( self['vl'] - vstart, dtype = np.uint8 )
for no in range(vstart, self['vl']):
if mask[ no - vstart ] == 1:
result[ no ] = self['rs1'] != self['vs2'][no]
result = np.packbits( result, bitorder='little' )
return result
else:
return 0
class Vmflt_vf(Inst):
name = 'vmflt.vf'
def golden(self):
if 'vs2' in self:
result = np.unpackbits( self['orig'], bitorder='little' )
if 'vstart' in self:
vstart = self['vstart']
else:
vstart = 0
if 'mask' in self:
mask = np.unpackbits(self['mask'], bitorder='little')[vstart: self['vl']]
else:
if self['vl'] >= vstart:
mask = np.ones( self['vl'] - vstart, dtype = np.uint8 )
for no in range(vstart, self['vl']):
if mask[ no - vstart ] == 1:
result[ no ] = self['vs2'][no] < self['rs1']
result = np.packbits( result, bitorder='little' )
return result
else:
return 0
class Vmfle_vf(Inst):
name = 'vmfle.vf'
def golden(self):
if 'vs2' in self:
result = np.unpackbits( self['orig'], bitorder='little' )
if 'vstart' in self:
vstart = self['vstart']
else:
vstart = 0
if 'mask' in self:
mask = np.unpackbits(self['mask'], bitorder='little')[vstart: self['vl']]
else:
if self['vl'] >= vstart:
mask = np.ones( self['vl'] - vstart, dtype = np.uint8 )
for no in range(vstart, self['vl']):
if mask[ no - vstart ] == 1:
result[ no ] = self['vs2'][no] <= self['rs1']
result = np.packbits( result, bitorder='little' )
return result
else:
return 0
class Vmfgt_vf(Inst):
name = 'vmfgt.vf'
def golden(self):
if 'vs2' in self:
result = np.unpackbits( self['orig'], bitorder='little' )
if 'vstart' in self:
vstart = self['vstart']
else:
vstart = 0
if 'mask' in self:
mask = np.unpackbits(self['mask'], bitorder='little')[vstart: self['vl']]
else:
if self['vl'] >= vstart:
mask = np.ones( self['vl'] - vstart, dtype = np.uint8 )
for no in range(vstart, self['vl']):
if mask[ no - vstart ] == 1:
result[ no ] = self['vs2'][no] > self['rs1']
result = np.packbits( result, bitorder='little' )
return result
else:
return 0
class Vmfge_vf(Inst):
name = 'vmfge.vf'
def golden(self):
if 'vs2' in self:
result = np.unpackbits( self['orig'], bitorder='little' )
if 'vstart' in self:
vstart = self['vstart']
else:
vstart = 0
if 'mask' in self:
mask = np.unpackbits(self['mask'], bitorder='little')[vstart: self['vl']]
else:
if self['vl'] >= vstart:
mask = np.ones( self['vl'] - vstart, dtype = np.uint8 )
for no in range(vstart, self['vl']):
if mask[ no - vstart ] == 1:
result[ no ] = self['vs2'][no] >= self['rs1']
result = np.packbits( result, bitorder='little' )
return result
else:
return 0
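# Illustrative note (not part of the original file): the golden models above
# hold the mask register as packed little-endian bytes and edit an unpacked
# bit view, e.g. (assuming numpy as np):
#
#   orig = np.array([0b00000101], dtype=np.uint8)       # elements 0 and 2 set
#   bits = np.unpackbits(orig, bitorder='little')
#   bits[1] = 1                                          # set element 1
#   np.packbits(bits, bitorder='little')                 # -> [0b00000111]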
| 2.5 | 2 |
venv/lib/python3.8/site-packages/jeepney/bindgen.py | Retraces/UkraineBot | 2 | 12788442 | /home/runner/.cache/pip/pool/ea/f2/c8/d53ffbac437df1f03c084b37a86dcb937cc9b32f0cd8412ff499c36c2d | 0.808594 | 1 |
rest_multi_factor/__init__.py | KENTIVO/rest-multi-factor | 0 | 12788443 | r"""
_____ ______ _____ _______
| __ \| ____|/ ____|__ __|
| |__) | |__ | (___ | |
| _ /| __| \___ \ | |
| | \ \| |____ ____) | | |
|_| \_\______|_____/ |_|
__ __ _ _ _ ______ _
| \/ | | | | (_) | ____| | |
| \ / |_ _| | |_ _ | |__ __ _ ___| |_ ___ _ __
| |\/| | | | | | __| | | __/ _` |/ __| __/ _ \| '__|
| | | | |_| | | |_| | | | | (_| | (__| || (_) | |
|_| |_|\__,_|_|\__|_| |_| \__,_|\___|\__\___/|_|
"""
__title__ = "REST Multi Factor"
__author__ = "<NAME>"
__licence__ = "MIT"
__version__ = (__import__("rest_multi_factor.version", fromlist=["Version"])
.Version(1, 1, 0, "beta", 1))
default_app_config = "rest_multi_factor.apps.RestMultiFactorConfig"
| 1.640625 | 2 |
evosolve/operator/mutation/restricted_mixing.py | piotr-rarus/linkage-learning | 0 | 12788444 | from typing import List, Tuple
import numpy as np
from evobench.benchmark import Benchmark
# from evobench.linkage import DependencyStructureMatrix
from evobench.model import Population, Solution
from ..operator import Operator
class RestrictedMixing(Operator):
def __init__(self, benchmark: Benchmark):
super(RestrictedMixing, self).__init__(benchmark)
# def apply(
# self,
# population: Population,
# dsm: DependencyStructureMatrix
# ) -> Population:
def mix(
self,
source: Solution,
ils: List[int], population: Population
) -> Tuple[Solution, np.ndarray]:
assert source.genome.size == self.benchmark.genome_size
if not source.fitness:
source.fitness = self.benchmark.evaluate_solution(source)
trial = Solution(source.genome.copy())
best_fitness = source.fitness
mask = np.zeros(self.benchmark.genome_size, dtype=bool)
for gene_index in ils:
trial.genome[gene_index] = 1 - trial.genome[gene_index]
fitness = self.benchmark.evaluate_solution(trial)
# ! TODO: benchmark min/max
if fitness >= best_fitness and not population.contains(trial):
best_fitness = fitness
mask[gene_index] = True
else:
trial.genome[gene_index] = 1 - trial.genome[gene_index]
trial.fitness = best_fitness
return trial, mask
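# Usage sketch (illustrative, not part of the original module): mixing over a
# hypothetical incremental linkage set (ils). The benchmark, population and
# source solution are assumed to come from evobench as in the imports above.
#
#   op = RestrictedMixing(benchmark)
#   trial, changed = op.mix(source_solution, ils=[3, 1, 7], population=population)
#   # `trial` keeps only the flips that did not hurt fitness; `changed` marks them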
| 2.0625 | 2 |
src/SurveyDataViewer/settings/development.py | UCHIC/SurveyDataViewer | 10 | 12788445 | <filename>src/SurveyDataViewer/settings/development.py
__author__ = 'Juan'
from SurveyDataViewer.settings.base import *
DEBUG = True
TEMPLATE_DEBUG = True
STATIC_URL = '/static/'
SITE_URL = ''
MEDIA_ROOT = data["media_files_dir"]
MEDIA_URL = '/surveydata/' | 1.375 | 1 |
stack_it/utils/templatetags/node_mixin.py | Jufik/django_stack_it | 8 | 12788446 | import logging
from django.template import VariableDoesNotExist
from django import template
from stack_it.contents.abstracts import BaseContentMixin
from stack_it.models import Page, Template as TemplateModel
from django.db import transaction
from django.utils.safestring import mark_safe
# Get an instance of a logger
logger = logging.getLogger(__name__)
register = template.Library()
def get_template(request, templatename):
if hasattr(request, "templates"):
if request.templates.get(templatename) is not None:
_template = request.templates.get(templatename)
else:
with transaction.atomic():
_template, _ = TemplateModel.objects.get_or_create(path=templatename)
request.templates.update({templatename: _template})
else:
with transaction.atomic():
_template, _ = TemplateModel.objects.get_or_create(path=templatename)
request.templates = {templatename: _template}
return _template
class ContentNodeMixin(template.Node):
CONTENT_MODEL = None
ADMIN_TEMPLATE = "stack_it/editable.html"
INSTANCE_PARAMETER_NAME = None
def __init__(self, instance, content_type, key, widget, nodelist):
super(ContentNodeMixin, self).__init__()
self.instance = template.Variable(instance)
self.key_variable = key
self.widget = widget
self.nodelist = nodelist
self.messages = []
self.content_type = content_type
self.alternative_content_types = list(
dict(BaseContentMixin.CONTENT_TYPES).keys()
)
self.alternative_content_types.remove(self.content_type)
self.admin_template = template.loader.get_template(self.ADMIN_TEMPLATE)
def create_content(self, instance, content_type, key, value):
"""
Creates related content
        This is meant to be overridden
        Returns:
            CONTENT_MODEL instance: Returns the CONTENT_MODEL instance which has just been created
"""
attrs = dict(
[
(self.INSTANCE_PARAMETER_NAME, instance),
("content_type", content_type),
("key", key),
("value", value),
]
)
with transaction.atomic():
content_instance = self.CONTENT_MODEL.objects.create(**attrs)
getattr(instance, f"{self.content_type}s").update(
dict(((self.key, content_instance),))
)
return content_instance
def _get_instance(self, context):
return self.instance.resolve(context)
def _get_key(self, context):
try:
return self.key_variable.resolve(context)
except AttributeError:
return self.key_variable
def content(self, context):
instance = self._get_instance(context)
self.key = self._get_key(context)
original_output = self.nodelist.render(context)
if self.key in getattr(instance, f"{self.content_type}s").keys():
return getattr(instance, f"{self.content_type}s").get(self.key)
for content_type in self.alternative_content_types:
            # Check that the key cannot be found under any other content type
if self.key in getattr(instance, f"{content_type}s").keys():
content_instance = getattr(instance, f"{content_type}s").get(self.key)
content_instance.content_type = self.content_type
with transaction.atomic():
content_instance.save()
msg = (
"warning",
f"Automatically changed {self.key} for instance {content_instance}!",
)
self.messages.append(msg)
getattr(instance, f"{content_type}s").pop(self.key)
getattr(instance, f"{self.content_type}s").update(
{self.key: content_instance}
)
return content_instance
return self.create_content(
instance, self.content_type, self.key, original_output
)
def render(self, context):
content = self.content(context)
if hasattr(context["request"], "user") and context["request"].user.is_staff:
return self.admin_template.render(
{
"id": content.id,
"key": self.key,
"widget": self.widget,
"value": mark_safe(content.value),
}
)
return mark_safe(content.value)
class TemplateContentNodeMixin(ContentNodeMixin):
def __init__(self, instance, content_type, key, widget, nodelist):
super(TemplateContentNodeMixin, self).__init__(
instance, content_type, key, widget, nodelist
)
self.instance = instance
def _get_instance(self, context):
request = context["request"]
return get_template(request, self.instance)
class TextTagMixin(object):
CONTENT_MODEL = None
ADMIN_TEMPLATE = "stack_it/editable.html"
INSTANCE_PARAMETER_NAME = None
def __init__(self, instance, content_type, key, content):
super(TextTagMixin, self).__init__()
self.instance = instance
self.key = key
self.content = content
self.messages = []
self.content_type = content_type
self.alternative_content_types = list(
dict(BaseContentMixin.CONTENT_TYPES).keys()
)
self.alternative_content_types.remove(self.content_type)
def create_content(self, instance, content_type, key, value):
"""
Creates related content
        This is meant to be overridden
        Returns:
            CONTENT_MODEL instance: Returns the CONTENT_MODEL instance which has just been created
"""
attrs = dict(
[
(self.INSTANCE_PARAMETER_NAME, instance),
("content_type", content_type),
("key", key),
("value", value),
]
)
with transaction.atomic():
content_instance = self.CONTENT_MODEL.objects.create(**attrs)
getattr(instance, f"{self.content_type}s").update(
dict(((self.key, content_instance),))
)
return content_instance
def __call__(self):
instance = self.instance
if self.key in getattr(instance, f"{self.content_type}s").keys():
return getattr(instance, f"{self.content_type}s").get(self.key)
for content_type in self.alternative_content_types:
            # Check that the key cannot be found under any other content type
if self.key in getattr(instance, f"{content_type}s").keys():
content_instance = getattr(instance, f"{content_type}s").get(self.key)
content_instance.content_type = self.content_type
content_instance.save()
msg = (
"warning",
f"Automatically changed {self.key} for instance {content_instance}!",
)
self.messages.append(msg)
getattr(instance, f"{content_type}s").pop(self.key)
getattr(instance, f"{self.content_type}s").update(
{self.key: content_instance}
)
return content_instance
return self.create_content(instance, self.content_type, self.key, self.content)
class ImageTagMixin(object):
CONTENT_MODEL = None
ADMIN_TEMPLATE = "stack_it/editable.html"
INSTANCE_PARAMETER_NAME = None
def __init__(self, instance, content_type, key, size, color):
super(ImageTagMixin, self).__init__()
self.instance = instance
self.key = key
self.size = size
self.color = color
self.messages = []
self.content_type = content_type
self.alternative_content_types = list(
dict(BaseContentMixin.CONTENT_TYPES).keys()
)
self.alternative_content_types.remove(self.content_type)
self.admin_template = template.loader.get_template(self.ADMIN_TEMPLATE)
def create_content(
self, instance, content_type, key, size="800x600", color=(0, 0, 0)
):
"""
Creates related content
        This is meant to be overridden
        Returns:
            CONTENT_MODEL instance: Returns the CONTENT_MODEL instance which has just been created
"""
attrs = dict(
[
(self.INSTANCE_PARAMETER_NAME, instance),
("content_type", content_type),
("key", key),
("size", size),
("color", color),
]
)
with transaction.atomic():
content_instance = self.CONTENT_MODEL.init(**attrs)
getattr(instance, f"{self.content_type}s").update(
dict(((self.key, content_instance),))
)
return content_instance
def __call__(self):
instance = self.instance
if self.key in getattr(instance, f"{self.content_type}s").keys():
return getattr(instance, f"{self.content_type}s").get(self.key)
for content_type in self.alternative_content_types:
            # Check that the key cannot be found under any other content type
if self.key in getattr(instance, f"{content_type}s").keys():
content_instance = getattr(instance, f"{content_type}s").get(self.key)
content_instance.content_type = self.content_type
content_instance.save()
msg = (
"warning",
f"Automatically changed {self.key} for instance {content_instance}!",
)
self.messages.append(msg)
getattr(instance, f"{content_type}s").pop(self.key)
getattr(instance, f"{self.content_type}s").update(
{self.key: content_instance}
)
return content_instance
return self.create_content(
instance, self.content_type, self.key, self.size, self.color
)
class PageTagMixin(object):
CONTENT_MODEL = None
ADMIN_TEMPLATE = "stack_it/editable.html"
INSTANCE_PARAMETER_NAME = None
def __init__(self, instance, content_type, key, title):
super(PageTagMixin, self).__init__()
self.instance = instance
self.key = key
self.title = title
self.messages = []
self.content_type = content_type
self.alternative_content_types = list(
dict(BaseContentMixin.CONTENT_TYPES).keys()
)
self.alternative_content_types.remove(self.content_type)
def create_content(self, instance, content_type, key, title):
"""
Creates related content
        This is meant to be overridden
        Returns:
            CONTENT_MODEL instance: Returns the CONTENT_MODEL instance which has just been created
"""
with transaction.atomic():
page = Page.get_or_create(title=title)
content_instance, created = self.CONTENT_MODEL.objects.get_or_create(
**dict(
[
(self.INSTANCE_PARAMETER_NAME, instance),
("content_type", content_type),
("key", key),
("value", page),
]
)
)
getattr(instance, f"{self.content_type}s").update(
dict(((self.key, content_instance),))
)
return content_instance
def __call__(self):
instance = self.instance
if self.key in getattr(instance, f"{self.content_type}s").keys():
return getattr(instance, f"{self.content_type}s").get(self.key)
for content_type in self.alternative_content_types:
            # Check that the key cannot be found under any other content type
if self.key in getattr(instance, f"{content_type}s").keys():
content_instance = getattr(instance, f"{content_type}s").get(self.key)
content_instance.content_type = self.content_type
content_instance.save()
msg = (
"warning",
f"Automatically changed {self.key} for instance {content_instance}!",
)
self.messages.append(msg)
getattr(instance, f"{content_type}s").pop(self.key)
getattr(instance, f"{self.content_type}s").update(
{self.key: content_instance}
)
return content_instance
return self.create_content(instance, self.content_type, self.key, self.title)
| 2.046875 | 2 |
tests/test_masks.py | CyberZHG/keras-trans-mask | 17 | 12788447 | from unittest import TestCase
import os
import tempfile
import numpy as np
from keras_trans_mask.backend import keras
from keras_trans_mask import CreateMask, RemoveMask, RestoreMask
class TestMasks(TestCase):
def test_over_fit(self):
input_layer = keras.layers.Input(shape=(None,))
embed_layer = keras.layers.Embedding(
input_dim=10,
output_dim=15,
)(input_layer)
mask_layer = CreateMask(mask_value=9)(input_layer)
embed_layer = RestoreMask()([embed_layer, mask_layer])
removed_layer = RemoveMask()(embed_layer)
conv_layer = keras.layers.Conv1D(
filters=32,
kernel_size=3,
padding='same',
)(removed_layer)
restored_layer = RestoreMask()([conv_layer, embed_layer])
lstm_layer = keras.layers.LSTM(units=5)(restored_layer)
dense_layer = keras.layers.Dense(units=2, activation='softmax')(lstm_layer)
model = keras.models.Model(inputs=input_layer, outputs=dense_layer)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.summary()
x = np.array([
[1, 2, 3, 4, 5, 9, 9, 9],
[6, 7, 8, 9, 9, 9, 9, 9],
] * 1024)
y = np.array([[0], [1]] * 1024)
model_path = os.path.join(tempfile.gettempdir(), 'test_trans_mask_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={
'CreateMask': CreateMask,
'RemoveMask': RemoveMask,
'RestoreMask': RestoreMask,
})
model.fit(x, y, epochs=10)
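        # Illustrative follow-up (assumption, not part of the original test): since
        # the model is deliberately over-fitted on two padded sequences, predictions
        # could be checked against the training labels, e.g.
        #     predicted = model.predict(x).argmax(axis=-1)
        #     self.assertTrue(np.all(predicted[:2] == y[:2].flatten()))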
| 2.421875 | 2 |
qian_dev/basic/group_fix.py | QianWanghhu/factor_fixing | 0 | 12788448 | import numpy as np
from scipy.stats import entropy
from bisect import bisect
from scipy import stats
from scipy.stats import median_absolute_deviation as mad
from sklearn.metrics import r2_score, mean_squared_error
from pyapprox.multivariate_polynomials import conditional_moments_of_polynomial_chaos_expansion as cond_moments
def group_fix(partial_result, func, x, y_true, x_default,
rand, pool_results, file_exist=False):
"""
Function for compare results between conditioned and unconditioned QoI.
Fix parameters from the least influential group
based on results from partially sorting.
Four error measure types will be returned.
Parameters
----------
partial_result : dict,
dictionary of parameter groups, results of partial sort
func : list of function,
function for analysis (analytical formula or model)
    x : np.ndarray,
        Input with shape of D * N where D is the number of parameters and
        N is the sampling size (parameters are rows, samples are columns)
    y_true : list,
        Function results with all x varying (the raw sampling matrix of x)
    x_default : np.ndarray,
        Default values of x, indexable by parameter position
        (one default value per parameter)
rand : np.ndarray,
Resample index in bootstrap, shape of R * N,
where R is the number of resamples
pool_results : dict,
Index of fixed parameters and the corresponding results
file_exist : bool (default: False),
If true, reads cached partial-ranking results from a file.
Otherwise, calculates results.
Returns
----------
Tuple of:
dict_return: dictionary of uncertainty measures
mae and the uncertain ranges:
Changes in absolute mean error of the func results due to fixing
parameters
var and the uncertain ranges :
Changes in variance of the func results due to fixing parameters
        ks measures and the uncertain ranges :
            Changes in the Kolmogorov-Smirnov statistic (and p-value)
            of the func results due to fixing parameters
        pool_results:
            Updated pool of fixed-parameter index sets and the corresponding results
    """
num_group = len(partial_result) - 1
# store results from fixing parameters in dict
cf_upper = {i: None for i in range(num_group)}
cf_lower, cv, ks, pvalue = dict(cf_upper), dict(cf_upper), dict(cf_upper), dict(cf_upper)
cf_upper_upper, cf_upper_lower, ks_upper, pvalue_upper = dict(cf_upper), dict(cf_upper), dict(cf_upper), dict(cf_upper)
cf_lower_lower, cf_lower_upper, ks_lower, pvalue_lower = dict(cf_upper), dict(cf_upper), dict(cf_upper), dict(cf_upper)
cf_width, cf_width_lower, cf_width_upper, cond_mean = dict(cf_upper), dict(cf_upper), dict(cf_upper), dict(cf_upper)
ind_fix = []
conf_level = [0.025, 0.975]
measures_all = [cf_upper, cf_lower, ks, pvalue, cv,
cf_upper_upper, cf_upper_lower, cf_lower_upper,
cf_lower_lower, ks_lower, ks_upper,
pvalue_lower, pvalue_upper,
cf_width, cf_width_lower,
cf_width_upper, cond_mean]
for i in range(num_group, -1, -1):
if file_exist:
try:
ind_fix.extend(partial_result[str(i)])
except NameError:
ind_fix = partial_result[str(i)]
else:
try:
ind_fix.extend(partial_result[i])
except NameError:
ind_fix = partial_result[i]
ind_fix.sort()
x_temp = x_default[ind_fix]
# check whether results existing
skip_calcul = results_exist(ind_fix, pool_results)
# print(skip_calcul)
if skip_calcul == False:
x_copy = np.copy(x)
x_copy[ind_fix, :] = x_temp
# compare results with insignificant parameters fixed
Nresample = rand.shape[0]
num_func = len(func)
total_resample = num_func * Nresample
pvalue_bt, ks_bt, cf_upper_bt, cf_lower_bt, cf_width_bt, y_true_width = \
np.zeros(total_resample), np.zeros(total_resample), np.zeros(total_resample), np.zeros(total_resample), \
np.zeros(total_resample), np.zeros(total_resample)
## Add the bootstrap of PCE
for jj in range(num_func):
fun = func[jj]
results_fix = fun(x_copy).flatten()
for ii in range(Nresample):
I = rand[ii]
ind_resample = jj * Nresample + ii
[cf_lower_bt[ind_resample], cf_upper_bt[ind_resample], ks_bt[ind_resample], pvalue_bt[ind_resample], y_true_width[ind_resample]] \
= error_measure(I, y_true[jj], results_fix, conf_level)
cf_width_bt = (cf_upper_bt - cf_lower_bt) / y_true_width
# End for
cf_upper[i], cf_lower[i], ks[i], pvalue[i] = cf_upper_bt.mean(), cf_lower_bt.mean(), ks_bt.mean(), pvalue_bt.mean()
cf_upper_lower[i], cf_upper_upper[i] = np.quantile(cf_upper_bt, conf_level)
cf_lower_lower[i], cf_lower_upper[i] = np.quantile(cf_lower_bt, conf_level)
cf_width[i], cf_width_lower[i], cf_width_upper[i] = cf_width_bt.mean(), *np.quantile(cf_width_bt, conf_level)
ks_lower[i], ks_upper[i] = np.quantile(ks_bt, conf_level)
pvalue_lower[i], pvalue_upper[i] = np.quantile(pvalue_bt, conf_level)
cond_mean[i] = results_fix.mean()
if len(ind_fix) == x.shape[0]:
cv[i] = 0
# cond_mean[i] = func(x_temp)[0][0]
else:
mean, variance = cond_moments(fun, x_temp, ind_fix, return_variance=True)
# cond_mean[i] = mean[0]
cv[i] = (np.sqrt(variance) / mean)[0]
# End If
# update pool_results
measure_list = [measure_ele[i] for measure_ele in measures_all]
pool_results = pool_update(ind_fix, measure_list, pool_results)
else:
# map index to calculated values
for ele in range(len(measures_all)):
measures_all[ele][i] = skip_calcul[ele]
# End if
# End for()
names = ['cf_upper', 'cf_lower', 'ks', 'pvalue', 'cv',
'cf_upper_upper', 'cf_upper_lower', 'cf_lower_upper',
'cf_lower_lower', 'ks_lower', 'ks_upper',
'pvalue_lower', 'pvalue_upper',
'cf_width', 'cf_width_lower',
'cf_width_upper', 'cond_mean']
dict_return = dict(zip(names, measures_all))
return dict_return, pool_results
def error_measure(I, y_true, results_fix, conf_level):
"""
Calculate the error measures with a resample dataset.
Parameters:
----------
I : np.array
the random index of each bootstrap
y_true : list,
Function results with all x varying (the raw sampling matrix of x)
    results_fix : list,
        Conditional results with some x fixed at their default values
conf_level: list, percentiles used to calculate the confidence intervals
Returns:
----------
List, values of uncertainty measures
"""
y_true_resample = y_true[I]
results_fix_resample = results_fix[I]
cf_lower_temp, cf_upper_temp = np.quantile(results_fix_resample, conf_level)
ks_bt_temp, pvalue_bt_temp = stats.ks_2samp(y_true_resample, results_fix_resample)
y_true_width_temp = np.quantile(y_true_resample, conf_level[1]) - np.quantile(y_true_resample, conf_level[0])
return [cf_lower_temp, cf_upper_temp, ks_bt_temp, pvalue_bt_temp, y_true_width_temp]
def uncond_cal(y_true, conf_level, rand):
"""
Calculate the unconditional results
Parameters:
----------
y_true : list,
Function results with all x varying (the raw sampling matrix of x)
conf_level: list, percentiles used to calculate the confidence intervals
rand : np.ndarray,
Resample index in bootstrap, shape of R * N,
where R is the number of resamples
    Returns:
    ----------
    uncond_dict : dict,
        Unconditional confidence-interval bounds ('uncond_cf_low',
        'uncond_cf_up') and the bootstrap mean ('uncond_mean')
    """
# if rand is None:
# y_true_bt = y_true
# elif isinstance(rand, np.ndarray):
# y_true_bt = y_true[rand]
# else:
# AssertionError
y_true_bt = np.zeros(shape=(y_true.shape[0], rand.shape[0], y_true.shape[1]))
# import pdb; pdb.set_trace()
for ii in range(y_true.shape[0]):
y_true_bt[ii] = y_true[ii][rand]
uncond_cf_bt = np.quantile(y_true_bt, conf_level, axis=2)
uncond_cf_low, uncond_cf_up = {}, {}
uncond_cf_low['mean'] = uncond_cf_bt[0].mean()
uncond_cf_low['low'], uncond_cf_low['up'] = np.quantile(uncond_cf_bt[0], conf_level)
uncond_cf_up['mean'] = uncond_cf_bt[1].mean()
uncond_cf_up['low'], uncond_cf_up['up'] = np.quantile(uncond_cf_bt[1], conf_level)
uncond_dict = {
'uncond_cf_low' : uncond_cf_low,
'uncond_cf_up' : uncond_cf_up,
'uncond_mean': y_true_bt.mean()
}
return uncond_dict
def results_exist(parms_fixed, pool_results):
"""
Helper function to determine whether results exist.
Parameters
----------
parms_fixed : list,
Index of parameters to fix
pool_results : dict,
Contains both index of parameters fixed and the corresponding results
Returns
-------
skip_cal : bool
"""
if pool_results == {}:
skip_cal = False
elif parms_fixed in pool_results['parms']:
index_measure = pool_results['parms'].index(parms_fixed)
skip_cal = pool_results[f'measures_{index_measure}']
else:
skip_cal = False
return skip_cal
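# Illustrative structure of pool_results as maintained by pool_update() below
# (an assumption spelled out from the code, not part of the original file):
#     {'parms': [[0, 1], [0, 1, 2]],
#      'measures_0': [...],   # measures for fixed parameters [0, 1]
#      'measures_1': [...]}   # measures for fixed parameters [0, 1, 2]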
def pool_update(parms_fixed, measure_list, pool_results):
"""Update pool_results with new values.
Parameters
----------
parms_fixed : list,
Index of parameters to fix
measure_list : list,
Measures newly calculated for parameters in parms_fixed
pool_results : dict,
Contains both index of parameters fixed and the corresponding results
Returns
----------
Updated pool_results
"""
try:
pool_results['parms'].append(parms_fixed[:])
except KeyError:
pool_results['parms'] = [parms_fixed[:]]
index_measure = pool_results['parms'].index(parms_fixed)
pool_results[f'measures_{index_measure}'] = measure_list
return pool_results
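# Illustrative usage sketch (assumptions, not part of the original file: `pce` is
# a fitted PyApprox polynomial chaos expansion that cond_moments() can evaluate,
# and `samples` follows the parameters-as-rows convention used above, shape D x N):
#
#     partial_result = {0: [3, 4], 1: [2], 2: [0, 1]}          # hypothetical ranking
#     rand = np.random.randint(0, samples.shape[1],
#                              size=(500, samples.shape[1]))    # bootstrap indices
#     y_true = [pce(samples).flatten()]
#     x_default = samples.mean(axis=1, keepdims=True)           # shape (D, 1)
#     measures, pool = group_fix(partial_result, [pce], samples, y_true,
#                                x_default, rand, pool_results={})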
| 2.25 | 2 |
scripts/sources/swedish/sls.py | AlexGustafsson/word-frequencies | 0 | 12788449 | http://fho.sls.fi/tidsperiod/1900-tal/
| 0.882813 | 1 |
app/admin/views/admin.py | erics1996/D5-Video | 1 | 12788450 | from .. import admin
from app import db, models
from flask import render_template, flash, request, redirect, url_for, session
from ..forms.admin_form import AdminForm
from ..forms.admin_form import AdminLoginForm
from werkzeug.security import generate_password_hash
from ...models import db
from .decorator import admin_login_decorator
# Add an administrator
@admin.route("/administrator/add/", methods=["GET", "POST"])
@admin_login_decorator
def admin_add():
form = AdminForm()
if form.validate_on_submit():
data = form.data
admin = models.Admin(
name=data["name"],
            pwd=generate_password_hash(data["pwd"]),
role_id=data["role_id"],
            is_super=1  # non-super administrators are always 1
)
db.session.add(admin)
db.session.commit()
db.session.remove()
flash("管理员添加成功!", "ok")
return render_template('admin/admin_add.html', form=form)
# Administrator list
@admin.route("/administrator/list/<int:page>/", methods=["GET"])
@admin_login_decorator
def admin_list(page=None):
page_data = models.Admin.query.paginate(page=page, per_page=10)
return render_template('admin/admin_list.html', page_data=page_data)
@admin.route("/edit/", methods=["GET", "POST"])
@admin_login_decorator
def admin_edit():
return ''
@admin.route("/del/<int:id>/", methods=["GET", "POST"])
@admin_login_decorator
def admin_del(id=None):
admin = models.Admin.query.filter_by(id=id).first_or_404()
db.session.delete(admin)
db.session.commit()
db.session.remove()
return redirect(url_for('admin.admin_list', page=1))
# Log in
@admin.route("/login/", methods=["GET", "POST"])
def login():
form = AdminLoginForm()
if request.method == "POST":
if form.validate_on_submit():
data = form.data
# print(data["name"])
# print(data["pwd"])
admin = models.Admin.query.filter_by(name=data["name"]).first()
# print(admin.check_pwd(data['pwd']))
if not admin or \
not admin.check_pwd(data["pwd"]):
flash("账号或密码错误!", "err")
return redirect(url_for("admin.login", next=request.url))
session["admin"] = admin.name
session["admin_id"] = admin.id
# session['face'] = admin.face
admin_log = models.UserLog(
admin_id=admin.id,
ip=request.remote_addr
)
db.session.add(admin_log)
db.session.commit()
db.session.remove()
return redirect(url_for("admin.index"))
return render_template('admin/login.html', form=form)
# Log out
@admin.route("/logout/")
@admin_login_decorator
def logout():
if session.get('admin') and session.get('admin_id'):
session.pop('admin', None)
session.pop('admin_id', None)
return redirect(url_for('admin.login'))
# Change password
@admin.route("/pwd/", methods=["GET", "POST"])
@admin_login_decorator
def pwd():
return ''
# Administrator login log list
@admin.route("/loginlog/list/<int:page>/", methods=["GET"])
@admin_login_decorator
def admin_loginlog_list(page=None):
page_data = models.UserLog.query.join(
models.Admin
).filter(
models.UserLog.admin_id == models.Admin.id
).order_by(
models.UserLog.add_time.desc()
).paginate(page=page, per_page=10)
# print(page_data.items)#[<UserLog 60>, <UserLog 59>, <UserLog 58>, <UserLog 57>, <UserLog 56>, <UserLog 55>, <UserLog 54>, <UserLog 53>, <UserLog 52>, <UserLog 51>]
return render_template('admin/admin_loginlog_list.html', page_data=page_data)
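# Illustrative sketch (assumption -- the real implementation lives in .decorator
# and is not shown here): a typical login-required decorator for this blueprint
# would look roughly like:
#
#     from functools import wraps
#
#     def admin_login_decorator(f):
#         @wraps(f)
#         def wrapper(*args, **kwargs):
#             if "admin" not in session or "admin_id" not in session:
#                 return redirect(url_for("admin.login", next=request.url))
#             return f(*args, **kwargs)
#         return wrapper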
| 2.234375 | 2 |