max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
core/migrations/0003_auto_20190805_1144.py | ArthurGorgonio/suggestclasses | 0 | 12795351 | <filename>core/migrations/0003_auto_20190805_1144.py
# Generated by Django 2.1.5 on 2019-08-05 14:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20190509_1508'),
]
operations = [
migrations.CreateModel(
name='Horario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ordem', models.CharField(choices=[('1', 'Primeiro Horário'), ('2', 'Segundo Horário'), ('3', 'Terceiro Horário'), ('4', 'Quarto Horário'), ('5', 'Quinto Horário'), ('6', 'Sexto Horário')], max_length=1)),
('turno', models.CharField(choices=[('M', 'Manhã'), ('T', 'Tarde'), ('N', 'Noite')], max_length=10)),
('hora_inicio', models.TimeField()),
('hora_fim', models.TimeField()),
],
),
migrations.AlterUniqueTogether(
name='horario',
unique_together={('ordem', 'turno')},
),
]
| 1.742188 | 2 |
rawdisk/scheme/__init__.py | dariusbakunas/rawdisk | 3 | 12795352 | <reponame>dariusbakunas/rawdisk
# -*- coding: utf-8 -*-
__all__ = ['mbr', 'gpt', 'common']
from . import common
from . import mbr
from . import gpt
| 1.023438 | 1 |
sewer/dns_providers/__init__.py | dnet/sewer | 0 | 12795353 | from .common import BaseDns # noqa: F401
from .auroradns import AuroraDns # noqa: F401
from .cloudflare import CloudFlareDns # noqa: F401
| 1.046875 | 1 |
expanse_su_estimator/sbatch_parser.py | alex-wenzel/expanse-su-estimator | 0 | 12795354 | <reponame>alex-wenzel/expanse-su-estimator
"""
This file defines a class that parses and represents an SBATCH submission
script
"""
class SBATCHScript:
def __init__(self, path):
self.path = path
self.args = {}
def __getitem__(self, key):
if type(key) == list:
return self.multiple_key_query(key)
else:
return self.args[key]
def __str__(self):
return '\n'.join([
f"{key}: {value}"
for key, value in self.args.items()
])
def parse(self):
for line in open(self.path, 'r').readlines():
if not line.startswith("#SBATCH"):
continue
tokens = line.split()[1:]
arg, val = None, None
## parse args with '--' and '='
if len(tokens) == 1:
arg, val = tokens[0].split('=')
## parse args with '-'
else:
arg, val = tokens
arg = arg.strip("-")
self.args[arg] = val
def multiple_key_query(self, keys):
"""
A function to allow for querying of parameters
that can have multiple names, .e.g., -N, --nodes
"""
for key in keys:
try:
return self.args[key]
except KeyError:
continue
raise KeyError(f"None of {keys} in sbatch arguments")
if __name__ == "__main__":
s = SBATCHScript("test_examples/expanse_shared_example.sh")
s.parse()
| 2.75 | 3 |
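A minimal usage sketch for the SBATCHScript parser above. The script text, option names, and values are invented for illustration; it simply exercises the two '#SBATCH' forms that parse() distinguishes ('--key=value' versus '-k value') and the alias lookup in multiple_key_query.

import tempfile

example = "#!/bin/bash\n#SBATCH --job-name=demo\n#SBATCH --nodes=2\n#SBATCH -p shared\necho hello\n"
with tempfile.NamedTemporaryFile("w", suffix=".sh", delete=False) as f:
    f.write(example)
    path = f.name
s = SBATCHScript(path)  # assumes the class defined above is in scope
s.parse()
print(s["job-name"])          # 'demo'   (from '--job-name=demo', split on '=')
print(s[["N", "nodes"]])      # '2'      (falls through the 'N' alias to 'nodes')
print(s[["p", "partition"]])  # 'shared' (from the '-p shared' form)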
bolinette/blnt/commands/__init__.py | bolinette/bolinette | 4 | 12795355 | from bolinette.blnt.commands.argument import Argument
from bolinette.blnt.commands.command import Command
from bolinette.blnt.commands.parser import Parser
| 1.289063 | 1 |
src/doc/overrides/.icons/convert.py | alexanderpann/mps-gotchas | 4 | 12795356 | <filename>src/doc/overrides/.icons/convert.py
import base64
import glob, os
def write_svg_file(svg_path, encoded_str):
with open(svg_path, "w") as text_file:
content = """
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="32" height="32" viewBox="0 0 32 32">
<image width="32" height="32"
xlink:href="{0}"
/>
</svg>
"""
text_file.write(content.format(encoded_str))
def image_to_data_url(filename):
ext = filename.split('.')[-1]
prefix = f'data:image/{ext};base64,'
with open(filename, 'rb') as f:
img = f.read()
return prefix + base64.b64encode(img).decode('utf-8')
basePath = "" # set path to icons here
for file in glob.glob(basePath+"/**/**/*.png"):
png_file = file
svg_file = file[0:-4]+ ".svg"
image_data = image_to_data_url(png_file)
write_svg_file(svg_file,image_data)
os.remove(png_file) | 3.09375 | 3 |
docker_host/views.py | DisMosGit/Dodja | 0 | 12795357 | <gh_stars>0
from rest_framework.exceptions import PermissionDenied
from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import action
from django.db.models import Q
from .models import Host, Access, Job
from .serializer import AccessCreateSerializer, AccessReadSerializer, HostSerializer, ActionSerializer, JobSerializer, UserAccessSerializer
from .drivers import DockerConnectionPool
from .permissions import IsHostOperationAllowed, HostOperationMixin
class HostViewSet(ModelViewSet, HostOperationMixin):
queryset = Host.objects
serializer_class = HostSerializer
permission_classes = [IsHostOperationAllowed]
permission_kind = "dh"
host_pk = "pk"
ignored_suffixes = ("List", )
def get_queryset(self):
return super().get_queryset().filter(
Q(creator=self.request.user)
| Q(accesses__user=self.request.user)).distinct().order_by('title')
def perform_create(self, serializer):
serializer.save(creator=self.request.user)
@action(detail=True,
methods=['POST'],
url_path="execute",
serializer_class=ActionSerializer)
def execute(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
if not IsHostOperationAllowed.check_host_permissions(
request.user,
IsHostOperationAllowed.get_docker_operation(
serializer.data["command"]),
host__pk=self.kwargs.get(self.host_pk),
kind=self.permission_kind,
):
raise PermissionDenied()
instance = self.get_object()
result = DockerConnectionPool(str(instance.id), instance).execute(
command=serializer.data.get('command'),
**serializer.data.get("args"),
)
status = 200
if bool(result.get("error")):
status = 400
return Response(result, status=status)
@action(detail=True,
methods=['get'],
url_path="my_access",
serializer_class=UserAccessSerializer)
def my_access(self, request, pk, **kwargs):
permissions = {}
access: Access = Access.objects.filter(host__pk=pk,
user=request.user).first()
if access is None:
permissions = {"full": True}
else:
for kind in access.permissions_kind:
permissions[access.permissions_dictionary.get(
kind,
kind,
)] = access.able_operations(kind)
serializer = self.get_serializer({
"permissions": permissions,
"user": request.user.pk,
"host": pk
})
return Response(serializer.data)
class AccessViewSet(ModelViewSet, HostOperationMixin):
queryset = Access.objects
serializer_class = AccessReadSerializer
permission_classes = [IsHostOperationAllowed]
lookup_field = 'id'
permission_kind = "dp"
host_pk = "host__pk"
def get_serializer_class(self):
if self.action in ("update", "create", "partial_update"):
return AccessCreateSerializer
else:
return AccessReadSerializer
def get_queryset(self):
return super().get_queryset().prefetch_related('user').filter(
host__pk=self.kwargs.get("host__pk")).order_by('permissions')
def perform_create(self, serializer):
serializer.save(host_id=self.kwargs.get("host__pk"))
class JobViewSet(ReadOnlyModelViewSet, HostOperationMixin):
queryset = Job.objects
serializer_class = JobSerializer
permission_classes = [IsHostOperationAllowed]
lookup_field = 'id'
permission_kind = "dh"
host_pk = "host__pk"
def get_queryset(self):
return super().get_queryset().filter(
host__pk=self.kwargs.get("host__pk"))
# TODO:
# @action(detail=True,
# methods=['GET'],
# url_path="job",
# serializer_class=ActionSerializer)
# def job(self, request, *args, **kwargs):
# instance = self.get_object()
# result = DockerConnectionPool(str(instance.id),
# instance).get_job_result(
# request.query_params.get("key"))
# return Response(result)
| 2 | 2 |
shared/management/__init__.py | Saevon/webdnd | 4 | 12795358 | from django.db.models.signals import post_syncdb
from django.conf import settings
from django.core import management
import os
import re
FIXTURE_RE = re.compile(r'^[^.]*\.json$')
def load_data(sender, **kwargs):
"""
Loads fixture data after loading the last installed app
"""
if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] + ".models":
fixture_files = []
for loc in settings.INITIAL_FIXTURE_DIRS:
loc = os.path.abspath(loc)
if os.path.exists(loc):
fixture_files += os.listdir(loc)
fixture_files = filter(lambda v: FIXTURE_RE.match(v), fixture_files)
fixture_files = [os.path.join(loc, f) for f in fixture_files]
if len(fixture_files) > 0:
print "Initializing Fixtures:"
for fixture in fixture_files:
print " >> %s" % (fixture)
management.call_command('loaddata', fixture, verbosity=0)
# Update the index
print 'Generating Index'
management.call_command('index', 'all', flush=True, verbosity=1)
post_syncdb.connect(load_data)
| 2.09375 | 2 |
windpyutils/args.py | windionleaf/windPyUtils | 0 | 12795359 | <gh_stars>0
# -*- coding: UTF-8 -*-
""""
Created on 30.06.20
Module with wrapper that makes arguments parsers raising exceptions.
:author: <NAME>
"""
from argparse import ArgumentParser
class ArgumentParserError(Exception):
"""
Exceptions for argument parsing.
"""
pass
class ExceptionsArgumentParser(ArgumentParser):
"""
Argument parser that uses exceptions for error handling.
"""
def error(self, message):
raise ArgumentParserError(message)
| 2.5625 | 3 |
asyncClient/__main__.py | harveyspec1245/tcpClient | 0 | 12795360 | <reponame>harveyspec1245/tcpClient<gh_stars>0
from asyncClient import Clients
import sys
import getopt
if __name__ == '__main__':
_clients = 2
_data = 'Hello'
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:c:", ["data=", "clients="])
except getopt.GetoptError as e:
print(e)
print('Usage: python -m client -d <data_to_send> -c <num of clients>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('Usage: python -m client -d <data_to_send> -c <num of clients>')
sys.exit()
elif opt in ("-d", "--data"):
_data = arg
elif opt in ("-c", "--clients"):
_clients = int(arg)
Clients(_clients, _data)
| 2.59375 | 3 |
Max/Max_0160_20200409.py | Morek999/OMSCS_Taiwan_Leetcode | 1 | 12795361 | <filename>Max/Max_0160_20200409.py
"""
160. Intersection of Two Linked Lists
https://leetcode.com/problems/intersection-of-two-linked-lists/
Time complexity: O(m + n), where m and n are the lengths of the two lists
Space complexity: O(1)
"""
from typing import List
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
if headA is None or headB is None:
return None
ptA = headA
ptB = headB
while ptA is not ptB:
ptA = headB if ptA is None else ptA.next
ptB = headA if ptB is None else ptB.next
return ptA | 3.515625 | 4 |
django/reviewApp/admin.py | Akasiek/scorethatlp | 5 | 12795362 | from django.contrib import admin
from . import models
@admin.register(models.Reviewer)
class ReviewerAdmin(admin.ModelAdmin):
autocomplete_fields = ["user"]
list_display = ["username", "email"]
search_fields = ["username__istartswith"]
@admin.register(models.Album)
class AlbumAdmin(admin.ModelAdmin):
list_display = ["title", "artist_id", "created_at", "created_by"]
ordering = ["title"]
list_per_page = 30
prepopulated_fields = {
"slug": ["title"]
}
list_select_related = ["artist_id", "created_by"]
autocomplete_fields = ["artist_id"]
search_fields = ["title"]
@admin.register(models.Artist)
class ArtistAdmin(admin.ModelAdmin):
list_display = ["name", "created_at", "created_by"]
ordering = ["name"]
list_per_page = 30
prepopulated_fields = {
"slug": ["name"]
}
search_fields = ['name__istartswith']
@admin.register(models.Genre)
class GenreAdmin(admin.ModelAdmin):
search_fields = ['name__istartswith']
@admin.register(models.AlbumGenre)
class AlbumGenreAdmin(admin.ModelAdmin):
list_display = ["__str__", "album_id", "genre_id"]
autocomplete_fields = ["album_id", "genre_id"]
@admin.register(models.AlbumLink)
class AlbumLinkAdmin(admin.ModelAdmin):
list_display = ["__str__", "album_id"]
autocomplete_fields = ["album_id"]
@admin.register(models.AlbumOfTheYear)
class AlbumOfTheYear(admin.ModelAdmin):
list_display = ["__str__", "album_id"]
autocomplete_fields = ["album_id"]
@admin.register(models.Track)
class TrackAdmin(admin.ModelAdmin):
list_display = ["__str__", "album_id"]
@admin.register(models.Review)
class ReviewAdmin(admin.ModelAdmin):
autocomplete_fields = ["album_id", "reviewer_id"]
list_display = ["__str__", "album_id", "reviewer_id"]
@admin.register(models.FavoriteReviewerArtist)
class FavoriteReviewerArtistAdmin(admin.ModelAdmin):
autocomplete_fields = ["artist_id", "reviewer_id"]
list_display = ["artist_id", "reviewer_id"]
@admin.register(models.ReviewerLink)
class ReviewerLinkAdmin(admin.ModelAdmin):
autocomplete_fields = ["reviewer_id"]
list_display = ["reviewer_id", "service_name"]
| 2.046875 | 2 |
data_shift.py | zackchase/label_shift | 14 | 12795363 | import mxnet as mx
from mxnet import nd, autograd
import numpy as np
##################################3
# X, y - training data
# n - number of data points in dataset
# Py - desired label distribution
###################################
def tweak_dist(X, y, num_labels, n, Py):
shape = (n, *X.shape[1:])
Xshift = np.zeros(shape)
yshift = np.zeros(n, dtype=np.int8)
# get indices for each label
indices_by_label = [(y==k).nonzero()[0] for k in range(num_labels)]
labels = np.argmax(
np.random.multinomial(1, Py, n), axis=1)
for i in range(n):
# sample an example from X with replacement
idx = np.random.choice(indices_by_label[labels[i]])
Xshift[i] = X[idx]
yshift[i] = y[idx]
return Xshift, yshift
def tweak_one(X, y, num_labels, n, knockout_label, p):
# create Py
# call down to tweak_dist
Py = np.full(num_labels, (1.-p)/(num_labels-1))
Py[knockout_label] = p
print(Py)
return tweak_dist(X, y, num_labels, n, Py)
| 3.015625 | 3 |
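A hedged usage sketch for tweak_dist/tweak_one above. The toy arrays, the sample count of 500, and the 0.7 knockout probability are all invented; a 10-class, MNIST-like setting is assumed to match the comments at the top of the file.

import numpy as np

X_toy = np.random.rand(1000, 8, 8)           # 1000 fake 8x8 "images"
y_toy = np.random.randint(0, 10, size=1000)  # labels 0..9, roughly uniform
# resample 500 points so that label 3 carries 70% of the probability mass
X_shift, y_shift = tweak_one(X_toy, y_toy, num_labels=10, n=500, knockout_label=3, p=0.7)
print(X_shift.shape, y_shift.shape)                # (500, 8, 8) (500,)
print(np.bincount(y_shift, minlength=10) / 500.0)  # empirical label distribution, close to Py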
v1.0.0.test/toontown/hood/AnimatedProp.py | TTOFFLINE-LEAK/ttoffline | 4 | 12795364 | <filename>v1.0.0.test/toontown/hood/AnimatedProp.py
from direct.showbase import DirectObject
from direct.directnotify import DirectNotifyGlobal
class AnimatedProp(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('AnimatedProp')
def __init__(self, node):
self.node = node
def delete(self):
pass
def uniqueName(self, name):
return name + '-' + str(self.node.this)
def enter(self):
self.notify.debug('enter')
def exit(self):
self.notify.debug('exit') | 2.0625 | 2 |
tests/test_microsoft_trans.py | nidhaloff/deep_translator | 118 | 12795365 | <filename>tests/test_microsoft_trans.py
#!/usr/bin/env python
"""Tests for `deep_translator` package."""
from unittest.mock import patch
import pytest
import requests
from deep_translator import MicrosoftTranslator, exceptions
# mocked request.post
@patch.object(requests, "post")
def test_microsoft_successful_post_mock(mock_request_post):
returned_json = [{"translations": [{"text": "See you later!", "to": "en"}]}]
def res():
r = requests.Response()
def json_func():
return returned_json
r.json = json_func
return r
mock_request_post.return_value = res()
assert (
MicrosoftTranslator(api_key="an_api_key", source="de", target="en").translate(
"auf wiedersehen!"
)
== "See you later!"
)
def test_MicrosoftAPIerror():
with pytest.raises(exceptions.MicrosoftAPIerror):
MicrosoftTranslator(api_key="empty", source="de", target="en").translate("text")
# the remaining tests are actual requests to Microsoft API and use an api key
# if APIkey variable is None, they are skipped
APIkey = None
@pytest.mark.skipif(APIkey is None, reason="api_key is not provided")
def test_microsoft_successful_post_onetarget():
posted = MicrosoftTranslator(api_key=APIkey, target="en").translate(
"auf wiedersehen!"
)
assert isinstance(posted, str)
@pytest.mark.skipif(APIkey is None, reason="api_key is not provided")
def test_microsoft_successful_post_twotargets():
posted = MicrosoftTranslator(api_key=APIkey, target=["en", "ru"]).translate(
"auf wiedersehen!"
)
assert isinstance(posted, str)
@pytest.mark.skipif(APIkey is None, reason="api_key is not provided")
def test_incorrect_target_attributes():
with pytest.raises(exceptions.ServerException):
MicrosoftTranslator(api_key=APIkey, target="")
with pytest.raises(exceptions.ServerException):
MicrosoftTranslator(api_key="", target="nothing")
@pytest.mark.skipif(APIkey is None, reason="api_key is not provided")
def test_abbreviations():
m1 = MicrosoftTranslator(api_key=APIkey, source="en", target="fr")
m2 = MicrosoftTranslator(api_key=APIkey, source="English", target="French")
assert "".join(m1._source) == "".join(m2._source)
assert "".join(m1._target) == "".join(m2._target)
| 2.546875 | 3 |
build_newlib.py | codyd51/axle | 453 | 12795366 | <reponame>codyd51/axle<filename>build_newlib.py
#!/usr/bin/python3
import os
import tempfile
from pathlib import Path
from typing import Tuple
from build_utils import download_and_unpack_archive, run_and_check
def clone_tool_and_prepare_build_dir(build_dir: Path, url: str) -> Tuple[Path, Path]:
tool_src_dir = download_and_unpack_archive(build_dir, url)
tool_name = url.split("/")[-1].removesuffix(".tar.gz")
tool_build_dir = build_dir / f"build-{tool_name}"
tool_build_dir.mkdir(exist_ok=True)
return tool_src_dir, tool_build_dir
def build() -> None:
axle_dir = Path(__file__).parent
sysroot_dir = axle_dir / "axle-sysroot"
arch_target = "i686-elf"
toolchain_dir = axle_dir / "i686-toolchain"
binaries_dir = toolchain_dir / "bin"
with tempfile.TemporaryDirectory() as build_dir_raw:
build_dir = Path(build_dir_raw)
build_products_dir = Path(__file__).parent / "newlib-build-products"
if False:
automake_src_dir, automake_build_dir = clone_tool_and_prepare_build_dir(
build_dir, "https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz"
)
automake_configure_path = automake_src_dir / "configure"
run_and_check(
[automake_configure_path.as_posix(), f"--prefix={build_products_dir}"], cwd=automake_build_dir
)
run_and_check(["make"], cwd=automake_build_dir)
run_and_check(["make", "install"], cwd=automake_build_dir)
autoconf_src_dir, autoconf_build_dir = clone_tool_and_prepare_build_dir(
build_dir, "https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz"
)
autoconf_configure_path = autoconf_src_dir / "configure"
run_and_check(
[autoconf_configure_path.as_posix(), f"--prefix={build_products_dir}"], cwd=autoconf_build_dir
)
run_and_check(["make"], cwd=autoconf_build_dir)
run_and_check(["make", "install"], cwd=autoconf_build_dir)
newlib_src_dir = axle_dir / "ports" / "newlib" / "newlib-2.5.0.20171222"
newlib_build_dir = build_dir / "build-newlib"
newlib_build_dir.mkdir()
os.symlink((binaries_dir / "i686-elf-ar").as_posix(), (newlib_build_dir / "i686-axle-ar").as_posix())
os.symlink((binaries_dir / "i686-elf-as").as_posix(), (newlib_build_dir / "i686-axle-as").as_posix())
os.symlink((binaries_dir / "i686-elf-gcc").as_posix(), (newlib_build_dir / "i686-axle-gcc").as_posix())
os.symlink((binaries_dir / "i686-elf-cc").as_posix(), (newlib_build_dir / "i686-axle-cc").as_posix())
os.symlink((binaries_dir / "i686-elf-ranlib").as_posix(), (newlib_build_dir / "i686-axle-ranlib").as_posix())
env = {"PATH": f'{newlib_build_dir}:{os.environ["PATH"]}'}
newlib_configure_path = newlib_src_dir / "configure"
run_and_check(
[newlib_configure_path.as_posix(), "--prefix=/usr", "--target=i686-axle"],
cwd=newlib_build_dir,
env_additions=env,
)
run_and_check(["make", "all"], cwd=newlib_build_dir, env_additions=env)
run_and_check(["make", f"DESTDIR={sysroot_dir.as_posix()}", "install"], cwd=newlib_build_dir, env_additions=env)
# If you make some kind of config change to the axle target, such as adding new files within the newlib port,
# you may have to run this command
# You may see an error like the following while running this script:
# /bin/sh: /Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such file or directory
# ../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle
# Fail when newlib doesn't compile
# set -e
# make all
if __name__ == "__main__":
build()
| 2.171875 | 2 |
bread/protocols/__init__.py | systocrat/bread | 11 | 12795367 | __all__ = [
'flash',
'http',
'irc',
'ssh2'
] | 1.078125 | 1 |
sdk/python/pulumiverse_unifi/get_network.py | pulumiverse/pulumi-unifi | 1 | 12795368 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetNetworkResult',
'AwaitableGetNetworkResult',
'get_network',
'get_network_output',
]
@pulumi.output_type
class GetNetworkResult:
"""
A collection of values returned by getNetwork.
"""
def __init__(__self__, dhcp_dns=None, dhcp_enabled=None, dhcp_lease=None, dhcp_start=None, dhcp_stop=None, dhcpd_boot_enabled=None, dhcpd_boot_filename=None, dhcpd_boot_server=None, domain_name=None, id=None, igmp_snooping=None, ipv6_interface_type=None, ipv6_pd_interface=None, ipv6_pd_prefixid=None, ipv6_ra_enable=None, ipv6_static_subnet=None, name=None, network_group=None, purpose=None, site=None, subnet=None, vlan_id=None, wan_dns=None, wan_egress_qos=None, wan_gateway=None, wan_ip=None, wan_netmask=None, wan_networkgroup=None, wan_type=None, wan_username=None, x_wan_password=<PASSWORD>):
if dhcp_dns and not isinstance(dhcp_dns, list):
raise TypeError("Expected argument 'dhcp_dns' to be a list")
pulumi.set(__self__, "dhcp_dns", dhcp_dns)
if dhcp_enabled and not isinstance(dhcp_enabled, bool):
raise TypeError("Expected argument 'dhcp_enabled' to be a bool")
pulumi.set(__self__, "dhcp_enabled", dhcp_enabled)
if dhcp_lease and not isinstance(dhcp_lease, int):
raise TypeError("Expected argument 'dhcp_lease' to be a int")
pulumi.set(__self__, "dhcp_lease", dhcp_lease)
if dhcp_start and not isinstance(dhcp_start, str):
raise TypeError("Expected argument 'dhcp_start' to be a str")
pulumi.set(__self__, "dhcp_start", dhcp_start)
if dhcp_stop and not isinstance(dhcp_stop, str):
raise TypeError("Expected argument 'dhcp_stop' to be a str")
pulumi.set(__self__, "dhcp_stop", dhcp_stop)
if dhcpd_boot_enabled and not isinstance(dhcpd_boot_enabled, bool):
raise TypeError("Expected argument 'dhcpd_boot_enabled' to be a bool")
pulumi.set(__self__, "dhcpd_boot_enabled", dhcpd_boot_enabled)
if dhcpd_boot_filename and not isinstance(dhcpd_boot_filename, str):
raise TypeError("Expected argument 'dhcpd_boot_filename' to be a str")
pulumi.set(__self__, "dhcpd_boot_filename", dhcpd_boot_filename)
if dhcpd_boot_server and not isinstance(dhcpd_boot_server, str):
raise TypeError("Expected argument 'dhcpd_boot_server' to be a str")
pulumi.set(__self__, "dhcpd_boot_server", dhcpd_boot_server)
if domain_name and not isinstance(domain_name, str):
raise TypeError("Expected argument 'domain_name' to be a str")
pulumi.set(__self__, "domain_name", domain_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if igmp_snooping and not isinstance(igmp_snooping, bool):
raise TypeError("Expected argument 'igmp_snooping' to be a bool")
pulumi.set(__self__, "igmp_snooping", igmp_snooping)
if ipv6_interface_type and not isinstance(ipv6_interface_type, str):
raise TypeError("Expected argument 'ipv6_interface_type' to be a str")
pulumi.set(__self__, "ipv6_interface_type", ipv6_interface_type)
if ipv6_pd_interface and not isinstance(ipv6_pd_interface, str):
raise TypeError("Expected argument 'ipv6_pd_interface' to be a str")
pulumi.set(__self__, "ipv6_pd_interface", ipv6_pd_interface)
if ipv6_pd_prefixid and not isinstance(ipv6_pd_prefixid, str):
raise TypeError("Expected argument 'ipv6_pd_prefixid' to be a str")
pulumi.set(__self__, "ipv6_pd_prefixid", ipv6_pd_prefixid)
if ipv6_ra_enable and not isinstance(ipv6_ra_enable, bool):
raise TypeError("Expected argument 'ipv6_ra_enable' to be a bool")
pulumi.set(__self__, "ipv6_ra_enable", ipv6_ra_enable)
if ipv6_static_subnet and not isinstance(ipv6_static_subnet, str):
raise TypeError("Expected argument 'ipv6_static_subnet' to be a str")
pulumi.set(__self__, "ipv6_static_subnet", ipv6_static_subnet)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_group and not isinstance(network_group, str):
raise TypeError("Expected argument 'network_group' to be a str")
pulumi.set(__self__, "network_group", network_group)
if purpose and not isinstance(purpose, str):
raise TypeError("Expected argument 'purpose' to be a str")
pulumi.set(__self__, "purpose", purpose)
if site and not isinstance(site, str):
raise TypeError("Expected argument 'site' to be a str")
pulumi.set(__self__, "site", site)
if subnet and not isinstance(subnet, str):
raise TypeError("Expected argument 'subnet' to be a str")
pulumi.set(__self__, "subnet", subnet)
if vlan_id and not isinstance(vlan_id, int):
raise TypeError("Expected argument 'vlan_id' to be a int")
pulumi.set(__self__, "vlan_id", vlan_id)
if wan_dns and not isinstance(wan_dns, list):
raise TypeError("Expected argument 'wan_dns' to be a list")
pulumi.set(__self__, "wan_dns", wan_dns)
if wan_egress_qos and not isinstance(wan_egress_qos, int):
raise TypeError("Expected argument 'wan_egress_qos' to be a int")
pulumi.set(__self__, "wan_egress_qos", wan_egress_qos)
if wan_gateway and not isinstance(wan_gateway, str):
raise TypeError("Expected argument 'wan_gateway' to be a str")
pulumi.set(__self__, "wan_gateway", wan_gateway)
if wan_ip and not isinstance(wan_ip, str):
raise TypeError("Expected argument 'wan_ip' to be a str")
pulumi.set(__self__, "wan_ip", wan_ip)
if wan_netmask and not isinstance(wan_netmask, str):
raise TypeError("Expected argument 'wan_netmask' to be a str")
pulumi.set(__self__, "wan_netmask", wan_netmask)
if wan_networkgroup and not isinstance(wan_networkgroup, str):
raise TypeError("Expected argument 'wan_networkgroup' to be a str")
pulumi.set(__self__, "wan_networkgroup", wan_networkgroup)
if wan_type and not isinstance(wan_type, str):
raise TypeError("Expected argument 'wan_type' to be a str")
pulumi.set(__self__, "wan_type", wan_type)
if wan_username and not isinstance(wan_username, str):
raise TypeError("Expected argument 'wan_username' to be a str")
pulumi.set(__self__, "wan_username", wan_username)
if x_wan_password and not isinstance(x_wan_password, str):
raise TypeError("Expected argument 'x_wan_password' to be a str")
pulumi.set(__self__, "x_wan_password", x_wan_password)
@property
@pulumi.getter(name="dhcpDns")
def dhcp_dns(self) -> Sequence[str]:
"""
IPv4 addresses for the DNS server to be returned from the DHCP server.
"""
return pulumi.get(self, "dhcp_dns")
@property
@pulumi.getter(name="dhcpEnabled")
def dhcp_enabled(self) -> bool:
"""
whether DHCP is enabled or not on this network.
"""
return pulumi.get(self, "dhcp_enabled")
@property
@pulumi.getter(name="dhcpLease")
def dhcp_lease(self) -> int:
"""
lease time for DHCP addresses.
"""
return pulumi.get(self, "dhcp_lease")
@property
@pulumi.getter(name="dhcpStart")
def dhcp_start(self) -> str:
"""
The IPv4 address where the DHCP range of addresses starts.
"""
return pulumi.get(self, "dhcp_start")
@property
@pulumi.getter(name="dhcpStop")
def dhcp_stop(self) -> str:
"""
The IPv4 address where the DHCP range of addresses stops.
"""
return pulumi.get(self, "dhcp_stop")
@property
@pulumi.getter(name="dhcpdBootEnabled")
def dhcpd_boot_enabled(self) -> bool:
"""
Toggles on the DHCP boot options. will be set to true if you have dhcpd*boot*filename, and dhcpd*boot*server set.
"""
return pulumi.get(self, "dhcpd_boot_enabled")
@property
@pulumi.getter(name="dhcpdBootFilename")
def dhcpd_boot_filename(self) -> str:
"""
the file to PXE boot from on the dhcpd*boot*server.
"""
return pulumi.get(self, "dhcpd_boot_filename")
@property
@pulumi.getter(name="dhcpdBootServer")
def dhcpd_boot_server(self) -> str:
"""
IPv4 address of a TFTP server to network boot from.
"""
return pulumi.get(self, "dhcpd_boot_server")
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> str:
"""
The domain name of this network.
"""
return pulumi.get(self, "domain_name")
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the network.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="igmpSnooping")
def igmp_snooping(self) -> bool:
"""
Specifies whether IGMP snooping is enabled or not.
"""
return pulumi.get(self, "igmp_snooping")
@property
@pulumi.getter(name="ipv6InterfaceType")
def ipv6_interface_type(self) -> str:
"""
Specifies which type of IPv6 connection to use.
"""
return pulumi.get(self, "ipv6_interface_type")
@property
@pulumi.getter(name="ipv6PdInterface")
def ipv6_pd_interface(self) -> str:
"""
Specifies which WAN interface is used for IPv6 Prefix Delegation.
"""
return pulumi.get(self, "ipv6_pd_interface")
@property
@pulumi.getter(name="ipv6PdPrefixid")
def ipv6_pd_prefixid(self) -> str:
"""
Specifies the IPv6 Prefix ID.
"""
return pulumi.get(self, "ipv6_pd_prefixid")
@property
@pulumi.getter(name="ipv6RaEnable")
def ipv6_ra_enable(self) -> bool:
"""
Specifies whether to enable router advertisements or not.
"""
return pulumi.get(self, "ipv6_ra_enable")
@property
@pulumi.getter(name="ipv6StaticSubnet")
def ipv6_static_subnet(self) -> str:
"""
Specifies the static IPv6 subnet (when ipv6*interface*type is 'static').
"""
return pulumi.get(self, "ipv6_static_subnet")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the network.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkGroup")
def network_group(self) -> str:
"""
The group of the network.
"""
return pulumi.get(self, "network_group")
@property
@pulumi.getter
def purpose(self) -> str:
"""
The purpose of the network. One of `corporate`, `guest`, `wan`, or `vlan-only`.
"""
return pulumi.get(self, "purpose")
@property
@pulumi.getter
def site(self) -> str:
"""
The name of the site to associate the network with.
"""
return pulumi.get(self, "site")
@property
@pulumi.getter
def subnet(self) -> str:
"""
The subnet of the network (CIDR address).
"""
return pulumi.get(self, "subnet")
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> int:
"""
The VLAN ID of the network.
"""
return pulumi.get(self, "vlan_id")
@property
@pulumi.getter(name="wanDns")
def wan_dns(self) -> Sequence[str]:
"""
DNS servers IPs of the WAN.
"""
return pulumi.get(self, "wan_dns")
@property
@pulumi.getter(name="wanEgressQos")
def wan_egress_qos(self) -> int:
"""
Specifies the WAN egress quality of service.
"""
return pulumi.get(self, "wan_egress_qos")
@property
@pulumi.getter(name="wanGateway")
def wan_gateway(self) -> str:
"""
The IPv4 gateway of the WAN.
"""
return pulumi.get(self, "wan_gateway")
@property
@pulumi.getter(name="wanIp")
def wan_ip(self) -> str:
"""
The IPv4 address of the WAN.
"""
return pulumi.get(self, "wan_ip")
@property
@pulumi.getter(name="wanNetmask")
def wan_netmask(self) -> str:
"""
The IPv4 netmask of the WAN.
"""
return pulumi.get(self, "wan_netmask")
@property
@pulumi.getter(name="wanNetworkgroup")
def wan_networkgroup(self) -> str:
"""
Specifies the WAN network group. One of either `WAN`, `WAN2` or `WAN_LTE_FAILOVER`.
"""
return pulumi.get(self, "wan_networkgroup")
@property
@pulumi.getter(name="wanType")
def wan_type(self) -> str:
"""
Specifies the IPV4 WAN connection type. One of either `disabled`, `static`, `dhcp`, or `pppoe`.
"""
return pulumi.get(self, "wan_type")
@property
@pulumi.getter(name="wanUsername")
def wan_username(self) -> str:
"""
Specifies the IPV4 WAN username.
"""
return pulumi.get(self, "wan_username")
@property
@pulumi.getter(name="xWanPassword")
def x_wan_password(self) -> str:
"""
Specifies the IPV4 WAN password.
"""
return pulumi.get(self, "x_wan_password")
class AwaitableGetNetworkResult(GetNetworkResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkResult(
dhcp_dns=self.dhcp_dns,
dhcp_enabled=self.dhcp_enabled,
dhcp_lease=self.dhcp_lease,
dhcp_start=self.dhcp_start,
dhcp_stop=self.dhcp_stop,
dhcpd_boot_enabled=self.dhcpd_boot_enabled,
dhcpd_boot_filename=self.dhcpd_boot_filename,
dhcpd_boot_server=self.dhcpd_boot_server,
domain_name=self.domain_name,
id=self.id,
igmp_snooping=self.igmp_snooping,
ipv6_interface_type=self.ipv6_interface_type,
ipv6_pd_interface=self.ipv6_pd_interface,
ipv6_pd_prefixid=self.ipv6_pd_prefixid,
ipv6_ra_enable=self.ipv6_ra_enable,
ipv6_static_subnet=self.ipv6_static_subnet,
name=self.name,
network_group=self.network_group,
purpose=self.purpose,
site=self.site,
subnet=self.subnet,
vlan_id=self.vlan_id,
wan_dns=self.wan_dns,
wan_egress_qos=self.wan_egress_qos,
wan_gateway=self.wan_gateway,
wan_ip=self.wan_ip,
wan_netmask=self.wan_netmask,
wan_networkgroup=self.wan_networkgroup,
wan_type=self.wan_type,
wan_username=self.wan_username,
x_wan_password=self.x_wan_password)
def get_network(id: Optional[str] = None,
name: Optional[str] = None,
site: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult:
"""
`Network` data source can be used to retrieve settings for a network by name or ID.
## Example Usage
```python
import pulumi
import pulumi_unifi as unifi
lan_network = unifi.get_network(name="LAN")
my_device = unifi.get_user(mac="01:23:45:67:89:ab")
my_network = unifi.get_network(id=my_device.network_id)
```
:param str id: The ID of the network.
:param str name: The name of the network.
:param str site: The name of the site to associate the network with.
"""
__args__ = dict()
__args__['id'] = id
__args__['name'] = name
__args__['site'] = site
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
if opts.plugin_download_url is None:
opts.plugin_download_url = _utilities.get_plugin_download_url()
__ret__ = pulumi.runtime.invoke('unifi:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value
return AwaitableGetNetworkResult(
dhcp_dns=__ret__.dhcp_dns,
dhcp_enabled=__ret__.dhcp_enabled,
dhcp_lease=__ret__.dhcp_lease,
dhcp_start=__ret__.dhcp_start,
dhcp_stop=__ret__.dhcp_stop,
dhcpd_boot_enabled=__ret__.dhcpd_boot_enabled,
dhcpd_boot_filename=__ret__.dhcpd_boot_filename,
dhcpd_boot_server=__ret__.dhcpd_boot_server,
domain_name=__ret__.domain_name,
id=__ret__.id,
igmp_snooping=__ret__.igmp_snooping,
ipv6_interface_type=__ret__.ipv6_interface_type,
ipv6_pd_interface=__ret__.ipv6_pd_interface,
ipv6_pd_prefixid=__ret__.ipv6_pd_prefixid,
ipv6_ra_enable=__ret__.ipv6_ra_enable,
ipv6_static_subnet=__ret__.ipv6_static_subnet,
name=__ret__.name,
network_group=__ret__.network_group,
purpose=__ret__.purpose,
site=__ret__.site,
subnet=__ret__.subnet,
vlan_id=__ret__.vlan_id,
wan_dns=__ret__.wan_dns,
wan_egress_qos=__ret__.wan_egress_qos,
wan_gateway=__ret__.wan_gateway,
wan_ip=__ret__.wan_ip,
wan_netmask=__ret__.wan_netmask,
wan_networkgroup=__ret__.wan_networkgroup,
wan_type=__ret__.wan_type,
wan_username=__ret__.wan_username,
x_wan_password=__ret__.x_wan_password)
@_utilities.lift_output_func(get_network)
def get_network_output(id: Optional[pulumi.Input[Optional[str]]] = None,
name: Optional[pulumi.Input[Optional[str]]] = None,
site: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkResult]:
"""
`Network` data source can be used to retrieve settings for a network by name or ID.
## Example Usage
```python
import pulumi
import pulumi_unifi as unifi
lan_network = unifi.get_network(name="LAN")
my_device = unifi.get_user(mac="01:23:45:67:89:ab")
my_network = unifi.get_network(id=my_device.network_id)
```
:param str id: The ID of the network.
:param str name: The name of the network.
:param str site: The name of the site to associate the network with.
"""
...
| 1.921875 | 2 |
radinput/create_npy_files.py | ropewe56/simpleco2 | 0 | 12795369 | <filename>radinput/create_npy_files.py
import os
import numpy as np
import os, sys
from timeit import default_timer as timer
import platform
script_root = os.path.abspath(os.path.dirname(__file__))
def interpolate(x0, y0, n):
"""Interpolate data onto a equidistant grid with n grid points
Arguments:
x0 {numpy float} -- [description]
y0 {numpy float} -- [description]
n {int} -- [description]
Returns:
(numpy, numpy) -- [description]
"""
x0 = np.array(x0)
y0 = np.array(y0)
n0 = x0.shape[0]
x = np.mgrid[x0[0]:x0[-1]:n*1j]
y = np.zeros(n)
j = 0
for i in range(n):
xx = x[i]
while not (x0[j] <= xx and x[i] <= x0[j+1]):
j += 1
if (j > n0-2):
break
j = min(j, n0-2)
v = (x[i] - x0[j]) / (x0[j+1] - x0[j])
y[i] = (1.0 - v) * y0[j] + v * y0[j+1]
return x, y
def make_lookup_for_Q(CO2_Q_dir, Tmax=300.0):
"""Create lokup tables for the CO2 partition function
https://hitran.org/docs/iso-meta/
global ID local ID Formula AFGL code Abundance Molar Mass /g·mol-1 Q(296 K) Q (full range) gi
7 1 12C16O2 626 0.984204 43.98983 286.09 q7.txt 1
Arguments:
CO2_Q_file {str} -- file with T, Q values (HITRAN data)
n {int} -- number of T,Q pairs
Returns:
T, Q -- [description]
"""
with open(os.path.join(CO2_Q_dir, "q7-q122-description.txt"), 'r') as ein:
lines = ein.read().splitlines()[2:]
paths = []
isotope_id = []
isotope_c = []
isotope_m = []
gis = []
# mass[kg] => g/mol * mass_factor
mass_factor = 1.0e-3/6.02214076e23
# read the dexription file
for i, line in enumerate(lines):
ls = line.split()
global_id = int(ls[0])
isotope_id.append(int(ls[1]))
isotope_c.append(float(ls[4]))
paths.append(ls[7])
gis.append(int(ls[8]))
mass = float(ls[5]) * mass_factor
isotope_m.append(mass)
T = []
Q = []
# read the partition function files
for path in paths:
with open(os.path.join(CO2_Q_dir, path), 'r') as ein:
lines = ein.read().splitlines()
TQ = np.array([[float(x) for x in line.split()] for line in lines])
TT = np.array(TQ[:,0])
dT = TT[1:]-TT[:-1]
#if np.amax(dT) > 1.0 or np.amin(dT) < 1.0:
# print(TT)
QQ = np.array(TQ[:,1])
index = np.where(TT < Tmax, True, False)
T.append(TT[index])
Q.append(QQ[index])
n = T[0].shape[0]
m = len(T)
TQ = np.zeros((n, m + 1), np.double)
TQ[:,0] = T[0]
for i in range(m):
#if np.abs(np.sum(T[0] - T[i])) > 1.0e-3:
# print(np.abs(np.sum(T[0] - T[i])))
TQ[:,i+1] = Q[i]
return TQ, paths, isotope_id, isotope_c, isotope_m, gis
def make_T_p_over_height(data_dir):
"""Create
Arguments:
Returns:
[type] -- [description]
"""
h1 = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0]
p1 = [1013.0, 902.0, 802.0, 710.0, 628.0, 554.0, 487.0, 426.0, 372.0, 324.0, 281.0, 243.0, 209.0]
h2 = [ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 70.0]
p2 = [179.0, 153.0, 130.0, 111.0, 95.0, 81.2, 69.5, 59.5, 51.0, 43.4, 27.7, 13.2, 6.52, 3.33, 1.76, 0.951, 0.067]
n = 100
h_p = np.array(h1 + h2) * 1.0e3
p_p = np.array(p1 + p2) * 1.0e2
h_p, p_p = interpolate(h_p, p_p, n)
h_T = np.array([0.0, 13.0, 17.0, 25.0, 30.0, 45.0, 50.0, 70.0])*1.0e3
T_T = np.array([288.0, 215.8, 215.7, 225.1, 233.7, 269.9, 275.7, 218.1])
h_T, T_T = interpolate(h_T, T_T, n)
T = np.zeros((h_T.shape[0], 2), np.double)
p = np.zeros((h_p.shape[0], 2), np.double)
h_T_path = os.path.join(data_dir, "h_T.npy")
h_p_path = os.path.join(data_dir, "h_p.npy")
T[:,0] = h_T
T[:,1] = T_T
p[:,0] = h_p
p[:,1] = p_p
np.save(h_T_path, T)
np.save(h_p_path, p)
def make_spectrum(hitran_file, data_dir, lmin, lmax, lids, abus, masss, gis):
h = 6.62607004e-34
c0 = 2.99792458e8
with open(os.path.join(data_dir, hitran_file + ".out"), 'r') as ein:
lines = ein.read().splitlines()[1:]
m = np.array([[float(x) for x in line.split(',')] for line in lines])
mid = m[:, 0]
iid = m[:, 1]
ν = m[:, 2]
ν = ν / 1.0e-2
λ = 1.0 / ν
index1 = np.where(λ >= lmin, True, False)
index2 = np.where(λ <= lmax, True, False)
index = index1*index2
λ = λ[index]
iid = iid[index]
S = m[index, 3]
A = m[index, 4]
γ_a = m[index, 5]
γ_s = m[index, 6]
ν_l = m[index, 7]
n_a = m[index, 8]
δ_a = m[index, 9]
g_u = m[index, 10]
g_l = m[index, 11]
ν_l = ν_l / 1.0e-2 # cm => m, bar => pascal
γ_a = γ_a / 1.0e-2 * 1.0e-5 # cm => m, bar => pascal
γ_s = γ_s / 1.0e-2 * 1.0e-5 # cm => m, bar => pascal
δ_a = δ_a / 1.0e-2 * 1.0e-5 # cm => m, bar => pascal
ΔE_ul = h * c0 / λ
E_l = h * c0 * ν_l
E_u = E_l + ΔE_ul
n = A.shape[0]
c = np.zeros((n, 14))
c[:, 0] = λ
c[:, 1] = E_l
c[:, 2] = E_u
c[:, 3] = S
c[:, 4] = A
c[:, 5] = γ_a
c[:, 6] = γ_s
c[:, 7] = n_a
c[:, 8] = δ_a
c[:, 9] = g_u
c[:, 10] = g_l
i = np.argmax(c[:,3])
#print(i, c[i,:])
# 0 1 2 3 4 5 6 7 8 9 10 11
# 1 2 3 4 5 6 7 8 9 0 11 12
itoj = [9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 10, 11]
for i in range(n):
ii = int(iid[i])
j = itoj[ii]
c[i, 11] = j
c[i, 12] = masss[j]
c[i, 13] = abus[j]
np.save(os.path.join(data_dir, hitran_file + ".npy"), c)
for j in range(30,50):
s = []
for i in range(14):
s.append("%12.6e" % c[j, i])
#print(" ".join(s))
def create_npy_data_files(data_dir):
make_T_p_over_height(data_dir)
CO2_Q_dir = os.path.join(data_dir, "CO2_Q")
TQ, paths, isotope_id, isotope_c, isotope_m, gis = make_lookup_for_Q(CO2_Q_dir, Tmax = 300.0)
TQ_path = os.path.join(data_dir, "T_Q.npy")
np.save(TQ_path, TQ)
lmin, lmax = 1.19e-5, 1.81e-5
hitran_file = os.path.join(data_dir, "CO2_rwfmt_ISO-0-12_wl-12-18-mum")
make_spectrum(hitran_file, data_dir, lmin, lmax, isotope_id, isotope_c, isotope_m, gis) | 2.625 | 3 |
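A quick, hedged check of the interpolate() helper above with made-up sample points, just to show that the output lies on an equidistant grid with piecewise-linear values between the given knots:

x, y = interpolate([0.0, 1.0, 3.0], [0.0, 10.0, 30.0], n=7)
print(x)  # 7 evenly spaced points: 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0
print(y)  # [ 0.  5. 10. 15. 20. 25. 30.], linear between (0,0), (1,10), (3,30)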
debug-app-qt/gen_linalgd.py | blaind/ttf2mesh | 63 | 12795370 | #!/usr/bin/env python
import sys
import os
#----------------------------------------------
file = open("linalgf.h", "r")
s = file.read()
file.close()
s = s.replace("LINALGF_H", "LINALGD_H");
s = s.replace("float", "double");
s = s.replace("mat2f", "mat2d");
s = s.replace("mat3f", "mat3d");
s = s.replace("mat4f", "mat4d");
s = s.replace("vec2f", "vec2d");
s = s.replace("vec3f", "vec3d");
s = s.replace("vec4f", "vec4d");
s = s.replace("cpxf", "cpxd");
s = s.replace("v2f_", "v2d_");
s = s.replace("v3f_", "v3d_");
s = s.replace("v4f_", "v4d_");
s = s.replace("_v2f", "_v2d");
s = s.replace("_v3f", "_v3d");
s = s.replace("_v4f", "_v4d");
s = s.replace("m2f_", "m2d_");
s = s.replace("m3f_", "m3d_");
s = s.replace("m4f_", "m4d_");
s = s.replace("_m2f", "_m2d");
s = s.replace("_m3f", "_m3d");
s = s.replace("_m4f", "_m4d");
s = s.replace("Vec2f", "Vec2d");
s = s.replace("Vec3f", "Vec3d");
s = s.replace("Mat2f", "Mat2d");
s = s.replace("Mat3f", "Mat3d");
s = s.replace("Mat4f", "Mat4d");
s = s.replace("linalgf_", "linalgd_");
s = s.replace("linsolverf", "linsolverd");
file = open("linalgd.h", "w")
file.write(s)
file.close()
#----------------------------------------------
file = open("linalgf.c", "r")
s = file.read()
file.close()
s = s.replace("linalgf.h", "linalgd.h");
s = s.replace("float", "double");
s = s.replace("mat2f", "mat2d");
s = s.replace("mat3f", "mat3d");
s = s.replace("mat4f", "mat4d");
s = s.replace("vec2f", "vec2d");
s = s.replace("vec3f", "vec3d");
s = s.replace("vec4f", "vec4d");
s = s.replace("cpxf", "cpxd");
s = s.replace("v2f_", "v2d_");
s = s.replace("v3f_", "v3d_");
s = s.replace("v4f_", "v4d_");
s = s.replace("_v2f", "_v2d");
s = s.replace("_v3f", "_v3d");
s = s.replace("_v4f", "_v4d");
s = s.replace("m2f_", "m2d_");
s = s.replace("m3f_", "m3d_");
s = s.replace("m4f_", "m4d_");
s = s.replace("_m2f", "_m2d");
s = s.replace("_m3f", "_m3d");
s = s.replace("_m4f", "_m4d");
s = s.replace("linalgf_", "linalgd_");
s = s.replace("linsolverf", "linsolverd");
s = s.replace("find_leading_order_f", "find_leading_order_d");
s = s.replace("linear_solver_base_f", "linear_solver_base_d");
s = s.replace("det2f", "det2d");
s = s.replace("det3f", "det3d");
file = open("linalgd.c", "w")
file.write(s)
file.close()
#----------------------------------------------
file = open("linalgf.h", "r")
linalgfh = file.read()
file.close()
file = open("linalgd.h", "r")
linalgdh = file.read()
file.close()
file = open("linalgf.c", "r")
linalgfc = file.read()
file.close()
file = open("linalgd.c", "r")
linalgdc = file.read()
file.close()
linalgh = linalgfh + linalgdh
linalgc = linalgfc + linalgdc
linalgc = linalgc.replace("#include \"linalgf.h\"\n", "")
linalgc = linalgc.replace("#include \"linalgd.h\"\n", "")
linalgc = linalgc.replace("#include <assert.h>\n", "")
linalgc = "#include \"linalg.h\"\n#include <assert.h>\n" + linalgc
file = open("linalg.h", "w")
file.write(linalgh)
file.close()
file = open("linalg.c", "w")
file.write(linalgc)
file.close()
| 2.640625 | 3 |
aoc2020/day3.py | rfrazier716/aoc_2020 | 0 | 12795371 | <reponame>rfrazier716/aoc_2020
from pathlib import Path
import numpy as np
def tree_in_path(map_line,map_x_coord):
"""
Checks if a tree is at the x-coord of the map line, looping if x is > len(map_line)
returns: True if a tree is in the path, False otherwise
rtype: Bool
"""
offset = map_x_coord % len(map_line) # modulo operator for rollover
return map_line[offset]=='#'
def traverse_map(map, x_step, y_step):
"""
iterates over a "map" (array of strings) starting at the top left until reaching the
bottom of the map. every iteration advances position by <x_step,y_step> and checks if
a tree is hit
returns: the total number of Trees hit
rtype: int
"""
trees_hit = 0
map_depth = len(map)
y_steps = range(0,map_depth,y_step)
for j,step in enumerate(y_steps):
trees_hit += 1 if tree_in_path(map[step],j*x_step) else 0
return trees_hit
if __name__ == "__main__":
# Load the puzzle input into a map
input_file = Path(__file__).resolve().parents[2] / "inputs" / "day3.txt"
with open(input_file) as fii:
map = [line.rstrip('\n') for line in fii] # Strip newline characters
# Part one of the puzzle, traverse the map with a 3-1 slope and count trees
# encountered
print(f"Part One Solution: {traverse_map(map,3,1)}")
# part two of the puzzle - try the 5 given slopes and spit out the total product
slopes_to_test = [[1,1],[3,1],[5,1],[7,1],[1,2]]
trees_hit_per_slope = [traverse_map(map,*slope) for slope in slopes_to_test]
product_of_trees = np.prod(trees_hit_per_slope)
# print the results for part 2
print() # print a newline
for slope,hit_count in zip(slopes_to_test,trees_hit_per_slope):
print(f"Slope of {slope} results in {hit_count} trees hit")
print(f"Part Two Solution: {product_of_trees}")
| 3.953125 | 4 |
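A tiny worked example of the wrap-around behaviour in tree_in_path/traverse_map above; the three-row map is invented for illustration.

toy_map = ["..#.",   # row 0
           "#...",   # row 1
           "..#."]   # row 2
# slope (x_step=3, y_step=1) visits columns 0, 3, 6; column 6 wraps to 6 % 4 == 2
print(tree_in_path(toy_map[2], 6))  # True: wraps around to column 2, which is '#'
print(traverse_map(toy_map, 3, 1))  # 1: only the last row contributes a hit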
test_scrapy/test_scrapy/test.py | lijie28/python_demo | 0 | 12795372 | #coding=utf-8
import requests
from lxml import etree
url = 'http://weibo.cn/fishli28' # change this to your Weibo profile URL
url_login = 'https://login.weibo.cn/login/'
html = requests.get(url_login).content
selector = etree.HTML(html)
password = selector.xpath('//input[@type="password"]/@name')[0]
vk = selector.xpath('//input[@name="vk"]/@value')[0]
action = selector.xpath('//form[@method="post"]/@action')[0]
imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0]
index = imgsrc.find('cpt=')
capId = imgsrc[index + 4:]
print imgsrc ### captcha image URL
code = raw_input("plz input code:")
print action
print password
print vk
new_url = url_login + action
data = {
'mobile' : '<EMAIL>', # your Weibo account
password : '<PASSWORD>', # your Weibo password
'remember' : 'on',
'backURL' : 'https://weibo.cn/fishli28', # fill in your Weibo profile URL here
'backTitle' : u'微博',
'tryCount' : '',
'vk' : vk,
'capId':capId,
'code':code,
'submit' : u'登录'
}
newhtml = requests.post(new_url,data=data).content
new_selector = etree.HTML(newhtml)
content = new_selector.xpath('//span[@class="ctt"]')
for each in content:
text = each.xpath('string(.)')
print text | 2.75 | 3 |
gradeit/visualization.py | NREL/gradeit | 0 | 12795373 | <reponame>NREL/gradeit<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
def plot_data(df, general_filter, plot_param):
if plot_param[0]:
# visualization of elevation data
if general_filter:
plt.plot(df['cumulative_uniform_distance_ft'], df['elevation_ft_filtered'],
df['cumulative_original_distance_ft'], df['elevation_ft'])
plt.ylabel('Elevation [ft]')
plt.xlabel('Distance [ft]')
plt.grid()
plt.legend(['filtered', 'unfiltered'])
plt.title('Elevation vs. Distance')
plt.show()
else:
plt.plot(df['cumulative_original_distance_ft'], df['elevation_ft'])
plt.ylabel('Elevation [ft]')
plt.xlabel('Distance [ft]')
plt.grid()
plt.title('Elevation vs. Distance')
plt.show()
if plot_param[1]:
# visualization of grade data
if general_filter:
plt.plot(df['cumulative_uniform_distance_ft'], df['grade_dec_filtered'],
df['cumulative_original_distance_ft'], df['grade_dec_unfiltered'])
plt.ylabel('Grade')
plt.xlabel('Distance [ft]')
plt.grid()
plt.legend(['filtered', 'unfiltered'])
plt.title('Grade vs. Distance')
plt.show()
else:
plt.plot(df['cumulative_original_distance_ft'], df['grade_dec_unfiltered'])
plt.ylabel('Grade')
plt.xlabel('Distance [ft]')
plt.grid()
plt.title('Grade vs. Distance')
plt.show()
if not plot_param[0] and not plot_param[1]:
print('No visualization selected.') | 2.921875 | 3 |
src/main.py | UAws/dear-gitlab-workhorse-ee | 0 | 12795374 | from selenium_controller.github import Github
from selenium_controller.gitlab import Gitlab
from utils.shell_executor.executor import execute_now
def main():
github = Github()
gitlab = Gitlab()
new_tags = list()
execute_now('git fetch --all')
gitlab_versions = gitlab.fetch_gitlab_map_versions()
github_tags = github.fetch_github_available_docker_versions()
github_tags = [t.replace('v', '').replace('.m1', '') for t in github_tags]
# collect the GitLab versions (major version > 12) that do not yet have a matching GitHub tag
for gitlab_version in gitlab_versions:
if gitlab_version not in github_tags and int(gitlab_version.split('.')[0]) > 12:
new_tags.append(gitlab_version)
for tag in new_tags:
github.create_new_branch(tag)
if __name__ == "__main__":
main()
| 2.765625 | 3 |
examples/ecs/v1/tag.py | wangrui1121/huaweicloud-sdk-python | 43 | 12795375 | # -*-coding:utf-8 -*-
from openstack import connection
# create connection
username = "xxxxxx"
password = "<PASSWORD>"
projectId = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # tenant ID
userDomainId = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # user account ID
auth_url = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # endpoint url
conn = connection.Connection(auth_url=auth_url,
user_domain_id=userDomainId,
project_id=projectId,
username=username,
password=password)
def create_server_tags(server_id):
data = {
"tags": [
{
"key": "key1",
"value": "value1"
},
{
"key": "key2",
"value": "value3"
}
]
}
conn.ecs.create_server_tags(server_id, **data)
def delete_server_tags(server_id):
data = {
"tags": [
{
"key": "key1",
"value": "value1"
}
]
}
conn.ecs.delete_server_tags(server_id, **data)
def get_server_tags(server_id):
tags = conn.ecs.get_server_tags(server_id)
for tag in tags:
print(tag.key, tag.value)
def get_project_tags():
tags = conn.ecs.get_project_tags()
for tag in tags:
print(tag.key, tag.values)
if __name__ == "__main__":
server_id = "b0a9d2b4-2cae-4b66-a6ba-6af70f3bd7f8"
create_server_tags(server_id)
get_server_tags(server_id)
delete_server_tags(server_id)
get_project_tags()
| 2.34375 | 2 |
.github/workflows/set_env.py | kaleido-public/django-client-framework-typescript | 0 | 12795376 | #!/usr/bin/env python3
import os
from subprocess import CalledProcessError, run
from typing import Dict, List, Union
import json
from pathlib import Path
import click
__dir__ = Path(__file__).parent.absolute()
def github_repo_name() -> str:
if repo_full := os.environ.get("GITHUB_REPOSITORY"):
return repo_full.split("/")[1]
else:
return ""
def git_list_changes() -> List[str]:
return run(
["git", "log", "-1", "--name-only", "--pretty="],
check=True,
capture_output=True,
text=True,
).stdout.splitlines()
def git_branch_name() -> str:
if fullref := os.environ.get("GITHUB_REF", ""):
return fullref[len("refs/heads/") :]
else:
return ""
def target_branch() -> str:
if git_branch_name() == "staging":
return "release"
else:
return "staging"
def git_commit_title() -> str:
return run(
["git", "log", "-1", r"--pretty=format:%s"],
check=True,
capture_output=True,
text=True,
).stdout.splitlines()[0]
def git_short_sha() -> str:
if fullsha := os.environ.get("GITHUB_SHA", ""):
return fullsha[:7]
else:
return ""
def is_dev_branch() -> bool:
return git_branch_name() not in ["release", "staging"]
def ci_yaml_changed() -> bool:
return ".github/workflows/ci.yml" in git_list_changes()
def docker_tag() -> str:
return f"{git_branch_name()}-{git_short_sha()}"
def docker_stack_name() -> str:
return f"{github_repo_name()}-{git_branch_name()}-{git_short_sha()}"
def should_upload_package() -> bool:
return git_branch_name() == "release"
def should_upload_image() -> bool:
return git_branch_name() in ["release", "staging"]
def package_version() -> str:
with open("package.json", "rb") as content:
package = json.load(content)
return package["version"]
def pr_body() -> str:
if target_branch() == "staging":
return 'To merge into the staging branch, please use "Rebase and merge", or "Squash and merge".'
elif target_branch() == "release":
return 'To merge into the release branch, please use "Create a merge commit".'
return ""
def overwrite_path() -> str:
return ":".join(
[
str(__dir__),
os.environ["PATH"],
]
)
def get_env() -> Dict[str, Union[str, bool]]:
return {
"PROJECT_NAME": github_repo_name(),
"DOCKER_TAG": docker_tag(),
"CI_YAML_CHANGED": ci_yaml_changed(),
"IS_DEV_BRANCH": is_dev_branch(),
"BRANCH_NAME": git_branch_name(),
"TARGET_BRANCH": target_branch(),
"COMMIT_TITLE": git_commit_title(),
"SHOULD_UPLOAD_PACKAGE": should_upload_package(),
"SHOULD_UPLOAD_IMAGE": should_upload_image(),
"PACKAGE_VERSION": package_version(),
"PATH": overwrite_path(),
"PR_BODY": pr_body(),
}
@click.command()
@click.option("-w", "--write", is_flag=True)
def main(write):
content = ""
for key, val in get_env().items():
if write:
content += f"{key}={val}\n"
else:
content += f"{key}={val.__repr__()}\n"
if write:
with open(os.environ["GITHUB_ENV"], "a") as env_file:
env_file.write(content)
else:
print(content, end="")
if __name__ == "__main__":
try:
main()
except CalledProcessError as err:
exit(err.stdout + err.stderr)
| 2.21875 | 2 |
tests.py | MoritzS/licht | 2 | 12795377 | <reponame>MoritzS/licht<gh_stars>1-10
#!/usr/bin/env python
import struct
import unittest
from licht.base import LightColor
from licht.utils import RESERVED, Bitfield, Field, FieldType
class BitFieldTest(unittest.TestCase):
def assertFieldsEqual(self, field, field_dict):
for key, val in field_dict.items():
self.assertEqual(field[key], val)
@classmethod
def setUpClass(cls):
class SimpleBitfield(Bitfield):
fields = [
Field('foo', 16, FieldType.int),
Field('bar', 6 * 8, FieldType.bytes),
Field('baz', 64, FieldType.float),
]
class FullBitfield(Bitfield):
fields = [
Field('foo', 1, FieldType.bool),
Field('bar', 30, FieldType.uint),
Field('baz', 33, FieldType.uint),
Field('fiz', 32, FieldType.float),
]
class ReservedSimpleBitfield(Bitfield):
fields = [
Field(RESERVED, 16),
Field('foo', 16, FieldType.bytes),
Field(RESERVED, 8),
Field('bar', 16, FieldType.bytes),
]
class ReservedFullBitfield(Bitfield):
fields = [
Field(RESERVED, 4),
Field('foo', 12, FieldType.uint),
Field(RESERVED, 5),
Field('bar', 3, FieldType.uint),
]
cls.SimpleBitfield = SimpleBitfield
cls.FullBitfield = FullBitfield
cls.ReservedSimpleBitfield = ReservedSimpleBitfield
cls.ReservedFullBitfield = ReservedFullBitfield
def test_to_bytes_simple(self):
f = self.SimpleBitfield(foo=1234, bar=b'hello!', baz=3.14)
expected = (1234).to_bytes(2, 'little') + b'hello!' + struct.pack('<d', 3.14)
self.assertEqual(f.to_bytes(), expected)
def test_to_bytes_full(self):
f = self.FullBitfield(foo=True, bar=123456, baz=987654, fiz=1.55)
expected = (((1 << 30) | 123456) << 33) | 987654
expected = expected.to_bytes(8, 'little') + struct.pack('<f', 1.55)
self.assertEqual(f.to_bytes(), expected)
def test_from_bytes_simple(self):
value = (-1234).to_bytes(2, 'little', signed=True) + b'foobar' + struct.pack('<d', 5.25)
f = self.SimpleBitfield.from_bytes(value)
expected = {'foo': -1234, 'bar': b'foobar', 'baz': 5.25}
self.assertFieldsEqual(f, expected)
def test_from_bytes_full(self):
val1 = (((1 << 30) | 9999) << 33) | 123123
value = val1.to_bytes(8, 'little') + struct.pack('<f', 6.125)
f = self.FullBitfield.from_bytes(value)
expected = {'foo': True, 'bar': 9999, 'baz': 123123, 'fiz': 6.125}
self.assertFieldsEqual(f, expected)
def test_reserved_simple(self):
f = self.ReservedSimpleBitfield(foo=b'qq', bar=b'aa')
self.assertEqual(f.to_bytes(), b'\x00\x00qq\x00aa')
data = b'zzqqzaa'
f = self.ReservedSimpleBitfield.from_bytes(data)
self.assertFieldsEqual(f, {'foo': b'qq', 'bar': b'aa'})
def test_reserved_full(self):
f = self.ReservedFullBitfield(foo=3456, bar=3)
self.assertEqual(f.to_bytes(), b'\x80\x0d\x03')
data = b'\x80\x9d\xab'
f = self.ReservedFullBitfield.from_bytes(data)
self.assertFieldsEqual(f, {'foo': 3456, 'bar': 3})
class ColorsTest(unittest.TestCase):
test_colors = [
((255, 0, 0), ( 0, 1.0, 1.0)),
((255, 255, 0), ( 60, 1.0, 1.0)),
(( 0, 255, 0), (120, 1.0, 1.0)),
(( 0, 255, 255), (180, 1.0, 1.0)),
(( 0, 0, 255), (240, 1.0, 1.0)),
((255, 0, 255), (300, 1.0, 1.0)),
]
def test_from_rgb(self):
for rgb, hsb in self.test_colors:
self.assertEqual(LightColor.from_rgb(rgb), hsb)
def test_to_rgb(self):
for rgb, hsb in self.test_colors:
self.assertEqual(LightColor(*hsb).rgb, rgb)
if __name__ == '__main__':
unittest.main()
| 2.6875 | 3 |
example.py | ebs-universe/cefkivy | 0 | 12795378 | <reponame>ebs-universe/cefkivy<gh_stars>0
from kivy.config import Config
Config.set('kivy', 'log_level', 'debug')
Config.set('kivy', 'keyboard_mode', 'systemandmulti')
from kivy_garden.ebs.cefkivy.browser import CefBrowser, cefpython
from kivy.app import App
class CefBrowserApp(App):
def build(self):
return CefBrowser(start_url='https://india.gov.in/')
def run():
CefBrowserApp().run()
cefpython.Shutdown()
if __name__ == '__main__':
run()
| 1.882813 | 2 |
twitter_scraper/models.py | debianitram/simple-twitter-scraper | 0 | 12795379 | from django.db import models
from django.db.models.signals import post_save
from . import tasks
### Define Querysets
class TwitterProfileQuerySet(models.QuerySet):
def search(self, query):
return self.filter(name__icontains=query)
class TaskQuerySet(models.QuerySet):
def search(self, query):
return self.filter(query__icontains=query)
def pending(self):
return self.filter(status='PD')
def done(self):
return self.filter(status='DN')
### Define Models
class TwitterProfile(models.Model):
class Meta:
ordering = ('popularity', 'name')
tw_id = models.PositiveIntegerField(unique=True)
name = models.CharField(max_length=200)
description = models.TextField(blank=True, null=True)
image = models.URLField(blank=True, null=True)
popularity = models.PositiveIntegerField(blank=True, default=0)
objects = models.Manager()
custom = TwitterProfileQuerySet.as_manager()
__str__ = lambda self: self.name
def update_(self, tw_user):
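        # Compare each field against the fresh Twitter API user object and only
        # persist the ones that actually changed (partial save via update_fields).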
update_fields = []
if self.name != tw_user.name:
self.name = tw_user.name
update_fields.append('name')
if self.description != tw_user.description:
self.description = tw_user.description
update_fields.append('description')
if self.image != tw_user.profile_image_url:
self.image = tw_user.profile_image_url
update_fields.append('image')
if self.popularity != tw_user.followers_count:
self.popularity = tw_user.followers_count
update_fields.append('popularity')
if update_fields:
self.save(update_fields=update_fields)
class Task(models.Model):
class Meta:
ordering = ('query', )
PENDING = 'PD'
DONE = 'DN'
STATUS = (
(PENDING, 'Pending'),
(DONE, 'Done')
)
query = models.CharField(max_length=100)
status = models.CharField(max_length=2, choices=STATUS, default=PENDING)
objects = models.Manager()
custom = TaskQuerySet.as_manager()
def __str__(self):
return "%s -> Status: %s" % (self.query, self.get_status_display())
def update_to_done(self):
if self.status is not self.DONE:
self.status = self.DONE
self.save()
@staticmethod
def run(**kwargs):
if kwargs.get('created', False) or 'from_view' in kwargs:
tasks.twitter_scraper.delay(kwargs['instance'].id)
# Signals
post_save.connect(Task.run, Task) | 2.15625 | 2 |
7th.py | writtik/Hactober-Fest-2020 | 0 | 12795380 | largest = None
smallest = None
while True:
    num = input("Enter a number: ")
    if num == "done" :
        break
    try:
        inum = int(num)
    except ValueError:
        print("Invalid Number")
        continue
    if largest is None or inum > largest:
        largest = inum
    if smallest is None or inum < smallest:
        smallest = inum
print("Maximum is", largest)
print("Minimum is", smallest)
| 4.09375 | 4 |
CondCore/ESSources/test/python/load_from_globaltag_cfg.py | PKUfudawei/cmssw | 1 | 12795381 | from __future__ import print_function
import FWCore.ParameterSet.Config as cms
from Configuration.AlCa.autoCond import autoCond
process = cms.Process("TEST")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )
process.source = cms.Source("EmptyIOVSource",
lastValue = cms.uint64(3),
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
interval = cms.uint64(1)
)
from CondCore.ESSources.GlobalTag import GlobalTag
# Prepare the list of globalTags
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
globalTag = GlobalTag(autoCond['run2_data'],"frontier://FrontierProd/CMS_CONDITIONS")
process.GlobalTag.connect = cms.string(globalTag.connect())
process.GlobalTag.globaltag = globalTag.gt()
print("Final connection string =", process.GlobalTag.connect)
print("Final globalTag =", process.GlobalTag.globaltag)
process.path = cms.Path()
| 1.710938 | 2 |
test/test_dataToCode/test_to_python/test_system/test_system.py | CoffeeOverflow/drawtocode | 9 | 12795382 | <filename>test/test_dataToCode/test_to_python/test_system/test_system.py
import pytest
import os
import filecmp
import subprocess
from src.dataToCode.dataClasses.attribute import Attribute
from src.dataToCode.dataClasses.classData import ClassData
from src.dataToCode.dataClasses.interface import Interface
from src.dataToCode.dataClasses.method import Method
from src.dataToCode.write_files import write_files
from src.dataToCode.dataClasses.visibility import Visibility
from src.dataToCode.dataClasses.modifier import Modifier
def test_strategy_example(tmpdir):
def create_do_algorithm():
attribute = Attribute("data", "str")
method = Method("doAlgorithm", parameters=[attribute])
return method
def create_strategy():
method = create_do_algorithm()
strategy = Interface("Strategy", methods=[method])
return strategy
def create_context():
attribute = Attribute("strategy", "Strategy",
visibility=Visibility.public)
method = Method("doSomeBusinessLogic")
context = ClassData("Context", methods=[method], fields=[attribute])
return context
def create_concrete_a():
method = create_do_algorithm()
strategy = create_strategy()
concrete_a = ClassData("ConcreteStrategyA", methods=[method],
implementations=[strategy])
return concrete_a
def create_concrete_b():
method = create_do_algorithm()
strategy = create_strategy()
concrete_b = ClassData("ConcreteStrategyB", methods=[method],
implementations=[strategy])
return concrete_b
objects = [create_strategy(), create_context(), create_concrete_a(),
create_concrete_b()]
write_files(objects, tmpdir, "python")
files_path = ["strategy.py", "context.py", "concrete_strategy_a.py",
"concrete_strategy_b.py"]
strategy_path = os.path.abspath(os.path.join(__file__,
"../strategy_example"))
generated_path = [os.path.join(tmpdir, x) for x in files_path]
truth_path = [os.path.join(strategy_path, x) for x in files_path]
for truth_file_path, generated_file_path in zip(truth_path,
generated_path):
assert filecmp.cmp(truth_file_path, generated_file_path)
def test_strategy_xml(tmpdir):
main_path = os.path.abspath(os.path.join(__file__,"../../../../../main.py"))
xml_path = os.path.abspath(os.path.join(__file__,"../../../../strategy.xml"))
subprocess.run(["python3", main_path,
f"--xml_file={xml_path}", f"--code_path={tmpdir}",
"--language=python"])
files_path = ["strategy.py", "context.py", "concrete_strategy_a.py",
"concrete_strategy_b.py"]
strategy_path = os.path.abspath(os.path.join(__file__,
"../strategy_example"))
generated_path = [os.path.join(tmpdir, x) for x in files_path]
truth_path = [os.path.join(strategy_path, x) for x in files_path]
for truth_file_path, generated_file_path in zip(truth_path,
generated_path):
assert filecmp.cmp(truth_file_path, generated_file_path)
def test_ultimate_example(tmpdir):
def create_spell():
method = Method("doEffect")
interface = Interface("ISpell", methods=[method])
return interface
def create_food():
method = Method("getNutrients", return_type="str")
interface = Interface("IFood", methods=[method])
return interface
def create_weapon():
name = Attribute("name", "str", visibility=Visibility.public)
age = Attribute("age", "int", visibility=Visibility.private)
attribute = Attribute("attribute", "Attribute",
visibility=Visibility.protected)
getAttribute = Method("getAttribute", return_type="Attribute")
setAttribute = Method("setAttribute", return_type="void",
parameters=[attribute])
weapon = ClassData("Weapon", methods=[getAttribute, setAttribute],
fields=[name, age, attribute])
return weapon
def create_attribute():
method = Method("method")
field = Attribute("field", "Type", visibility=Visibility.public)
attribute = ClassData("Attribute", methods=[method],
fields=[field])
return attribute
def create_walk():
method = Method("walk")
interface = Interface("IWalk", methods=[method])
return interface
def create_attack():
damage = Attribute("damage", "int", visibility=Visibility.public)
method = Method("attack", parameters=[damage])
interface = Interface("IAttack", methods=[method])
return interface
def create_orc():
name = Attribute("name", "str", visibility=Visibility.public)
age = Attribute("age", "int", visibility=Visibility.private)
damage = Attribute("damage", "int", visibility=Visibility.public)
hours = Attribute("hours", "int", visibility=Visibility.public)
walk = create_walk()
attack_interface = create_attack()
attack_method = Method("attack", parameters=[damage])
sleep = Method("sleep", parameters=[hours],
visibility=Visibility.private)
orc = ClassData("Orc", methods=[attack_method, sleep],
fields=[name, age],
implementations=[attack_interface, walk])
return orc
def create_high_orc():
damage = Attribute("damage", "int", visibility=Visibility.public)
hours = Attribute("hours", "int", visibility=Visibility.public)
spell = Attribute("spell", "ISpell", visibility=Visibility.public)
attack = Method("attack", parameters=[damage],
modifier=Modifier.override)
sleep = Method("sleep", parameters=[hours],
visibility=Visibility.private,
modifier=Modifier.override)
orc = create_orc()
high_orc = ClassData("HighOrc", methods=[attack, sleep],
fields=[spell], inheritances=[orc])
return high_orc
def create_fat_orc():
food = Attribute("food", "IFood", visibility=Visibility.public)
eat = Method("eat", parameters=[food])
orc = create_orc()
fat_orc = ClassData("FatOrc", methods=[eat],
inheritances=[orc])
return fat_orc
def create_obese_orc():
food = Attribute("food", "IFood", visibility=Visibility.public)
heart_attack = Attribute("heartAttackChance", "int",
visibility=Visibility.public)
eat = Method("eat", parameters=[food], modifier=Modifier.override)
fat_orc = create_fat_orc()
obese_orc = ClassData("ObeseOrc", methods=[eat],
fields=[heart_attack], inheritances=[fat_orc])
return obese_orc
objects = [create_spell(), create_food(), create_weapon(),
create_attribute(), create_attack(), create_walk(),
create_orc(), create_high_orc(), create_fat_orc(),
create_obese_orc()]
write_files(objects, tmpdir, "python")
ultimate_path = os.path.abspath(os.path.join(__file__,
"../ultimate_example"))
all_files_path = os.listdir(ultimate_path)
files_path = []
for file_path in all_files_path:
if file_path.endswith(".py"):
files_path.append(file_path)
generated_path = [os.path.join(tmpdir, x) for x in files_path]
truth_path = [os.path.join(ultimate_path, x) for x in files_path]
for truth_file_path, generated_file_path in zip(truth_path,
generated_path):
assert filecmp.cmp(truth_file_path, generated_file_path)
| 2.390625 | 2 |
__init__.py | Sleemanmunk/approximate-randomization | 5 | 12795383 | from .approximate_randomization import meandiff, meanlt, meangt, chanceByChance | 0.921875 | 1 |
BurstCube/NoahSim/GRBgenerator.py | BurstCube/Simulation | 0 | 12795384 | <gh_stars>0
from healpy import nside2npix, pix2ang
class Sky():
"""
Generates an array of GRB's given
certains strength at different sky positions.
Output should be an array.
"""
def __init__(self, NSIDE, strength):
# depending on NSIDE, there will be anywhere
# from 12 to infinite spots on the sky w/ GRBs
self.Ao = strength
self.pixels = nside2npix(NSIDE)
# want to convert these pixels into theta phi coords.
self.sourceangs = []
for i in range(self.pixels):
self.sourceangs.append(pix2ang(NSIDE, i))
| 2.71875 | 3 |
chapter_6/catnapping.py | aaronmccollum/automate-the-boring-stuff-with-python | 0 | 12795385 | # Using triple-quote marks to create a multiline string in Python
print('''Dear Alice,
Eve's cat has been arrested for catnapping, cat burglary, and extortion.
Sincerely,
Bob''')
| 3.671875 | 4 |
app/staff/views.py | swelanauguste/treasury_seo_system_1 | 0 | 12795386 | from django.contrib.messages.views import SuccessMessageMixin
from django.db.models import Q
from django.shortcuts import render
from django.views.generic import DetailView, ListView, UpdateView
from .forms import StaffUpdateForm
from .models import Staff
class SearchSearchView(ListView):
model = Staff
paginate_by = 10
queryset = Staff.objects.all()
def get_queryset(self):
query = self.request.GET.get("q")
if query:
return Staff.objects.filter(
Q(supplier_name__icontains=query)
| Q(tags__icontains=query)
| Q(email__icontains=query)
| Q(phone__icontains=query)
| Q(description__icontains=query)
| Q(address__icontains=query)
| Q(district__icontains=query)
)
else:
return Staff.objects.all()
class StaffListView(ListView):
model = Staff
class StaffDetailView(DetailView):
model = Staff
class StaffUpdateView(UpdateView):
model = Staff
form_class = StaffUpdateForm
template_name_suffix = "_update_form"
| 1.882813 | 2 |
core/pythonAction/pythonaction.py | samuelteixeiras/openwhisk | 0 | 12795387 | #
# Copyright 2015-2016 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import json
import subprocess
import codecs
import traceback
import flask
from gevent.wsgi import WSGIServer
proxy = flask.Flask(__name__)
proxy.debug = False
@proxy.route("/init", methods=['POST'])
def init():
flask.g = None
payload = flask.request.get_json(force=True,silent=True)
if not payload or not isinstance(payload, dict):
flask.abort(403)
message = payload.get("value", {})
if "code" in message:
# store the code
flask.g = message["code"]
return ('OK', 200)
else:
flask.abort(403)
@proxy.route("/run", methods=['POST'])
def run():
message = flask.request.get_json(force=True,silent=True)
if not message or not isinstance(message, dict):
flask.abort(403)
if not "value" in message:
flask.abort(403)
value = message["value"]
if not isinstance(value, dict):
flask.abort(403)
# initialize the namespace for the execution
namespace = {}
result = None
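    # The action code stored at /init is exec'd into a fresh namespace, the JSON
    # arguments are bound to `param`, and the result of main(param) is captured
    # back out of the namespace as `fun`.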
try:
exec(flask.g, namespace)
exec("param = " + json.dumps(value), namespace)
exec("fun = main(param)", namespace)
result = namespace['fun']
except Exception:
traceback.print_exc(file = sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
if result and isinstance(result, dict):
response = flask.jsonify(result)
response.status_code = 200
return response
else:
response = flask.jsonify({ "error": "the action did not return a dictionary", "action_output": result })
response.status_code = 502
return response
# start server in a forever loop
if __name__ == "__main__":
PORT = int(os.getenv("FLASK_PROXY_PORT", 8080))
server = WSGIServer(('', PORT), proxy, log=None)
server.serve_forever()
| 2.109375 | 2 |
datasets/ttf_utils.py | derwind/mxfont | 0 | 12795388 | <filename>datasets/ttf_utils.py<gh_stars>0
"""
MX-Font
Copyright (c) 2021-present NAVER Corp.
MIT license
"""
from fontTools.ttLib import TTFont
from fontTools.pens.basePen import BasePen
from PIL import Image, ImageFont, ImageDraw
import numpy as np
class StopDraw(Exception):
pass
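# Pen that detects whether a glyph has any outline: drawing the glyph raises
# StopDraw on the first line/curve command, so empty (space-like) glyphs keep
# is_space == True.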
class SpaceOrNotPen(BasePen):
def __init__(self, glyphSet=None):
super().__init__(glyphSet)
self.is_space = True
def _moveTo(self, pt):
pass
def _lineTo(self, pt):
self.is_space = False
raise StopDraw
def _curveToOne(self, pt1, pt2, pt3):
self.is_space = False
raise StopDraw
def get_defined_chars(fontfile):
ttf = TTFont(fontfile)
chars = [chr(y) for y in ttf["cmap"].tables[0].cmap.keys()]
return chars
def is_space_char(char, ttFont):
cmap = ttFont.getBestCmap()
gs = ttFont.getGlyphSet()
uni = ord(char)
gname = cmap[uni]
g = gs[gname]
pen = SpaceOrNotPen(gs)
try:
g.draw(pen)
except StopDraw:
pass
return pen.is_space
def get_filtered_chars(fontpath):
# ttf = read_font(fontpath)
defined_chars = get_defined_chars(fontpath)
avail_chars = []
ttFont = TTFont(fontpath)
for char in defined_chars:
# img = np.array(render(ttf, char))
# if img.mean() == 255.:
# pass
is_space = is_space_char(char, ttFont)
if is_space:
pass
else:
avail_chars.append(char.encode('utf-16', 'surrogatepass').decode('utf-16'))
return avail_chars
def read_font(fontfile, size=150):
font = ImageFont.truetype(str(fontfile), size=size)
return font
def render(font, char, size=(128, 128), pad=20):
width, height = font.getsize(char)
max_size = max(width, height)
if width < height:
start_w = (height - width) // 2 + pad
start_h = pad
else:
start_w = pad
start_h = (width - height) // 2 + pad
img = Image.new("L", (max_size+(pad*2), max_size+(pad*2)), 255)
draw = ImageDraw.Draw(img)
draw.text((start_w, start_h), char, font=font)
img = img.resize(size, 2)
return img
| 2.265625 | 2 |
codes/edsr/model/srresnet.py | dnap512/SROD | 5 | 12795389 | # +
from model import common
import torch.nn as nn
import torch
from torch.autograd import Variable
import numpy.random as npr
import numpy as np
import torch.nn.functional as F
import random
import math
def make_model(args, parent=False):
return SRResNet(args)
class SRResNet(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(SRResNet, self).__init__()
n_resblocks = 5
n_feats = 64
kernel_size = 3
scale = args.scale[0]
act = nn.PReLU()
self.sub_mean = common.MeanShift(args.rgb_range)
self.add_mean = common.MeanShift(args.rgb_range, sign=1)
# define head module
m_head = [
nn.Conv2d(3, 64, kernel_size=9, padding=4),
act
]
# define body module
m_body = [
common.ResBlock(
conv, n_feats, kernel_size, bn=True, act=act, res_scale=args.res_scale
) for _ in range(n_resblocks)
]
m_body.append(conv(n_feats, n_feats, kernel_size))
m_body.append(nn.BatchNorm2d(n_feats))
# define tail module
m_tail = [
common.Upsampler(conv, scale, n_feats, act='prelu'),
nn.Conv2d(n_feats, 3, kernel_size=9, padding=4)
]
self.head = nn.Sequential(*m_head)
self.body = nn.Sequential(*m_body)
self.tail = nn.Sequential(*m_tail)
def forward(self, x, flag=False, hr=None):
x = self.sub_mean(x)
x = self.head(x)
res = self.body(x)
res += x
x = self.tail[0](res)
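        # Gradient-guided feature dropout (only taken when flag=True during training):
        # the final conv is run on a detached copy, an L1 loss against the HR target is
        # backpropagated to rank channels / spatial positions by importance, and roughly
        # the top third of either the spatial map or the channels (chosen at random) is
        # zeroed out for the first third of the batch before the real final conv.
        # The hard-coded 36864 assumes 192x192 feature maps (H == W).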
if flag:
self.eval()
x_new = x.clone().detach()
x_new = Variable(x_new.data, requires_grad=True).cuda()
num_batch, num_channel, H, W = x_new.shape
HW = H*W
sr = self.tail[-1](x_new)
criterion = nn.L1Loss()
loss = criterion(sr, hr)
self.zero_grad()
loss.backward()
grads_val = x_new.grad.clone().detach()
grad_channel_mean = torch.mean(grads_val.view(num_batch, num_channel, -1), dim=2)
channel_mean = grad_channel_mean
grad_channel_mean = grad_channel_mean.view(num_batch, num_channel, 1, 1)
spatial_mean = torch.sum(x_new * grad_channel_mean, 1)
spatial_mean = spatial_mean.view(num_batch, HW)
self.zero_grad()
choose_one = random.randint(0,9)
if choose_one <= 4:
# ---------------------------- spatial -----------------------
spatial_drop_num = math.ceil(HW * 1 / 3.0)
th18_mask_value = torch.sort(spatial_mean, dim=1, descending=True)[0][:, spatial_drop_num]
th18_mask_value = th18_mask_value.view(num_batch, 1).expand(num_batch, 36864)
mask_all_cuda = torch.where(spatial_mean > th18_mask_value, torch.zeros(spatial_mean.shape).cuda(),
torch.ones(spatial_mean.shape).cuda())
mask_all = mask_all_cuda.reshape(num_batch, H, H).view(num_batch, 1, H, H)
else:
# -------------------------- channel ----------------------------
vector_thresh_percent = math.ceil(num_channel * 1 / 3.2)
vector_thresh_value = torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent]
vector_thresh_value = vector_thresh_value.view(num_batch, 1).expand(num_batch, num_channel)
vector = torch.where(channel_mean > vector_thresh_value,
torch.zeros(channel_mean.shape).cuda(),
torch.ones(channel_mean.shape).cuda())
mask_all = vector.view(num_batch, num_channel, 1, 1)
mask_all[int(num_batch/3):,:,:,:] = 1
self.train()
mask_all = Variable(mask_all, requires_grad=True)
x = x * mask_all
x = self.tail[-1](x)
x = self.add_mean(x)
return x
def load_state_dict(self, state_dict, strict=True):
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, nn.Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
if name.find('tail') == -1:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
if name.find('tail') == -1:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
| 2.328125 | 2 |
kopen_of_huren.py | basnijholt/kopen-of-huren | 4 | 12795390 | <gh_stars>1-10
from collections import defaultdict
from functools import partial
from itertools import product
from numbers import Number
from typing import Any, Dict, Literal, Union
import matplotlib
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize
from loky import get_reusable_executor
from tqdm.notebook import tqdm
from maandlasten import maandlasten
from mortgage import Mortgage, dollar
matplotlib.rc("font", size=15)
def load_sp500() -> pd.Series:
    # Daily data, so resample it to quarterly to match the house price data
df_stock = pd.read_csv("sp500.csv")
df_stock.Date = pd.to_datetime(df_stock.Date)
df_stock.set_index("Date", inplace=True)
# *Close price adjusted for splits
# **Adjusted close price adjusted for both dividends and splits.
stock_price = df_stock["Close*"].str.replace(",", "").astype(float)
# Create data points for each day
stock_price = stock_price.resample("D").interpolate()
return stock_price
def plot_sp500() -> None:
stock_price = load_sp500()
stock_price.plot(
xlabel="Datum",
ylabel="S&P500 prijs ($)",
title="S&P500 index vs. tijd, bron: Yahoo! Finance",
figsize=(7, 7),
)
plt.show()
def get_groei(regio="Nederland") -> pd.DataFrame:
stock_price = load_sp500()
stock_price = stock_price[
stock_price.index.day == 1
] # Keep only first of the month
first_year = stock_price.index.min().year
start = f"{first_year+1}-02-01"
stock_relative = {}
for date, value in stock_price[stock_price.index >= start].items():
date_prev = date.replace(date.year - 1)
prev = stock_price[date_prev]
stock_relative[date] = (value - prev) / prev * 100
stock_relative = pd.Series(stock_relative)
    # Select at the same dates as the house price index
huis_prijsindex = load_huizen_prijsindex_per_regio()[regio]
stock_relative = stock_relative[huis_prijsindex.index]
groei = pd.concat(
[huis_prijsindex, stock_relative], axis=1, keys=["huis", "aandelen"]
)
return groei
def plot_aandelen(groei: pd.DataFrame) -> None:
fig, ax = plt.subplots(figsize=(7, 7))
groei.aandelen.plot(
ax=ax,
xlabel="Datum",
ylabel="S&P500 prijs stijging/daling per jaar (%)",
title="S&P500 index vs. tijd, bron: Yahoo! Finance",
color="k",
)
fill_area(groei.aandelen, ax)
plt.show()
def load_huizen_prijsindex_per_regio():
    # Downloaded from https://opendata.cbs.nl/statline/#/CBS/nl/dataset/83913NED/table?ts=1617045165965
    # Column: "Prijsindex bestaande koopwoningen Ontwikkeling t.o.v. een jaar eerder"
    # (price index of existing owner-occupied homes, change vs. one year earlier),
    # with all quarterly data since 1996.
df = pd.read_csv("huizen_prijsindex_per_regio.csv")
df.Perioden = pd.to_datetime(
df.Perioden.str.replace("e kwartaal", "").str.replace(" ", "-Q")
)
df.set_index("Perioden", inplace=True)
for col in df.columns:
df[col] = df[col].str.replace(",", ".").astype(float)
df = df.resample("D").interpolate()
df = df[df.index.day == 1]
return df
def plot_huizenprijzen(groei: pd.DataFrame) -> None:
fig, ax = plt.subplots(figsize=(7, 7))
groei.huis.plot(
ax=ax,
legend=False,
xlabel="Datum",
ylabel="Huizenprijs stijging/daling per jaar (%)",
title="Huizenprijs verschil vs. tijd, bron: CBS",
figsize=(8, 8),
color="k",
)
fill_area(groei.huis, ax)
plt.show()
def plot_aandelen_en_huis(groei: pd.DataFrame) -> None:
fig, ax = plt.subplots(figsize=(8, 8))
groei.aandelen[groei.huis.index].plot(ax=ax, label="Aandelen", legend=True)
groei.huis.plot(ax=ax, label="Huizenprijs", legend=True)
ax.set_title("Huizenprijs en aandelenprijs stijging/daling per jaar in %")
ax.set_xlabel("Datum")
ax.set_ylabel("Prijs stijging/daling per jaar (%)")
fill_area(groei.aandelen, ax, alpha=0.3)
fill_area(groei.huis, ax, alpha=0.3)
plt.show()
def vergelijkings_tabel(groei: pd.DataFrame):
example_periods = [
dict(van="2014-Q2", tot="2020-Q4", notities="de recente 'goede' jaren"),
dict(
van="2009-Q2", tot="2014-Q1", notities="slechtste jaren na de 2008 crisis"
),
dict(van="2009-Q2", tot="2020-Q4", notities="van 2008 crisis tot en met nu"),
dict(
van="1996-Q1", tot="2020-Q4", notities="alle data sinds 1996 tot en met nu"
),
]
for dct in example_periods:
mean = lambda x: x[(x.index >= dct["van"]) & (x.index <= dct["tot"])].mean()
dct["huis"] = f"{mean(groei.huis):.2f}%"
dct["aandelen"] = f"{mean(groei.aandelen):.2f}%"
winner = "huis" if mean(groei.huis) > mean(groei.aandelen) else "aandelen"
dct[winner] += " 🏆"
dct["verschil (🏠 - 📈)"] = f"{mean(groei.huis) - mean(groei.aandelen):.2f}%"
dt = (pd.to_datetime(dct["tot"]) - pd.to_datetime(dct["van"])).total_seconds()
dct["lengte periode"] = f"{round(dt / 86400 / 365)} jaar"
table = pd.DataFrame(example_periods)[
[
"van",
"tot",
"lengte periode",
"huis",
"aandelen",
"verschil (🏠 - 📈)",
"notities",
]
]
return table
def fill_area(x: pd.Series, ax, alpha: float = 1.0) -> None:
ax.fill_between(
x.index,
x.values,
where=x.values > 0,
color="green",
alpha=alpha,
zorder=-1,
)
ax.fill_between(
x.index,
x.values,
where=x.values < 0,
color="red",
alpha=alpha,
zorder=-1,
)
ax.hlines(0, x.index.min(), x.index.max(), ls="--", color="k")
def maandelijke_groei(
date: pd.Timestamp, groei: pd.DataFrame, which: Literal["huis", "aandelen"] = "huis"
) -> float:
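    # Convert the year-on-year percentage change at `date` into the equivalent
    # monthly growth factor (the 12th root of the annual factor).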
pct = groei[which][groei.index == date].iloc[0] / 100
return (1 + pct) ** (1 / 12)
def bepaal_woz(huidige_prijs: float, date: pd.Timestamp, groei: pd.DataFrame):
"""WOZ waarde is bepaald aan de hand van de prijs van vorig jaar."""
vorig_jaar = date.year - 1
dates = groei.index[groei.index.year == vorig_jaar]
prijs = huidige_prijs
for _date in dates[::-1]:
        # Work backwards to last year's price
prijs /= maandelijke_groei(_date, groei, "huis")
return prijs
def aantal_jaar(dates: pd.DatetimeIndex):
dt = dates.max() - dates.min()
return dt.total_seconds() / 86400 / 365.25
def maandelijks_onderhoud(huis_waarde: float, onderhoud_pct: float = 2):
return huis_waarde * onderhoud_pct / 100 / 12
def vermogensbelasting(
vermogen: float, schulden: float = 0, met_fiscaal_partner: bool = True
):
"""Vermogensbelasting vanaf 2021.
https://www.rijksoverheid.nl/onderwerpen/belastingplan/belastingwijzigingen-voor-ons-allemaal/box-3
"""
heffingvrij = 100_000 if met_fiscaal_partner else 50_000
vermogen -= heffingvrij
vermogen -= schulden
if vermogen < 0:
return 0
    # The remainder falls in box 3
schijf_1 = 100_000 - 50_000
belastbaar_1 = min(vermogen, schijf_1)
vermogen -= belastbaar_1
inkomen_1 = belastbaar_1 * 1.90 / 100
schijf_2 = 1_000_000 - 100_000
belastbaar_2 = min(vermogen, schijf_2)
vermogen -= belastbaar_2
inkomen_2 = belastbaar_2 * 4.50 / 100
schijf_3 = float("inf")
belastbaar_3 = min(vermogen, schijf_3)
vermogen -= belastbaar_3
inkomen_3 = belastbaar_3 * 5.69 / 100
inkomen = inkomen_1 + inkomen_2 + inkomen_3
return inkomen * 31 / 100
def koop_huis_of_beleg(
aankoop_datum: Union[str, pd.Timestamp],
jaar_tot_verkoop: Number,
geleend: Number,
groei: pd.DataFrame,
huur: Number = 1000,
hypotheekrente: Number = 2.04,
hyptotheek_looptijd: int = 30 * 12,
jaarinkomen: Number = 90_000,
schulden: Number = 20_000,
onderhoud_pct: Number = 1,
met_fiscaal_partner: bool = True,
verbose: bool = True,
):
dates = groei.index[groei.index >= aankoop_datum][
: round(jaar_tot_verkoop * 12) + 1
]
if len(dates) < jaar_tot_verkoop * 12:
raise ValueError(
f"Een duur van {jaar_tot_verkoop} jaar is niet mogelijk als "
f"we starten op {aankoop_datum}. "
f"Een duur van {aantal_jaar(dates):.2f} is mogelijk."
)
persoon = maandlasten.Persoon(jaarinkomen)
onderhoud = partial(maandelijks_onderhoud, onderhoud_pct=onderhoud_pct)
hypotheek = Mortgage(hypotheekrente / 100, hyptotheek_looptijd, geleend)
betaalschema = hypotheek.monthly_payment_schedule()
rente_betaald: Dict[int, float] = defaultdict(float)
start_year = dates[0].year
betaald = 0
afgelost = 0
belegging = 0
huis_waarde = geleend
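    # Month-by-month simulation: the house and the stock portfolio grow at the
    # historical monthly rates, the owner pays maintenance plus the mortgage
    # instalment, and the renter invests whatever the owner pays above the rent.
    # Every January wealth tax is settled and last year's mortgage interest
    # deduction is refunded.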
for date in dates:
huis_waarde *= maandelijke_groei(date, groei, "huis")
belegging *= maandelijke_groei(date, groei, "aandelen")
betaald += onderhoud(huis_waarde)
afbetaling, rente = next(betaalschema)
hypotheek_kosten = float(afbetaling) + float(rente)
rente_betaald[date.year] += float(rente)
betaald += hypotheek_kosten
belegging += hypotheek_kosten - huur
afgelost += float(afbetaling)
if date.month == 1 and date.year > start_year:
            # Pay wealth tax on last year's investment portfolio
belegging -= vermogensbelasting(belegging, schulden, met_fiscaal_partner)
            # Claim back last year's mortgage interest deduction (hypotheekrenteaftrek)!
woz_waarde = bepaal_woz(huis_waarde, date, groei)
hypotheek_aftrek = maandlasten.hypotheek_aftrek(
rente_betaald[date.year - 1], woz_waarde
)
persoon_met_aftrek = maandlasten.Persoon(persoon.bruto_jaarloon)
persoon_met_aftrek.aftrek = hypotheek_aftrek
teruggave = persoon_met_aftrek.netto_loon - persoon.netto_loon
betaald -= teruggave
af_te_lossen = geleend - afgelost
overdrachts_belasting = huis_waarde * 0.02
huis_winst = huis_waarde - af_te_lossen - betaald - overdrachts_belasting
if verbose:
winst_of_verlies = "winst" if huis_winst > 0 else "verlies"
print(
f"We hebben op {aankoop_datum} een huis van €{geleend/1000:.0f}k gekocht. "
f"Op {date.date()} (na {aantal_jaar(dates):.1f} jaar) hebben we €{betaald/1000:.0f}k betaald, "
f"€{afgelost/1000:.0f}k afgelost, een huiswaarde van €{huis_waarde/1000:.0f}k, "
f"en na een verkoop €{abs(huis_winst)/1000:.0f}k {winst_of_verlies}. "
f"Hadden we een huis gehuurd voor €{huur} per maand en belegd, dan hadden we €{belegging/1000:.0f}k. "
f"Dat is dus €{(belegging - huis_winst)/1000:.0f}k verschil."
)
return dict(
aankoop_datum=aankoop_datum,
verkoop_datum=dates[-1],
aantal_jaar=aantal_jaar(dates),
betaald=betaald,
afgelost=afgelost,
af_te_lossen=af_te_lossen,
huis_waarde=huis_waarde,
huis_winst=huis_winst,
belegging=belegging,
)
def run_monte_carlo(groei: pd.DataFrame, parameters: Dict[str, Any]) -> pd.DataFrame:
start_jaar = groei.index.year.min() + 1
eind_jaar = groei.index.year.max()
n_jaar = eind_jaar - start_jaar + 1
results = {}
iterator = list(
product(groei.index[groei.index.year >= start_jaar], range(1, n_jaar))
)
def try_run_simulation(datum_jaar, parameters):
aankoop_datum, jaar_tot_verkoop = datum_jaar
try:
return koop_huis_of_beleg(
aankoop_datum,
jaar_tot_verkoop,
groei=groei,
verbose=False,
**parameters,
)
except ValueError:
            # This number of years is not possible because we cannot look into the future
return
with get_reusable_executor() as executor:
results = list(
tqdm(
executor.map(
partial(try_run_simulation, parameters=parameters), iterator
),
"Monte Carlo simulatie",
total=len(iterator),
)
)
df = pd.DataFrame([r for r in results if r is not None])
df.aankoop_datum = pd.to_datetime(df.aankoop_datum)
df["verschil"] = (df.huis_winst - df.belegging) / 1000
df.aantal_jaar = df.aantal_jaar.round()
return df
def plot_result_scatter(df: pd.DataFrame) -> None:
fig, ax = plt.subplots()
df.plot.scatter(
ax=ax,
x="aankoop_datum",
y="aantal_jaar",
c="verschil",
s=100,
alpha=1,
norm=matplotlib.colors.TwoSlopeNorm(0),
cmap="seismic",
title="Kopen of huren?",
xlabel="Aankoop datum",
ylabel="verkopen na (jaar)",
figsize=(8, 8),
)
ax, cax = plt.gcf().get_axes()
cax.set_ylabel("verschil (x€1000)")
ax.text(
0.95,
0.95,
"rood is huis is beter\nblauw is belegging is beter",
horizontalalignment="right",
verticalalignment="top",
transform=ax.transAxes,
fontsize=14,
)
plt.show()
def plot_result_contour(df: pd.DataFrame) -> None:
ds = df.set_index(["aantal_jaar", "aankoop_datum"]).to_xarray()
fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(12, 8), sharex=True, sharey=True)
levels = 15
ds.verschil.plot.contourf(
ax=axs[0, 0],
norm=matplotlib.colors.TwoSlopeNorm(
0, vmin=ds.verschil.min(), vmax=ds.verschil.max()
),
add_colorbar=True,
levels=levels,
cbar_kwargs={"label": "Verschil (x€1000)"},
)
(ds.belegging / 1000).plot.contourf(
ax=axs[0, 1],
add_colorbar=True,
levels=levels,
cbar_kwargs={"label": "Waarde belegging (x€1000)"},
)
(ds.huis_winst / 1000).plot.contourf(
ax=axs[1, 0],
add_colorbar=True,
levels=levels,
norm=matplotlib.colors.TwoSlopeNorm(
0, vmin=ds.huis_winst.min() / 1000, vmax=ds.huis_winst.max() / 1000
),
cbar_kwargs={"label": "Winst vrkp huis (x€1000)"},
)
(ds.huis_waarde / 1000).plot.contourf(
ax=axs[1, 1],
add_colorbar=True,
cbar_kwargs={"label": "Huis waarde (x€1000)"},
cmap="magma",
levels=levels,
)
axs[0, 0].text(
0.95,
0.95,
"rood is huis is beter\nblauw is belegging is beter",
horizontalalignment="right",
verticalalignment="top",
transform=axs[0, 0].transAxes,
fontsize=12,
)
axs[1, 0].set_xlabel("Aankoop datum")
axs[1, 1].set_xlabel("Aankoop datum")
axs[0, 0].set_ylabel("Verkoop na (jaar)")
axs[1, 0].set_ylabel("Verkoop na (jaar)")
axs[0, 0].set_xlabel("")
axs[0, 1].set_xlabel("")
axs[0, 1].set_ylabel("")
axs[1, 1].set_ylabel("")
plt.show()
def plot_result_lines(df: pd.DataFrame) -> None:
jaren = df.aantal_jaar.unique()[1::2]
cmap = matplotlib.cm.get_cmap("tab20", len(jaren))
color_map = dict(zip(sorted(jaren), cmap.colors))
fig, ax = plt.subplots(figsize=(8, 8))
for jaar in jaren:
df[df.aantal_jaar == jaar].plot(
x="aankoop_datum", y="verschil", ax=ax, color=color_map[jaar], legend=False
)
cbar = fig.colorbar(
matplotlib.cm.ScalarMappable(cmap=cmap),
ax=ax,
)
cbar.set_ticks(np.linspace(0, 1, len(jaren)))
cbar.set_ticklabels([int(j) for j in color_map.keys()])
cbar.set_label("Verkoop na (jaar)")
ax.hlines(
0, df.aankoop_datum.min(), df.aankoop_datum.max(), ls="--", color="k", zorder=-1
)
ax.set_xlabel("Aankoop datum")
ax.set_ylabel("Winst kopen huis t.o.v. beleggen")
ax.set_title("Winst kopen huis t.o.v. beleggen")
plt.show()
def hyptotheek_van_huur(
huur: Number = 1000,
hypotheekrente: Number = 2.04,
hyptotheek_looptijd: int = 360,
onderhoud_pct: Number = 1,
) -> float:
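    # Numerically invert the relation between mortgage size and monthly cost:
    # find the principal whose annuity payment plus maintenance equals the given
    # rent, using a Nelder-Mead search on the absolute difference.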
def hyptotheek_kosten(huis_prijs):
hyptotheek_maandelijks = Mortgage(
hypotheekrente / 100, hyptotheek_looptijd, dollar(float(huis_prijs))
).monthly_payment()
onderhoud = onderhoud_pct / 100 * huis_prijs / 12
kosten = float(hyptotheek_maandelijks) + onderhoud
return kosten
res = scipy.optimize.minimize(
lambda huis_prijs: abs(hyptotheek_kosten(huis_prijs) - huur),
x0=100_000,
method="Nelder-Mead",
tol=1e-2,
)
return round(float(res.x), 2)
def hyptotheek_maandlasten_df() -> pd.DataFrame:
bedragen = list(range(400, 2000, 100))
hyptoheek_hoogstes = [
hyptotheek_van_huur(
huur=huur,
hypotheekrente=2.04,
hyptotheek_looptijd=360,
onderhoud_pct=1,
)
for huur in bedragen
]
hyptoheek_hoogstes = (np.array(hyptoheek_hoogstes) / 1000).round(1)
df = pd.DataFrame([bedragen, hyptoheek_hoogstes]).T
df.columns = ["maandlasten (€)", "hypotheek (x€1000)"]
return df
def analyseer_data(df: pd.DataFrame) -> None:
pct_blauw = 100 * (df.verschil < 0).sum() / len(df.verschil)
print(
f"In {pct_blauw:.1f}% van alle gevallen is het beter om aandelen "
f"te kopen en in {100-pct_rood:.1f}% is het beter om een huis te kopen."
)
mean_beleggen = df.belegging[df.verschil < 0].mean() / 1000
mean_huis = df.huis_winst[df.verschil > 0].mean() / 1000
print(
f"In het geval dat aandelen beter waren, dan is de verwachte winst €{mean_beleggen:.1f}k."
)
print(f"Als een huis kopen beter was, dan is de verwachte winst €{mean_huis:.1f}k.")
| 2.75 | 3 |
P5-Intro Machine Learning/exercises/text_learning/vectorize_text.py | lucasosouza/udacity-data-analysis | 0 | 12795391 | <reponame>lucasosouza/udacity-data-analysis
#!/usr/bin/python
import os
import pickle
import re
import sys
sys.path.append( "../tools/" )
from parse_out_email_text import parseOutText
"""
Starter code to process the emails from Sara and Chris to extract
the features and get the documents ready for classification.
The list of all the emails from Sara are in the from_sara list
likewise for emails from Chris (from_chris)
The actual documents are in the Enron email dataset, which
you downloaded/unpacked in Part 0 of the first mini-project. If you have
not obtained the Enron email corpus, run startup.py in the tools folder.
The data is stored in lists and packed away in pickle files at the end.
"""
from_sara = open("from_sara.txt", "r")
from_chris = open("from_chris.txt", "r")
from_data = []
word_data = []
### temp_counter is a way to speed up the development--there are
### thousands of emails from Sara and Chris, so running over all of them
### can take a long time
### temp_counter helps you only look at the first 200 emails in the list so you
### can iterate your modifications quicker
#temp_counter = 0
for name, from_person in [("sara", from_sara), ("chris", from_chris)]:
for path in from_person:
### only look at first 200 emails when developing
### once everything is working, remove this line to run over full dataset
#temp_counter += 1
#if temp_counter < 200:
try:
path = os.path.join('..', path[:-1])
print name, ': ', path
email = open(path, "r")
### use parseOutText to extract the text from the opened email
words = parseOutText(email)
### use str.replace() to remove any instances of the words
### ["sara", "shackleton", "chris", "germani"]
patt = 'sara|shackleton|chris|germani|sshacklensf|cgermannsf'
words = re.sub(patt,'',words)
            # words is a string, not an iterator; the plural name is misleading.
### append the text to word_data
word_data.append(words)
### append a 0 to from_data if email is from Sara, and 1 if email is from Chris
            from_data.append(1 if name == 'chris' else 0)
email.close()
except:
pass
print "emails processed"
print word_data[152]
from_sara.close()
from_chris.close()
pickle.dump( word_data, open("your_word_data.pkl", "w") )
pickle.dump( from_data, open("your_email_authors.pkl", "w") )
### in Part 4, do TfIdf vectorization here
"""
#Remove english stopwords
from nltk.corpus import stopwords
sw = stopwords.words('english')
def remove_stopwords(text):
text = text.split(' ')
text = [word for word in text if word.lower() not in sw]
return ' '.join(text)
word_data2 = map(remove_stopwords, word_data)
"""
# Transform the word_data into a tf-idf matrix using the sklearn TfIdf transformation.
from sklearn.feature_extraction import text
word_matrix = text.TfidfVectorizer(stop_words='english')
word_matrix.fit(word_data)
# You can access the mapping between words and feature numbers using get_feature_names(), which returns a list of all the words in the vocabulary. How many different words are there?
print len(word_matrix.get_feature_names())
#import pdb;pdb.set_trace()
| 2.796875 | 3 |
authentication/decorators.py | felix781/market-access-python-frontend | 1 | 12795392 | <reponame>felix781/market-access-python-frontend
def public_view(func):
"""
Decorator for public views that do not require authentication
"""
orig_func = func
orig_func._public_view = True
return func
| 1.96875 | 2 |
berts_of_a_feather/files_for_replication/process_test_results.py | tommccoy1/hans | 109 | 12795393 | import sys
prefix = sys.argv[1]
fi = open(prefix + "/" + "test_results.tsv", "r")
fo = open(prefix + "/" + "preds.txt", "w")
fo.write("pairID,gold_label\n")
counter = 0
labels = ["contradiction", "entailment", "neutral"]
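# Each line of test_results.tsv holds one probability per label (assumed to be in
# the order of `labels`); take the argmax and write the corresponding gold_label
# in the pairID,gold_label output format.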
for line in fi:
parts = [float(x) for x in line.strip().split("\t")]
max_ind = 0
max_val = parts[0]
for ind, part in enumerate(parts):
if part > max_val:
max_val = part
max_ind = ind
fo.write("ex" + str(counter) + "," + labels[max_ind] + "\n")
counter += 1
| 2.546875 | 3 |
rosters/tests/test_views.py | Drazerr/roster-wizard | 0 | 12795394 | from django.http import HttpRequest
from django.test import SimpleTestCase
from django.urls import reverse
from .. import views
class HomePageTests(SimpleTestCase):
def test_home_page_status_code(self):
response = self.client.get("/")
self.assertEqual(response.status_code, 200)
def test_view_url_by_name(self):
response = self.client.get(reverse("home"))
self.assertEqual(response.status_code, 200)
def test_view_uses_correct_template(self):
response = self.client.get(reverse("home"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "home.html")
def test_home_page_contains_correct_html(self):
response = self.client.get("/")
self.assertContains(
response, '<h1 class="display-4">Roster Wizard</h1>'
)
def test_home_page_does_not_contain_incorrect_html(self):
response = self.client.get("/")
self.assertNotContains(
response, "Hi there! I should not be on the page."
)
| 2.53125 | 3 |
SROMPy/target/UniformRandomVariable.py | datree-demo/SROMPy | 0 | 12795395 | <gh_stars>0
# Copyright 2018 United States Government as represented by the Administrator of
# the National Aeronautics and Space Administration. No copyright is claimed in
# the United States under Title 17, U.S. Code. All Other Rights Reserved.
# The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Class for defining a uniform random variable
'''
import numpy as np
from scipy.stats import uniform as scipyuniform
from SROMPy.target.RandomVariable import RandomVariable
class UniformRandomVariable(RandomVariable):
'''
Class for defining a uniform random variable
'''
def __init__(self, min_val=0., max_val=0., max_moment=10):
'''
        Initialize the uniform random variable with provided
minimum/maximum values. Implementation wraps scipy.stats.uniform to get
statistics/samples. Caches moments up to max_moment for speedup.
'''
if min_val >= max_val:
raise ValueError("Minimum value must be less than maximum value")
self._minimum_value = min_val
self._range_size = max_val - min_val
        #set dimension (scalar) and store the min/max bounds
self._dim = 1
self._mins = [min_val]
self._maxs = [max_val]
#cache moments
self.generate_moments(max_moment)
self._max_moment = max_moment
def get_dim(self):
return self._dim
def get_variance(self):
'''
Returns variance of uniform random variable
'''
        # Var[U(a, b)] = (b - a)^2 / 12
        return self._range_size**2.0 / 12.0
def compute_moments(self, max_order):
'''
Returns moments up to order 'max_order' in numpy array.
'''
#TODO - calculate moments above max_moment on the fly & append to stored
if max_order <= self._max_moment:
moments = self._moments[:max_order]
else:
raise NotImplementedError("Moment above max_moment not handled yet")
return moments
def compute_CDF(self, x_grid):
'''
Returns numpy array of uniform CDF values at the points contained
in x_grid
'''
return scipyuniform.cdf(x_grid, self._minimum_value, self._range_size)
def compute_inv_CDF(self, x_grid):
'''
Returns np array of inverse uniform CDF values at pts in x_grid
'''
return scipyuniform.ppf(x_grid, self._minimum_value, self._range_size)
def compute_pdf(self, x_grid):
'''
Returns numpy array of uniform pdf values at the points contained
in x_grid
'''
return scipyuniform.pdf(x_grid, self._minimum_value, self._range_size)
def draw_random_sample(self, sample_size):
'''
Draws random samples from the uniform random variable. Returns numpy
array of length 'sample_size' containing these samples
'''
#Use scipy uniform rv to return shifted/scaled samples automatically
return scipyuniform.rvs(self._minimum_value, self._range_size,
sample_size)
def generate_moments(self, max_moment):
'''
Calculate & store moments to retrieve more efficiently later
'''
self._moments = np.zeros((max_moment, 1))
#Rely on scipy.stats to return non-central moment
for i in range(max_moment):
self._moments[i] = scipyuniform.moment(i+1, self._minimum_value,
self._range_size)
| 2.28125 | 2 |
statsmodels/genmod/tests/test_gee.py | saedsaleh/statsmodels | 0 | 12795396 | """
Test functions for GEE
External comparisons are to R. The statsmodels GEE implementation
should generally agree with the R GEE implementation for the
independence and exchangeable correlation structures. For other
correlation structures, the details of the correlation estimation
differ among implementations and the results will not agree exactly.
"""
from __future__ import print_function
from statsmodels.compat import lrange
import numpy as np
import os
from numpy.testing import assert_almost_equal
from statsmodels.genmod.generalized_estimating_equations import (GEE,
GEEMargins, Multinomial)
from statsmodels.genmod.families import Gaussian, Binomial, Poisson
from statsmodels.genmod.dependence_structures import (Exchangeable,
Independence, GlobalOddsRatio, Autoregressive, Nested)
import pandas as pd
import statsmodels.formula.api as sm
def load_data(fname, icept=True):
"""
Load a data set from the results directory. The data set should
be a CSV file with the following format:
Column 0: Group indicator
Column 1: endog variable
Columns 2-end: exog variables
If `icept` is True, an intercept is prepended to the exog
variables.
"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
Z = np.genfromtxt(os.path.join(cur_dir, 'results', fname),
delimiter=",")
group = Z[:,0]
endog = Z[:,1]
exog = Z[:,2:]
if icept:
exog = np.concatenate((np.ones((exog.shape[0],1)), exog),
axis=1)
return endog,exog,group
class TestGEE(object):
def test_margins(self):
n = 300
exog = np.random.normal(size=(n, 4))
exog[:,0] = 1
exog[:,1] = 1*(exog[:,2] < 0)
group = np.kron(np.arange(n/4), np.ones(4))
time = np.zeros((n, 1))
beta = np.r_[0, 1, -1, 0.5]
lpr = np.dot(exog, beta)
prob = 1 / (1 + np.exp(-lpr))
endog = 1*(np.random.uniform(size=n) < prob)
fa = Binomial()
ex = Exchangeable()
md = GEE(endog, exog, group, time, fa, ex)
mdf = md.fit()
marg = GEEMargins(mdf, ())
marg.summary()
# This is in the release announcement for version 0.6.
def test_poisson_epil(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
fname = os.path.join(cur_dir, "results", "epil.csv")
data = pd.read_csv(fname)
fam = Poisson()
ind = Independence()
md1 = GEE.from_formula("y ~ age + trt + base", data,
groups=data["subject"], cov_struct=ind,
family=fam)
mdf1 = md1.fit()
# Coefficients should agree with GLM
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
md2 = GLM.from_formula("y ~ age + trt + base", data,
family=families.Poisson())
mdf2 = md2.fit(scale="X2")
assert_almost_equal(mdf1.params, mdf2.params, decimal=6)
assert_almost_equal(mdf1.scale, mdf2.scale, decimal=6)
# TODO: why does this test fail?
def t_est_missing(self):
Y = np.random.normal(size=100)
X1 = np.random.normal(size=100)
X2 = np.random.normal(size=100)
X3 = np.random.normal(size=100)
groups = np.kron(lrange(20), np.ones(5))
Y[0] = np.nan
Y[5:7] = np.nan
X2[10:12] = np.nan
D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3,
"groups": groups})
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None,
groups=D["groups"], missing='drop')
mdf = md.fit()
assert(len(md.endog) == 95)
assert(md.exog.shape) == (95,4)
def test_default_time(self):
"""
Check that the time defaults work correctly.
"""
endog,exog,group = load_data("gee_logistic_1.csv")
# Time values for the autoregressive model
T = np.zeros(len(endog))
idx = set(group)
for ii in idx:
jj = np.flatnonzero(group == ii)
T[jj] = lrange(len(jj))
family = Binomial()
va = Autoregressive()
md1 = GEE(endog, exog, group, family=family, cov_struct=va)
mdf1 = md1.fit()
md2 = GEE(endog, exog, group, time=T, family=family,
cov_struct=va)
mdf2 = md2.fit()
assert_almost_equal(mdf1.params, mdf2.params, decimal=6)
assert_almost_equal(mdf1.standard_errors(),
mdf2.standard_errors(), decimal=6)
def test_logistic(self):
"""
R code for comparing results:
library(gee)
Z = read.csv("results/gee_logistic_1.csv", header=FALSE)
Y = Z[,2]
Id = Z[,1]
X1 = Z[,3]
X2 = Z[,4]
X3 = Z[,5]
mi = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,
corstr="independence")
smi = summary(mi)
u = coefficients(smi)
cfi = paste(u[,1], collapse=",")
sei = paste(u[,4], collapse=",")
me = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,
corstr="exchangeable")
sme = summary(me)
u = coefficients(sme)
cfe = paste(u[,1], collapse=",")
see = paste(u[,4], collapse=",")
ma = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,
corstr="AR-M")
sma = summary(ma)
u = coefficients(sma)
cfa = paste(u[,1], collapse=",")
sea = paste(u[,4], collapse=",")
sprintf("cf = [[%s],[%s],[%s]]", cfi, cfe, cfa)
sprintf("se = [[%s],[%s],[%s]]", sei, see, sea)
"""
endog,exog,group = load_data("gee_logistic_1.csv")
# Time values for the autoregressive model
T = np.zeros(len(endog))
idx = set(group)
for ii in idx:
jj = np.flatnonzero(group == ii)
T[jj] = lrange(len(jj))
family = Binomial()
ve = Exchangeable()
vi = Independence()
va = Autoregressive()
# From R gee
cf = [[0.0167272965285882,1.13038654425893,
-1.86896345082962,1.09397608331333],
[0.0178982283915449,1.13118798191788,
-1.86133518416017,1.08944256230299],
[0.0109621937947958,1.13226505028438,
-1.88278757333046,1.09954623769449]]
se = [[0.127291720283049,0.166725808326067,
0.192430061340865,0.173141068839597],
[0.127045031730155,0.165470678232842,
0.192052750030501,0.173174779369249],
[0.127240302296444,0.170554083928117,
0.191045527104503,0.169776150974586]]
for j,v in enumerate((vi,ve,va)):
md = GEE(endog, exog, group, T, family, v)
mdf = md.fit()
if id(v) != id(va):
assert_almost_equal(mdf.params, cf[j], decimal=6)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# Test with formulas
D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]),
axis=1)
D = pd.DataFrame(D)
D.columns = ["Y","Id",] + ["X%d" % (k+1)
for k in range(exog.shape[1]-1)]
for j,v in enumerate((vi,ve)):
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None,
groups=D.loc[:,"Id"],
family=family, cov_struct=v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=6)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# Check for run-time exceptions in summary
# print(mdf.summary())
def test_autoregressive(self):
dep_params_true = [0, 0.589208623896, 0.559823804948]
params_true = [[1.08043787, 1.12709319, 0.90133927],
[0.9613677, 1.05826987, 0.90832055],
[1.05370439, 0.96084864, 0.93923374]]
np.random.seed(342837482)
num_group = 100
ar_param = 0.5
k = 3
ga = Gaussian()
for gsize in 1,2,3:
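            # Build an AR(1) correlation matrix (rho^|i-j|) and its Cholesky factor,
            # used below to generate correlated within-group errors.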
ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:]
ix = np.abs(ix)
cmat = ar_param ** ix
cmat_r = np.linalg.cholesky(cmat)
endog = []
exog = []
groups = []
for i in range(num_group):
x = np.random.normal(size=(gsize,k))
exog.append(x)
expval = x.sum(1)
errors = np.dot(cmat_r, np.random.normal(size=gsize))
endog.append(expval + errors)
groups.append(i*np.ones(gsize))
endog = np.concatenate(endog)
groups = np.concatenate(groups)
exog = np.concatenate(exog, axis=0)
ar = Autoregressive()
md = GEE(endog, exog, groups, family=ga, cov_struct = ar)
mdf = md.fit()
assert_almost_equal(ar.dep_params, dep_params_true[gsize-1])
assert_almost_equal(mdf.params, params_true[gsize-1])
def test_post_estimation(self):
family = Gaussian()
endog,exog,group = load_data("gee_linear_1.csv")
ve = Exchangeable()
md = GEE(endog, exog, group, None, family, ve)
mdf = md.fit()
assert_almost_equal(np.dot(exog, mdf.params),
mdf.fittedvalues)
assert_almost_equal(endog - np.dot(exog, mdf.params),
mdf.resid)
def test_linear(self):
"""
library(gee)
Z = read.csv("results/gee_linear_1.csv", header=FALSE)
Y = Z[,2]
Id = Z[,1]
X1 = Z[,3]
X2 = Z[,4]
X3 = Z[,5]
mi = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian,
corstr="independence", tol=1e-8, maxit=100)
smi = summary(mi)
u = coefficients(smi)
cfi = paste(u[,1], collapse=",")
sei = paste(u[,4], collapse=",")
me = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian,
corstr="exchangeable", tol=1e-8, maxit=100)
sme = summary(me)
u = coefficients(sme)
cfe = paste(u[,1], collapse=",")
see = paste(u[,4], collapse=",")
sprintf("cf = [[%s],[%s]]", cfi, cfe)
sprintf("se = [[%s],[%s]]", sei, see)
"""
family = Gaussian()
endog,exog,group = load_data("gee_linear_1.csv")
vi = Independence()
ve = Exchangeable()
# From R gee
cf = [[-0.01850226507491,0.81436304278962,
-1.56167635393184,0.794239361055003],
[-0.0182920577154767,0.814898414022467,
-1.56194040106201,0.793499517527478]]
se = [[0.0440733554189401,0.0479993639119261,
0.0496045952071308,0.0479467597161284],
[0.0440369906460754,0.0480069787567662,
0.049519758758187,0.0479760443027526]]
for j,v in enumerate((vi, ve)):
md = GEE(endog, exog, group, None, family, v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=10)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=10)
# Test with formulas
D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]),
axis=1)
D = pd.DataFrame(D)
D.columns = ["Y","Id",] + ["X%d" % (k+1)
for k in range(exog.shape[1]-1)]
for j,v in enumerate((vi,ve)):
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None,
groups=D.loc[:,"Id"],
family=family, cov_struct=v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=10)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=10)
def test_linear_constrained(self):
family = Gaussian()
exog = np.random.normal(size=(300,4))
exog[:,0] = 1
endog = np.dot(exog, np.r_[1, 1, 0, 0.2]) +\
np.random.normal(size=300)
group = np.kron(np.arange(100), np.r_[1,1,1])
vi = Independence()
ve = Exchangeable()
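        # Linear constraint L * beta = R forces the fourth coefficient to zero,
        # so the fitted model should reproduce that restriction exactly.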
L = np.r_[[[0, 0, 0, 1]]]
R = np.r_[0,]
for j,v in enumerate((vi,ve)):
md = GEE(endog, exog, group, None, family, v,
constraint=(L,R))
mdf = md.fit()
assert_almost_equal(mdf.params[3], 0, decimal=10)
def test_nested_linear(self):
family = Gaussian()
endog,exog,group = load_data("gee_nested_linear_1.csv")
group_n = []
for i in range(endog.shape[0]//10):
group_n.extend([0,]*5)
group_n.extend([1,]*5)
group_n = np.array(group_n)[:,None]
dp = Independence()
md = GEE(endog, exog, group, None, family, dp)
mdf1 = md.fit()
# From statsmodels.GEE (not an independent test)
cf = np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106]
se = np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989]
assert_almost_equal(mdf1.params, cf, decimal=6)
assert_almost_equal(mdf1.standard_errors(), se,
decimal=6)
ne = Nested()
md = GEE(endog, exog, group, None, family, ne,
dep_data=group_n)
mdf2 = md.fit(start_params=mdf1.params)
# From statsmodels.GEE (not an independent test)
cf = np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969]
se = np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991]
assert_almost_equal(mdf2.params, cf, decimal=6)
assert_almost_equal(mdf2.standard_errors(), se,
decimal=6)
def test_ordinal(self):
family = Binomial()
endog, exog, groups = load_data("gee_ordinal_1.csv",
icept=False)
v = GlobalOddsRatio("ordinal")
md = GEE(endog, exog, groups, None, family, v)
md.setup_ordinal()
mdf = md.fit()
cf = np.r_[1.09238131, 0.02148193, -0.39879146, -0.01855666,
0.02983409, 1.18123172, 0.01845318, -1.10233886]
se = np.r_[0.10878752, 0.10326078, 0.11171241, 0.05488705,
0.05995019, 0.0916574, 0.05951445, 0.08539281]
assert_almost_equal(mdf.params, cf, decimal=5)
assert_almost_equal(mdf.bse, se, decimal=5)
def test_nominal(self):
family = Multinomial(3)
endog, exog, groups = load_data("gee_nominal_1.csv",
icept=False)
# Test with independence correlation
v = Independence()
md = GEE(endog, exog, groups, None, family, v)
md.setup_nominal()
mdf1 = md.fit()
# From statsmodels.GEE (not an independent test)
cf1 = np.r_[0.44944752, 0.45569985, -0.92007064, -0.46766728]
se1 = np.r_[0.09801821, 0.07718842, 0.13229421, 0.08544553]
assert_almost_equal(mdf1.params, cf1, decimal=5)
assert_almost_equal(mdf1.standard_errors(), se1, decimal=5)
# Test with global odds ratio dependence
v = GlobalOddsRatio("nominal")
md = GEE(endog, exog, groups, None, family, v)
md.setup_nominal()
mdf2 = md.fit(start_params=mdf1.params)
# From statsmodels.GEE (not an independent test)
cf2 = np.r_[0.45397549, 0.42278345, -0.91997131, -0.50115943]
se2 = np.r_[0.09646057, 0.07405713, 0.1324629 , 0.09025019]
assert_almost_equal(mdf2.params, cf2, decimal=5)
assert_almost_equal(mdf2.standard_errors(), se2, decimal=5)
def test_poisson(self):
"""
library(gee)
Z = read.csv("results/gee_poisson_1.csv", header=FALSE)
Y = Z[,2]
Id = Z[,1]
X1 = Z[,3]
X2 = Z[,4]
X3 = Z[,5]
X4 = Z[,6]
X5 = Z[,7]
mi = gee(Y ~ X1 + X2 + X3 + X4 + X5, id=Id, family=poisson,
corstr="independence", scale.fix=TRUE)
smi = summary(mi)
u = coefficients(smi)
cfi = paste(u[,1], collapse=",")
sei = paste(u[,4], collapse=",")
me = gee(Y ~ X1 + X2 + X3 + X4 + X5, id=Id, family=poisson,
corstr="exchangeable", scale.fix=TRUE)
sme = summary(me)
u = coefficients(sme)
cfe = paste(u[,1], collapse=",")
see = paste(u[,4], collapse=",")
sprintf("cf = [[%s],[%s]]", cfi, cfe)
sprintf("se = [[%s],[%s]]", sei, see)
"""
family = Poisson()
endog,exog,group_n = load_data("gee_poisson_1.csv")
vi = Independence()
ve = Exchangeable()
# From R gee
cf = [[-0.0364450410793481,-0.0543209391301178,
0.0156642711741052,0.57628591338724,
-0.00465659951186211,-0.477093153099256],
[-0.0315615554826533,-0.0562589480840004,
0.0178419412298561,0.571512795340481,
-0.00363255566297332,-0.475971696727736]]
se = [[0.0611309237214186,0.0390680524493108,
0.0334234174505518,0.0366860768962715,
0.0304758505008105,0.0316348058881079],
[0.0610840153582275,0.0376887268649102,
0.0325168379415177,0.0369786751362213,
0.0296141014225009,0.0306115470200955]]
for j,v in enumerate((vi,ve)):
md = GEE(endog, exog, group_n, None, family, v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=5)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# Test with formulas
D = np.concatenate((endog[:,None], group_n[:,None],
exog[:,1:]), axis=1)
D = pd.DataFrame(D)
D.columns = ["Y","Id",] + ["X%d" % (k+1)
for k in range(exog.shape[1]-1)]
for j,v in enumerate((vi,ve)):
md = GEE.from_formula("Y ~ X1 + X2 + X3 + X4 + X5", D,
None, groups=D.loc[:,"Id"],
family=family, cov_struct=v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=5)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# print(mdf.params)
def test_compare_OLS(self):
"""
Gaussian GEE with independence correlation should agree
exactly with OLS for parameter estimates and standard errors
derived from the naive covariance estimate.
"""
vs = Independence()
family = Gaussian()
Y = np.random.normal(size=100)
X1 = np.random.normal(size=100)
X2 = np.random.normal(size=100)
X3 = np.random.normal(size=100)
groups = np.kron(lrange(20), np.ones(5))
D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3})
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None,
groups=groups, family=family,
cov_struct=vs)
mdf = md.fit()
ols = sm.ols("Y ~ X1 + X2 + X3", data=D).fit()
assert_almost_equal(ols.params.values, mdf.params, decimal=10)
se = mdf.standard_errors(covariance_type="naive")
assert_almost_equal(ols.bse, se, decimal=10)
naive_tvalues = mdf.params / \
np.sqrt(np.diag(mdf.naive_covariance))
assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10)
def test_compare_logit(self):
vs = Independence()
family = Binomial()
Y = 1*(np.random.normal(size=100) < 0)
X1 = np.random.normal(size=100)
X2 = np.random.normal(size=100)
X3 = np.random.normal(size=100)
groups = np.random.randint(0, 4, size=100)
D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3})
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None, groups=groups,
family=family, cov_struct=vs).fit()
sml = sm.logit("Y ~ X1 + X2 + X3", data=D).fit(disp=False)
assert_almost_equal(sml.params.values, md.params, decimal=10)
def test_compare_poisson(self):
vs = Independence()
family = Poisson()
Y = np.ceil(-np.log(np.random.uniform(size=100)))
X1 = np.random.normal(size=100)
X2 = np.random.normal(size=100)
X3 = np.random.normal(size=100)
groups = np.random.randint(0, 4, size=100)
D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3})
md = GEE.from_formula("Y ~ X1 + X2 + X3", D, None, groups=groups,
family=family, cov_struct=vs).fit()
sml = sm.poisson("Y ~ X1 + X2 + X3", data=D).fit(disp=False)
assert_almost_equal(sml.params.values, md.params, decimal=10)
if __name__=="__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
| 2.46875 | 2 |
tools/write_clinrecconv.py | s-mackay/combinato | 34 | 12795397 | <reponame>s-mackay/combinato
# -*- coding: utf-8 -*-
# JN 2014-10-21
# script creates a clinRecConv.py from ncs files
import os
import numpy as np
from combinato import NcsFile
from matplotlib.dates import date2num
if __name__ == "__main__":
if os.path.exists('clinRecConv.py'):
print('File exists, doing nothing')
else:
fid = NcsFile('CSC1.ncs')
d = fid.header['opened']
n = date2num(d)
ts = fid.read(0, 1, 'timestep')
        # save the timestep together with the numeric (matplotlib date2num) start time
        np.save('clinRecConv', np.array((ts, n)))
| 2.5625 | 3 |
company/migrations/0020_auto_20210729_1526.py | uktrade/dnb-service | 4 | 12795398 | # Generated by Django 2.2.20 on 2021-07-29 15:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0019_auto_20210512_1114'),
]
operations = [
migrations.AlterField(
model_name='company',
name='address_area_abbrev_name',
field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'),
),
migrations.AlterField(
model_name='company',
name='registered_address_area_abbrev_name',
field=models.CharField(blank=True, max_length=255, verbose_name='State (Abbreviated)'),
),
]
| 1.554688 | 2 |
comparison_with_benchmark.py | KGkiotsalitis/bus-holding-model-under-capacity-limitations | 3 | 12795399 | <reponame>KGkiotsalitis/bus-holding-model-under-capacity-limitations<gh_stars>1-10
import numpy as np
import matplotlib.pyplot as plt
import random
zeta=300
M_2=100000000000
M_1=100000000000000
a_n_plus_1s=2500
ta=1.5
tb=4
l_n_1=50
beta_n_1=10
c_n_plus_1=60
c_n=60
lambda_s=0.02
Hs=600
phi_n=62
t=1500
di_1_s=1000
k=1+tb*lambda_s
h=(1+tb*lambda_s)*tb
theta=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+(a_n_plus_1s-t)*lambda_s)*k*tb-t-Hs
x_analytic_solution=max(0,min(zeta,(c_n-phi_n)/lambda_s,((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2)))
'''
if t<di_1_s+Hs:
if 0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)<Hs:
x_analytic_solution=di_1_s+Hs-t
else:
x_analytic_solution=di_1_s+(0.5*((a_n_plus_1s+lambda_s*(a_n_plus_1s-t)*tb+beta_n_1*ta)-di_1_s)+Hs)*0.5-t
else:
x_analytic_solution=0
print(x_analytic_solution)
'''
n_analytic_v_1=max(0,phi_n+x_analytic_solution*lambda_s-c_n)
n_analytic_v_2=max(0,l_n_1-beta_n_1-c_n_plus_1+(beta_n_1*ta*lambda_s+max(0,n_analytic_v_1)+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*k)
d_n_plus_1_s=a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb
Bigterm = ((lambda_s*h+1)*theta-(t-di_1_s-Hs))/(1+(lambda_s*h+1)**2)
busload_n_plus_1=l_n_1-beta_n_1+\
(beta_n_1*ta*lambda_s+max(0,phi_n+x_analytic_solution*lambda_s-c_n)+(a_n_plus_1s-(t+x_analytic_solution))*lambda_s)*(1+tb*lambda_s)
print(x_analytic_solution,n_analytic_v_1,n_analytic_v_2)
#print(d_n_plus_1_s)
print((t+x_analytic_solution-di_1_s-Hs)**2+\
(d_n_plus_1_s-t-x_analytic_solution-Hs)**2)
print((t+x_analytic_solution-di_1_s-Hs)**2+(a_n_plus_1s+beta_n_1*ta+(beta_n_1*ta*lambda_s+n_analytic_v_1+(a_n_plus_1s-t-x_analytic_solution)*lambda_s)*(1+tb*lambda_s)*tb-n_analytic_v_2*tb-t-x_analytic_solution-Hs)**2+M_1*n_analytic_v_1+M_2*n_analytic_v_2)
print('busload',l_n_1-beta_n_1,(a_n_plus_1s-(t+x_analytic_solution))*lambda_s*(1+tb*lambda_s),busload_n_plus_1,phi_n+x_analytic_solution*lambda_s)
print(d_n_plus_1_s-t-x_analytic_solution,t+x_analytic_solution-di_1_s)
| 2.09375 | 2 |
HarvardX/CS50W/flask/variables0/application.py | mohammedelzanaty/myRoad2BeFullStack | 2 | 12795400 | from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
headline = "Hello World"
return render_template("index.html", headline=headline)
@app.route("/<string:name>")
def say_name(name):
return render_template("index.html", name=name)
if __name__ == "__main__":
app.run(debug=True)
| 2.8125 | 3 |
src/reddit_bot.py | ooknosi/finite_dino_bot | 0 | 12795401 | #!/usr/bin/env python3
"""Reddit Bot Common Routines
Contains common Reddit bot functions such as keyword comment retrieval,
processed comment caching, and comment posting.
Allows bot authors to concentrate on writing their custom bot functions.
"""
from collections import deque
from os import mkdir
import re
import signal
import sys
from time import sleep
import praw
from config import (
CACHE_FILE,
CACHE_SIZE,
KEYWORD,
RETRIEVAL_LIMIT,
SITE_NAME,
SUBREDDITS,
)
class RedditBot:
"""Superclass for Reddit bots which adds common bot routines.
Parameters
----------
site_name : str, optional
Initializes praw under site_name within praw.ini.
Defaults to config.SITE_NAME.
See: https://praw.readthedocs.io/en/latest/getting_started
/configuration/prawini.html#choosing-a-site
keyword : str, optional
Comment trigger word.
Defaults to config.KEYWORD.
retrieval_limit : int, optional
Maximum number of comments to retrieve at a time.
Defaults to config.RETRIEVAL_LIMIT.
See: https://praw.readthedocs.io/en/latest/code_overview/models
/subreddit.html#praw.models.Subreddit.comments
subreddits : str, optional
Subreddits to retrieve comments from.
Defaults to config.SUBREDDITS.
See: https://praw.readthedocs.io/en/latest/code_overview/models
/subreddit.html#subreddit
"""
def __init__(self,
site_name=SITE_NAME,
keyword=KEYWORD,
retrieval_limit=RETRIEVAL_LIMIT,
subreddits=SUBREDDITS,
):
print("Initializing bot...")
self.keyword = re.compile(keyword+r' ([ \w]+)', re.I)
self.reddit = None
self.retrieval_limit = retrieval_limit
self.site_name = site_name
self.subreddits = subreddits
self.username = site_name
self.processed_comments = self.read_cache(CACHE_FILE)
signal.signal(signal.SIGINT, self.bot_exit)
def authenticate(self, max_attempts=-1, seconds_between_attempts=60):
"""Authenticates SITE_NAME with Reddit.
Sets self.reddit and self.username on success.
Parameters
----------
max_attempts : int, optional
Maximum number of authentication attempts before failure.
Defaults to -1 (infinite attempts).
seconds_between_attempts : int, optional
Seconds to wait between authentication attempts.
Defaults to 60.
"""
attempt = 0
while attempt != max_attempts:
try:
print("Authenticating as {}...".format(self.site_name))
self.reddit = praw.Reddit(self.site_name)
self.username = self.reddit.user.me()
print("Successfully authenticated as {}".format(self.username))
return
except praw.exceptions.APIException as error:
print("Unable to authenticate:", error)
print("Retrying in {} "
"seconds".format(seconds_between_attempts))
sleep(seconds_between_attempts)
attempt += 1
raise RuntimeError('Failed to authenticate after {} '
'attempts'.format(max_attempts))
def retrieve_comments(self):
"""Retrieves comments from subreddits, filters for keyword trigger, and
excludes processed comments.
Returns
-------
generator
Dict of reddit.Comment and query.
"""
try:
print("Retrieving {} comments...".format(self.retrieval_limit))
comments = self.reddit.subreddit(self.subreddits).comments(
limit=self.retrieval_limit
)
for comment in comments:
if (comment.author != self.username
and comment not in self.processed_comments
#and not self.has_already_replied(comment)
#and not self.is_summon_chain(comment)
):
query = self.keyword.search(comment.body.lower())
if query:
self.processed_comments.append(comment.id)
yield {'comment': comment, 'query' : query.group(1)}
except praw.exceptions.APIException as error:
print("API Error:", error)
raise
except AttributeError as error:
print(error)
print("Unable to retrieve comments.")
raise
def submit_comment(self, target, comment):
"""Submit comment to target submission or comment.
Parameters
----------
target : reddit.submission object or reddit.comment object
Target Reddit submission or comment.
comment : str
Comment to post.
Returns
-------
object
reddit.comment of newly created comment.
"""
try:
if target.author != self.username:
print("Posting reply...")
return target.reply(comment)
except praw.exceptions.APIException as error:
print("API Error:", error)
raise
@staticmethod
def read_cache(file):
"""Opens and reads file, converting contents to \n separated list.
Creates cache file if does not exist.
Parameters
----------
file : str
Location of cache file.
Returns
-------
collections.deque
Contents of cache file, limited to config.CACHE_SIZE
"""
try:
print("Loading cache file into memory...")
with open(file, 'r') as data:
cache = data.read()
mem_cache = deque(cache.split('\n'), CACHE_SIZE)
print("Cache loaded.")
except FileNotFoundError:
print("Cache file not found.")
print("Creating cache directory...")
try:
path = ''
for subdirectory in file.split('/')[:-1]:
path += subdirectory + '/'
mkdir(path)
print("Cache directory created.")
except IOError as error:
print(error)
print("Unable to create cache file")
mem_cache = deque([], CACHE_SIZE)
return mem_cache
@staticmethod
def write_cache(file, mem_cache):
"""Writes list into file, converting list to \n separated contents.
Overwrites original cache file.
Creates cache file if does not exist.
Parameters
----------
file : str
Location of cache file.
mem_cache : list or deque
Items in memory cache
"""
try:
print("Saving memory into cache file...")
with open(file, 'w') as cache_file:
try:
cache_file.write(mem_cache.popleft())
for entry in mem_cache:
cache_file.write('\n'+entry)
# avoid adding \n to end of file so that we don't get empty
# entries in deque when next loaded
print("Cache saved")
except IndexError:
print("No items in cache")
except IOError as error:
print(error)
print("Unable to create cache file")
def bot_exit(self, *args, **kwargs):
"""Saves self.processed_comments into cache file before exiting."""
# pylint: disable=unused-argument
print("\nStopping bot...")
self.write_cache(CACHE_FILE, self.processed_comments)
print("Bot stopped")
sys.exit()
def is_summon_chain(self, target):
"""Checks if parent comment of target is from self.
Used to prevent infinite reply loop caused by another bot.
Parameters
----------
target : reddit.comment object
Target Reddit comment.
Returns
-------
bool
True if parent comment of target is from bot. False otherwise.
"""
return True if (
not target.is_root and target.parent().author == self.username
) else False
def has_already_replied(self, target):
"""Checks if target comment has already been replied by bot.
Used to prevent multiple replies to the same request.
Parameters
----------
target : reddit.comment object
Target Reddit comment.
Returns
-------
bool
            True if the bot has already replied to target. False otherwise.
"""
try:
# implement replace_more()?
target.refresh()
for reply in target.replies.list():
if reply.author == self.username:
print("Comment already processed.")
return True
print("Processing comment...")
return False
except praw.exceptions.APIException as error:
print("API Error:", error)
# Failsafe
return True
| 2.8125 | 3 |
autotweet/logger_factory.py | Kjwon15/autotweet | 5 | 12795402 | from __future__ import unicode_literals
import logging
root_logger = logging.getLogger('autotweet')
logging.basicConfig(
format='%(asctime)s {%(module)s:%(levelname)s}: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
def set_level(level):
root_logger.setLevel(level)
get_logger = root_logger.getChild
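# Illustrative usage (not part of the original module):
#   set_level(logging.DEBUG)
#   logger = get_logger('streaming')
#   logger.debug('collector started')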
| 2.234375 | 2 |
temp-snn/snn/var_th.py | Tab-ct/Spiking-Neural-Network | 1 | 12795403 | ############################################## README #################################################
# This calculates threshold for an image depending upon its spiking activity.
########################################################################################################
import numpy as np
from snn.neuron import neuron
import random
from matplotlib import pyplot as plt
from snn.recep_field import rf
from snn.spike_train import encode
from snn.rl import rl
from snn.rl import update
from snn.reconstruct import reconst_weights
from snn.parameters import param as par
import os
def threshold(train):
tu = np.shape(train[0])[0]
thresh = 0
for i in range(tu):
simul_active = sum(train[:,i])
if simul_active>thresh:
thresh = simul_active
return (thresh/3)*par.scale
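# Illustrative sketch (not from the original file): `train` is expected to be a
# 2-D 0/1 spike-train array of shape (n_trains, n_timesteps), so the threshold
# scales with the largest number of simultaneously active spikes, e.g.
#   dummy_train = np.random.randint(0, 2, size=(10, 200))
#   print(threshold(dummy_train))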
if __name__ == '__main__':
# img = cv2.imread("mnist1/" + str(1) + ".png", 0)
	from PIL import Image  # Pillow is assumed here; the original referenced Image without importing it
	img = np.array(Image.open("mnist1/" + str(1) + ".png").convert('L'))
print(img)
# pot = rf(img)
# train = np.array(encode(pot))
# print threshold(train) | 3.03125 | 3 |
configs/_base_/models/attnet_swin.py | 404479768/Swin-ATT | 1 | 12795404 | <reponame>404479768/Swin-ATT
norm_cfg = dict(type='SyncBN', requires_grad=True)
backbone_norm_cfg = dict(type='LN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=128,
patch_size=4,
window_size=12,
mlp_ratio=4,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
strides=(4, 2, 2, 2),
out_indices=(0, 1, 2, 3),
qkv_bias=True,
qk_scale=None,
patch_norm=True,
drop_rate=0.2,
attn_drop_rate=0.2,
drop_path_rate=0.2,
use_abs_pos_embed=False,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN', requires_grad=True)),
decode_head=dict(
type='AttHead',
in_channels=[128, 256, 512, 1024],
in_index=[0, 1, 2, 3],
pool_scales=(1, 2, 3, 6),
channels=128,
dropout_ratio=0.1,
num_classes=3,
norm_cfg=dict(type='SyncBN', requires_grad=True),
align_corners=False,
loss_decode=dict(type='LovaszLoss', reduction='none',
loss_weight=1.0)),
train_cfg=dict(),
test_cfg=dict(mode='whole')) | 1.414063 | 1 |
accounts/migrations/0002_auto_20201110_0811.py | Landgate/Staff-Calibration | 1 | 12795405 | # Generated by Django 3.1 on 2020-11-10 00:11
from __future__ import unicode_literals
from django.db import migrations, models
import csv
from datetime import datetime
def load_initial_data(apps, schema_editor):
Authority = apps.get_model("accounts", "Authority")
with open("assets/authority/authority_names.csv", 'r') as f:
reader = csv.reader(f)
header = next(reader)
authoritys = []
for row in reader:
authority = Authority.objects.create(authority_abbrev = row[0], authority_name = row[1])
#print(authority)
def reverse_func(apps, schema_editor):
Authority = apps.get_model("accounts", Authority)
Authority.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.RunPython(load_initial_data, reverse_func),
]
| 2.203125 | 2 |
main_onnx.py | qzc438/pythonProject | 1 | 12795406 | # all the data from train data set, k-fold validation
import numpy as np
import onnxruntime
import torch
from pandas import read_csv
from tensorflow.python.keras.utils.np_utils import to_categorical
from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score
# load a single file as a numpy array
def load_file(filepath):
dataframe = read_csv(filepath, header=None, delim_whitespace=True)
return dataframe.values
# load a list of files into a 3D array of [samples, timesteps, features]
def load_group(filenames, prefix=''):
loaded = list()
for name in filenames:
data = load_file(prefix + name)
loaded.append(data)
# stack group so that features are the 3rd dimension
loaded = np.dstack(loaded)
return loaded
# load a dataset group, such as train or test
def load_dataset_group(group, prefix=''):
filepath = prefix + group + '/Inertial Signals/'
# load all 9 files as a single array
filenames = list()
# total acceleration
filenames += ['total_acc_x_' + group + '.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_' + group + '.txt']
# body acceleration
filenames += ['body_acc_x_' + group + '.txt', 'body_acc_y_' + group + '.txt', 'body_acc_z_' + group + '.txt']
# body gyroscope
filenames += ['body_gyro_x_' + group + '.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_' + group + '.txt']
# load input data
X = load_group(filenames, filepath)
# load class output
y = load_file(prefix + group + '/y_' + group + '.txt')
return X, y
# load the dataset, returns train and test X and y elements
def load_dataset(prefix=''):
# load all train
trainX, trainy = load_dataset_group('train', prefix + 'UCI HAR Dataset/')
# print(trainX.shape, trainy.shape)
# load all test
testX, testy = load_dataset_group('test', prefix + 'UCI HAR Dataset/')
# print(testX.shape, testy.shape)
# zero-offset class values
trainy = trainy - 1
testy = testy - 1
# one hot encode y
trainy = to_categorical(trainy)
testy = to_categorical(testy)
print(trainX.shape, trainy.shape, testX.shape, testy.shape)
return trainX, trainy, testX, testy
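# For the standard UCI HAR split this typically returns arrays of shape
# (7352, 128, 9) / (7352, 6) for train and (2947, 128, 9) / (2947, 6) for test
# (samples, timesteps, signal channels) with one-hot encoded labels.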
# summarize scores
def summarize_results(scores):
print('scores:', scores)
mean, std = np.mean(scores), np.std(scores)
return [mean, std]
# run an experiment
def run_experiment(repeats=10):
# load data
trainX, trainy, testX, testy = load_dataset()
# sess = onnxruntime.InferenceSession('./models/model1.onnx')
sess = onnxruntime.InferenceSession('./cnn-pytorch.onnx')
for i in sess.get_inputs():
print(i.name)
print(i.shape)
for i in sess.get_outputs():
print(i.name)
print(i.shape)
# y_predict = sess.run(None, {sess.get_inputs()[0].name: testX.astype(np.float32)})
testX = np.transpose(testX, (0, 2, 1))
testX = torch.utils.data.DataLoader(testX, batch_size=32, shuffle=True, num_workers=0)
testy = torch.utils.data.DataLoader(testy, batch_size=32, shuffle=True, num_workers=0)
for features, labels in zip(testX, testy):
y_predict = sess.run(None, {sess.get_inputs()[0].name: features.float().numpy()})
print('y_predict', y_predict)
# y_predict = np.array(y_predict)
# y_predict = np.argmax(y_predict, axis=2)
# testy = labels
# y_true = np.reshape(testy, [-1])
# y_pred = np.reshape(y_predict, [-1])
# accuracy = accuracy_score(y_true, y_pred)
# precision = precision_score(y_true, y_pred, average='macro')
# recall = recall_score(y_true, y_pred, average='macro')
# f1score = f1_score(y_true, y_pred, average='macro')
# print(accuracy, precision, recall, f1score)
run_experiment()
| 2.671875 | 3 |
ProjectApplication/project_core/forms/person.py | code-review-doctor/project-application | 5 | 12795407 | from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div
from django import forms
from django.core.exceptions import ObjectDoesNotExist
from django.core.validators import RegexValidator, ValidationError
from django.forms import Form
from phonenumber_field.formfields import PhoneNumberField
from project_core.models import PersonTitle, Gender, PhysicalPerson, PersonPosition, Contact, CareerStage
from project_core.utils.orcid import orcid_div, field_set_read_only
from .utils import organisations_name_autocomplete, get_field_information
from ..utils.utils import create_person_position
from ..widgets import XDSoftYearMonthPickerInput
HELP_TEXTS_HEAD_OF_YOUR_RESEARCH = {'orcid': 'Enter the ORCID iD (e.g.: 0000-0002-1825-0097).<br>'
'Please ask head of research unit if unknown',
'first_name': 'Populated from ORCID iD',
'surname': 'Populated from ORCID iD',
'academic_title': 'Mandatory if ORCID iD is entered'}
class PersonForm(Form):
def __init__(self, *args, **kwargs):
self.person_position = kwargs.pop('person_position', None)
self._only_basic_fields = kwargs.pop('only_basic_fields', False)
self._all_fields_are_optional = kwargs.pop('all_fields_are_optional', False)
help_texts = kwargs.pop('help_texts', {})
career_stage_queryset = kwargs.pop('career_stages_queryset', None)
super().__init__(*args, **kwargs)
orcid_initial = first_name_initial = surname_initial = organisations_initial = group_initial = \
academic_title_initial = email_initial = phone_initial = gender_initial = career_stage_initial = phd_date_initial = None
if self.person_position:
orcid_initial = self.person_position.person.orcid
first_name_initial = self.person_position.person.first_name
surname_initial = self.person_position.person.surname
organisations_initial = self.person_position.organisation_names.all()
group_initial = self.person_position.group
academic_title_initial = self.person_position.academic_title
career_stage_initial = self.person_position.career_stage
gender_initial = self.person_position.person.gender
email_initial = self.person_position.main_email()
phone_initial = self.person_position.main_phone()
if self.person_position.person.phd_date:
# In the database is always saved as yyyy-mm (validator in the model) but it's visualized as mm-yyyy
phd_date_parts = self.person_position.person.phd_date.split('-')
phd_date_initial = f'{phd_date_parts[1]}-{phd_date_parts[0]}'
self.fields['orcid'] = forms.CharField(initial=orcid_initial,
**get_field_information(PhysicalPerson, 'orcid', label='ORCID iD',
required=True,
help_text='Enter your ORCID iD (e.g.: 0000-0002-1825-0097).<br>'
'Please create an <a href="https://orcid.org">ORCID iD</a> if you do not already have one'))
self.fields['academic_title'] = forms.ModelChoiceField(queryset=PersonTitle.objects.all(),
initial=academic_title_initial,
required=not self._only_basic_fields)
self.fields['first_name'] = forms.CharField(initial=first_name_initial,
label='First name(s)',
help_text='Your name is populated from your ORCID record. If you would like to change it please amend it in <a href="https://orcid.org/login">ORCID</a>')
self.fields['surname'] = forms.CharField(initial=surname_initial,
label='Surname(s)',
help_text='Your surname is populated from your ORCID record. If you would like to change it please amend it in <a href="https://orcid.org/login">ORCID</a>')
field_set_read_only([self.fields['first_name'], self.fields['surname']])
        if not self._only_basic_fields:
self.fields['gender'] = forms.ModelChoiceField(queryset=Gender.objects.all(),
initial=gender_initial)
if career_stage_queryset is None:
career_stage_queryset = CareerStage.objects.all().order_by('list_order', 'name')
self.fields['career_stage'] = forms.ModelChoiceField(
queryset=career_stage_queryset,
initial=career_stage_initial)
self.fields['email'] = forms.EmailField(initial=email_initial,
help_text='Please write a valid email address. You will receive a confirmation email when saving and submitting your application form. This email address will also be used for communication purposes')
self.fields['phone'] = PhoneNumberField(initial=phone_initial,
help_text='Phone number e.g.: +41222222222 . Extension can be added with xNN at the end')
self.fields['phd_date'] = forms.CharField(initial=phd_date_initial,
label='Date of PhD',
help_text='Where applicable, please enter the date on which you were awarded, or expect to be awarded your PhD (use the format mm-yyyy)',
required=False,
widget=XDSoftYearMonthPickerInput,
validators=[RegexValidator(regex='^[0-9]{2}-[0-9]{4}$',
message='Format is mm-yyyy',
code='Invalid format')])
self.fields['organisation_names'] = organisations_name_autocomplete(initial=organisations_initial,
help_text='Please select the organisation(s) to which you are affiliated for the purposes of this proposal.')
self.fields['group'] = forms.CharField(initial=group_initial,
help_text='Please type the names of the group(s) or laboratories to which you are affiliated for the purposes of this proposal',
label='Group / lab',
required=False)
# If adding fields here: see below to remove them from the self.helper.layout
used_help_texts = []
for field_str, field in self.fields.items():
if self._all_fields_are_optional:
field.required = False
if field_str in help_texts:
self.fields[field_str].help_text = help_texts[field_str]
used_help_texts.append(field_str)
if len(used_help_texts) != len(help_texts):
print('Unused help texts:', help_texts.keys() - used_help_texts)
self.helper = FormHelper(self)
self.helper.form_tag = False
self.helper.layout = Layout(
orcid_div('orcid'),
Div(
Div('first_name', css_class='col-4'),
Div('surname', css_class='col-4'),
Div('academic_title', css_class='col-2'),
Div('gender', css_class='col-2'),
css_class='row'
),
Div(
Div('career_stage', css_class='col-8'),
Div('phd_date', css_class='col-4'),
css_class='row'
),
Div(
Div('email', css_class='col-6'),
Div('phone', css_class='col-6'),
css_class='row'
),
Div(
Div('organisation_names', css_class='col-12'),
css_class='row'
),
Div(
Div('group', css_class='col-12'),
css_class='row'
),
)
if self._only_basic_fields:
# The Layout always includes all the fields. Now it's better to remove the fields that don't exist
# to avoid django-crispy-forms warnings (not fatal)
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'gender')
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'career_stage')
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'email')
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phone')
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'phd_date')
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'organisation_names')
PersonForm._delete_field_from_layout(self.helper.layout.fields, 'group')
@staticmethod
def _delete_field_from_layout(container, field_str):
for item in container:
if type(item) == Div:
PersonForm._delete_field_from_layout(item, field_str)
elif type(item) == str and item == field_str:
container.remove(field_str)
def get_person_positions(self):
""" Matches and returns the person_position from the database. """
try:
physical_person = PhysicalPerson.objects.get(
orcid=self.cleaned_data['orcid']
)
except ObjectDoesNotExist:
# Non-existing PHysicalPerson so it doesn't have any PersonPositions associated
return []
person_positions = PersonPosition.objects.filter(
person=physical_person,
academic_title=self.cleaned_data['academic_title'],
group=self.cleaned_data['group'],
career_stage=self.cleaned_data['career_stage']
)
return person_positions
def clean_phd_date(self):
if 'phd_date' not in self.cleaned_data:
return None
if self.cleaned_data['phd_date'] == '':
return None
# It has the correct format mm-yyyy because the field has a validator
# In the DB it's always yyyy-mm because the model has this validator (consistent with general mysql date format)
month, year = self.cleaned_data['phd_date'].split('-')
month_int = int(month)
if month_int < 1 or month_int > 12:
raise ValidationError(f'Invalid month: {month}', code='invalid', params={'value': month})
return f'{year}-{month}'
def clean(self):
cd = super().clean()
if self.errors:
# If there are errors they might be related to orcid (e.g. using the example
# ORCID iD, so cd['orcid'] doesn't exist. At this point we don't do further cleaning:
# the user needs to fix the errors in the form before further cleaning is done.
return cd
# If ORCID iD is filled in: other fields are mandatory
if self._all_fields_are_optional and cd['orcid']:
for field_str, field in self.fields.items():
if field_str not in cd or not cd[field_str]: # It needs to be in cd and have a value
self.add_error(field_str, 'Mandatory field if ORCiD iD is filled in')
if self._all_fields_are_optional and not cd['orcid']:
for field_str, field in self.fields.items():
if field_str in cd and cd[field_str]:
self.add_error(field_str, 'It cannot contain any information if ORCiD ID is empty')
return cd
def save_person(self):
cd = self.cleaned_data
person_position = create_person_position(cd['orcid'], cd['first_name'], cd['surname'],
gender=cd.get('gender', None), phd_date=cd.get('phd_date', None),
academic_title=cd.get('academic_title'), group=cd.get('group'),
career_stage=cd.get('career_stage'),
organisation_names=cd.get('organisation_names', []))
if cd.get('email', None):
# Should this be in the model?
# TODO: discuss how to replace emails
email_contact = person_position.main_email_model()
if email_contact is None:
email_contact = Contact()
email_contact.method = Contact.EMAIL
email_contact.person_position = person_position
email_contact.entry = cd.get('email')
email_contact.save()
if cd.get('phone', None):
# Like before, should this be in the model and consolidated?
# TODO: discuss how to replace phones and handling of multiple phones
phone_contact = person_position.main_phone_model()
if phone_contact is None:
phone_contact = Contact()
phone_contact.method = Contact.PHONE
phone_contact.person_position = person_position
phone_contact.entry = cd.get('phone').as_international
phone_contact.save()
return person_position
| 2.265625 | 2 |
whole_cell_patch/paramWidget.py | 11uc/whole_cell_patch | 2 | 12795408 | # class derived from a GridLayout with a bunch of widgets
from PyQt5.QtWidgets import QLabel, QGridLayout, QLineEdit, \
QVBoxLayout, QHBoxLayout, QComboBox, QPushButton, QCheckBox
import numpy as np
import pandas as pd
class ParamWidget(QGridLayout):
'''
Collecting all the input boxes and labels to assign data.
'''
def __init__(self, paramTyp, param, projMan = None, parent = None):
'''
Build the boxes.
Parameters
----------
paramTyp: dictionary
Defining types of parameters in the set.
param: dictionary
The parameters in the set read from paramMan.
projMan: Project
Project management class, used for access raw data.
Attributes
----------
param: dictionary
Parameter set managed by this grid widget.
err: bool
Whether there's an error in the parameters.
senderList:
'''
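		# Example argument shapes (illustrative only, not from the original project):
		#   paramTyp = {"baseline window": "floatr", "cell type": "combo,exc,inh"}
		#   param    = {"baseline window": [0.0, 0.1], "cell type": "exc"}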
super().__init__(parent)
self.err = False
self.param = param
self.paramTyp = paramTyp
self.projMan = projMan
self.senderList = []
for i, (k, v) in enumerate(paramTyp.items()):
self.addWidget(QLabel(k), i, 0)
val = self.param[k]
if v == "protocol" and projMan != None:
cb = QComboBox()
cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \
self.updateParam(ind, typ, x))
self.addWidget(cb, i, 1)
self.senderList.append(cb)
elif v == "int" or v == "float":
le = QLineEdit()
le.textEdited.connect(lambda x, ind = k, typ = v:
self.updateParam(ind, typ, x))
self.addWidget(le, i, 1)
self.senderList.append(le)
elif v == "intr" or v == "floatr":
le0 = QLineEdit()
le1 = QLineEdit()
le0.textEdited.connect(lambda x, ind = k, typ = v: \
self.updateParam(ind, typ, x, begin = True))
le1.textEdited.connect(lambda x, ind = k, typ = v:
self.updateParam(ind, typ, x, begin = False))
twoHB = QHBoxLayout()
twoHB.addWidget(le0)
twoHB.addWidget(QLabel("to"))
twoHB.addWidget(le1)
self.addLayout(twoHB, i, 1)
self.senderList.append([le0, le1])
elif v == "intl" or v == "floatl" or v == "strl":
le = QLineEdit()
le.textEdited.connect(lambda x, ind = k, typ = v: \
self.updateParam(ind, typ, x))
btn = QPushButton("...")
lstHB = QHBoxLayout()
lstHB.addWidget(le)
lstHB.addWidget(btn)
self.addLayout(lstHB, i, 1)
self.senderList.append(le)
elif v == "bool":
cb = QCheckBox()
cb.stateChanged.connect(lambda x, ind = k, typ = v: \
self.updateParam(ind, typ, x))
self.addWidget(cb, i, 1)
self.senderList.append(cb)
elif "combo" in v:
options = v.split(',')[1:]
cb = QComboBox()
for j in options:
cb.addItem(j)
cb.currentTextChanged.connect(lambda x, ind = k, typ = v: \
self.updateParam(ind, typ, x))
cb.setCurrentIndex(0)
self.addWidget(cb, i, 1)
self.senderList.append(cb)
else:
print("Unknown parameter type.")
self.updateDisp()
self.updateDisp(param)
def updateDisp(self, param = None):
'''
After parameter changes due to importing or change of protocols,
update display of parameters.
Parameters
----------
param: dictionary, optional
New parameters. Default is None, only tend to update protocols.
'''
if param == None:
for i, (k, v) in enumerate(self.paramTyp.items()):
if v == "protocol" and self.projMan != None:
cb = self.senderList[i]
cb.clear()
pt = self.projMan.getProtocols()
for j in pt:
cb.addItem(j)
if len(pt):
cb.setCurrentIndex(0)
else:
self.err = True
else:
self.param = param
for i, (k, v) in enumerate(self.paramTyp.items()):
val = param[k]
if v == "protocol" and self.projMan != None:
cb = self.senderList[i]
cb.clear()
pt = self.projMan.getProtocols()
for j in pt:
cb.addItem(j)
if len(pt):
cb.setCurrentIndex(0)
else:
self.err = True
elif v == "int" or v == "float":
if v == "int" or (1e-3 < abs(val) and abs(val) < 1e3):
ds = str(val)
else:
ds = "{:.3e}".format(val)
le = self.senderList[i]
le.setText(ds)
elif v == "intr" or v == "floatr":
le0, le1 = self.senderList[i]
if v == "intr" or (1e-3 < abs(val[0]) and abs(val[0]) < 1e3):
ds = str(val[0])
else:
ds = "{:.3e}".format(val[0])
le0.setText(ds)
if v == "intr" or (1e-3 < abs(val[1]) and abs(val[1]) < 1e3):
ds = str(val[1])
else:
ds = "{:.3e}".format(val[1])
le1.setText(ds)
elif v == "intl" or v == "floatl":
if len(val):
if v == "intl" or (1e-3 < min(map(abs, val)) and \
max(map(abs, val)) < 1e3):
ds = ", ".join(map(str, val))
else:
ds = ", ".join(["{:.3e}".format(d) for d in val])
else:
ds = ''
le = self.senderList[i]
le.setText(ds)
elif v == "strl":
if len(val):
ds = ", ".join(val)
else:
ds = ''
le = self.senderList[i]
le.setText(ds)
elif v == "bool":
cb = self.senderList[i]
cb.setChecked(val)
elif "combo" in v:
cb = self.senderList[i]
cb.setCurrentText(val)
else:
print("Unknown parameter type")
print(v, val)
self.update()
def updateParam(self, ind, typ, val, **kargs):
'''
Update individual parameters in profile using values get
from input widgets.
Parameters
----------
ind: string
Key of the individual parameter to be set.
typ: string
Type of the individual parameter.
val: string
Text out of the input widget with the value.
**kargs:
Arguments come with some special types of parameters.
- begin: bool
Whether it's the first one of the two value range parameters.
'''
try:
self.err = False
self.sender().setStyleSheet("background:#FFFFFF;")
if typ == "int":
self.param[ind] = int(val)
elif typ == "float":
self.param[ind] = float(val)
elif typ == "intr":
if kargs["begin"]:
self.param[ind][0] = int(val)
else:
self.param[ind][1] = int(val)
elif typ == "floatr":
if kargs["begin"]:
self.param[ind][0] = float(val)
else:
self.param[ind][1] = float(val)
elif typ == "intl":
if len(val):
self.param[ind] = list(map(int, val.split(',')))
else:
self.param[ind] = []
elif typ == "floatl":
if len(val):
self.param[ind] = list(map(float, val.split(',')))
else:
self.param[ind] = []
elif typ == "strl":
if len(val):
self.param[ind] = [d.strip() for d in val.split(',')]
else:
self.param[ind] = []
elif typ == "protocol":
self.param[ind] = val
elif typ == "bool":
self.param[ind] = bool(val)
elif "combo" in typ:
self.param[ind] = val
else:
print("Unknown parameter type")
except ValueError:
self.sender().setStyleSheet("background:#FF0000;")
self.err = True
def getParam(self):
'''
Get parameters managed in this widget.
'''
if not self.err:
return self.param
else:
return None
| 2.625 | 3 |
bioprocs/scripts/imtherapy/pTopiary.py | pwwang/biopipen | 2 | 12795409 | <reponame>pwwang/biopipen<gh_stars>1-10
"""
./topiary \
--vcf somatic.vcf \
--mhc-predictor netmhcpan \
--mhc-alleles HLA-A*02:01,HLA-B*07:02 \
--ic50-cutoff 500 \
--percentile-cutoff 2.0 \
--mhc-epitope-lengths 8-11 \
--rna-gene-fpkm-tracking-file genes.fpkm_tracking \
--rna-min-gene-expression 4.0 \
--rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \
--rna-min-transcript-expression 1.5 \
--output-csv epitopes.csv
"""
import re
from os import environ
from pathlib import Path
from cyvcf2 import VCF
from gff import Gff
from diot import Diot
from cmdy import CmdyReturnCodeException
from bioprocs.utils import shell2 as shell, logger
from bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord
{% from os import path%}
infile = {{i.infile | quote}}
afile = {{i.afile | ?path.isfile | =readlines | !alwaysList | repr}}
outfile = Path({{o.outfile | quote}})
outdir = Path({{o.outdir | quote}})
topiary = {{args.topiary | quote}}
netmhc = {{args.netmhc | quote}}
netmhcpan = {{args.netmhcpan | quote}}
netmhciipan = {{args.netmhciipan | quote}}
netmhccons = {{args.netmhccons | quote}}
smm = {{args.smm | quote}}
smm_pmbec = {{args.smm_pmbec | quote}}
mhc_predictor = {{args.mhc_predictor | quote}}
genome = {{args.genome | quote}}
params = {{args.params | repr}}
refall = {{args.refall | quote}}
tmpdir = Path({{args.tmpdir | quote}}) / '.'.join([
{{proc.id | quote}}, {{proc.tag | quote}}, {{proc.suffix | quote}}, {{job.index | quote}}])
tmpdir.mkdir(exist_ok = True, parents = True)
# check if we have downloaded annotation data for the genome
# topiary will use it to annotate the data
gmaps = {'hg19': 'GRCh37', 'hg38': 'GRCh38'}
datadir = Path.home().joinpath('.cache', 'pyensembl')
if not datadir.joinpath(genome).is_dir() and not datadir.joinpath(gmaps.get(genome, genome)).is_dir():
raise RuntimeError("You don't have annotation data for genome {}{} installed. "
"Either you run 'pyensembl install' first or "
"specify 'params.download_reference_genome_data = True'. "
"If you have it installed somewhere else, make a symbolic link to {}".format(genome, ('/' + gmaps[genome]) if genome in gmaps else '', datadir))
# if not datadir.joinpath(genome).is_dir() and datadir.joinpath(gmaps.get(genome, genome)).is_dir():
# genome = gmaps[genome]
# extract expression from VCF file
vcf = VCF(infile)
gxfile = txfile = False
features = set()
if vcf.contains('GX'):
if not vcf.contains('CSQ'):
        raise ValueError('VCF file has to be annotated by VEP')
# tracking_id class_code nearest_ref_id gene_id gene_short_name tss_id locus length coverage FPKM FPKM_conf_lo FPKM_conf_hi FPKM_status
# ENSG00000240361 - - ENSG00000240361 OR4G11P - chr1:62947-63887 - - 0 0 0 OK
# ENSG00000268020 - - ENSG00000268020 AL627309.1 - chr1:53048-54936 - - 0 0 0 OK
gxfile = outfile.with_suffix('.gx_nopos')
writer = TsvWriter(gxfile)
writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length', 'coverage', 'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status']
writer.writeHead()
for variant in vcf:
        # skip variants that do not carry a GX (gene expression) FORMAT field
try:
gx = variant.format('GX')[0]
except (KeyError, TypeError):
continue
csqs = variant.INFO['CSQ'].split(',')
gxs = gx.split(',')
for gx in gxs:
gene, expr = gx.split('|', 1)
csq = [csq for csq in csqs if f'|{gene}|' in csq][0].split('|')
r = TsvRecord()
r.tracking_id = csq[4]
r.class_code = '-'
r.nearest_ref_id = '-'
r.gene_id = csq[4]
r.gene_short_name = csq[3]
r.tss_id = '-'
r.locus = '<pos>'
r.length = '-'
r.coverage = '-'
r.FPKM = expr
r.FPKM_conf_lo = 0
r.FPKM_conf_hi = 1000
r.FPKM_status = 'OK'
writer.write(r)
features.add(r.tracking_id)
writer.close()
if vcf.contains('TX'):
if not vcf.contains('CSQ'):
        raise ValueError('VCF file has to be annotated by VEP')
# tracking_id class_code nearest_ref_id gene_id gene_short_name tss_id locus length coverage FPKM FPKM_conf_lo FPKM_conf_hi FPKM_status
# ENSG00000240361 - - ENSG00000240361 OR4G11P - chr1:62947-63887 - - 0 0 0 OK
# ENSG00000268020 - - ENSG00000268020 AL627309.1 - chr1:53048-54936 - - 0 0 0 OK
txfile = outfile.with_suffix('.tx_nopos')
writer = TsvWriter(txfile)
writer.cnames = ['tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id', 'locus', 'length', 'coverage', 'FPKM', 'FPKM_conf_lo', 'FPKM_conf_hi', 'FPKM_status']
writer.writeHead()
for variant in vcf:
        # skip variants that do not carry a TX (transcript expression) FORMAT field
try:
tx = variant.format('TX')[0]
except (KeyError, TypeError):
continue
        csqs = variant.INFO['CSQ'].split(',')  # CSQ entries are comma-separated, matching the GX branch above
txs = tx.split(',')
for tx in txs:
transcript, expr = tx.split('|', 1)
csq = [csq for csq in csqs if f'|{transcript}|' in csq][0].split('|')
r = TsvRecord()
r.tracking_id = csq[6]
r.class_code = '-'
r.nearest_ref_id = '-'
r.gene_id = csq[4]
r.gene_short_name = csq[3]
r.tss_id = '-'
r.locus = '<pos>'
r.length = '-'
r.coverage = '-'
r.FPKM = expr
r.FPKM_conf_lo = 0
r.FPKM_conf_hi = 1000
r.FPKM_status = 'OK'
writer.write(r)
features.add(r.tracking_id)
writer.close()
if gxfile or txfile:
allpos = {}
for gff in Gff(refall):
if gff['type'] == 'gene':
feature = gff['attributes']['gene_id']
elif gff['type'] == 'transcript':
feature = gff['attributes']['transcript_id']
else:
continue
if feature not in features:
continue
allpos[feature] ='{}:{}-{}'.format(gff['seqid'], gff['start'], gff['end'])
if gxfile:
gxfile2 = outfile.with_suffix('.gx')
with open(gxfile) as fin, open(gxfile2, 'w') as fout:
for line in fin:
if '<pos>' not in line:
fout.write(line)
else:
feature_id = line.split('\t', 1)[0]
if feature_id not in allpos:
logger.warning('Cannot find position information for: %s, skipping', feature_id)
else:
fout.write(line.replace('<pos>', allpos[feature_id]))
gxfile = gxfile2
if txfile:
txfile2 = outfile.with_suffix('.tx')
with open(txfile) as fin, open(txfile2, 'w') as fout:
for line in fin:
if '<pos>' not in line:
fout.write(line)
else:
feature_id = line.split('\t', 1)[0]
if feature_id not in allpos:
logger.warning('Cannot find position information for: %s, skipping', feature_id)
else:
fout.write(line.replace('<pos>', allpos[feature_id]))
txfile = txfile2
params['rna-gene-fpkm-tracking-file'] = gxfile
params['rna-transcript-fpkm-tracking-file'] = txfile
shell.load_config(topiary = topiary)
if infile.endswith('.vcf') or infile.endswith('.vcf.gz'):
params.vcf = infile
else:
params.maf = infile
alleles = [allele.replace('*', '') for allele in afile]
params['mhc-alleles'] = ','.join(alleles)
params.genome = genome
params['output-csv'] = outfile.with_suffix('.nowt')
params['mhc-predictor'] = mhc_predictor
# make sure those mhc-predictors are in PATH
PATHs = set()
for mhcpred in (netmhc, netmhcpan, netmhciipan, netmhccons, smm, smm_pmbec):
try:
PATHs.add(str(Path(shell.which(mhcpred)).parent))
except CmdyReturnCodeException:
continue
params._env = Diot(PATH = environ['PATH'] + ':' + ':'.join(PATHs))
shell.fg.topiary(**params)
# add wildtype binding
# #,variant,peptide_offset,peptide,allele,affinity,percentile_rank,prediction_method_name,peptide_length,gene,gene_id,transcript_id,transcript_name,effect,effect_type,contains_mutant_residues,mutation_start_in_peptide,mutation_end_in_peptide,gene_expression
# 0,chr6 g.31237146C>A,353,AACSNSAHG,HLA-A*02:01,35651.3,65.0,netMHC,9,HLA-C,ENSG00000204525,ENST00000383329,HLA-C-002,p.Q361H,Substitution,True,7,8,0.0
# 1,chr6 g.33037619G>T,40,AAFVQTHRT,HLA-A*02:01,22758.73,32.0,netMHC,9,HLA-DPA1,ENSG00000231389,ENST00000419277,HLA-DPA1-001,p.P49T,Substitution,True,8,9,0.0
if mhc_predictor in ('netmhc', 'netmhcpan', 'netmhciipan', 'netmhccons', 'smm', 'smm_pmbec'):
wildpeps = set()
mutpeps = {}
tpreader = TsvReader(outfile.with_suffix('.nowt'), comment = '###', delimit = ',')
for r in tpreader:
if r.effect_type != 'Substitution':
# I don't know how to get the wildtype peptides if it is not a substitution
continue
# parse effect: p.N84S
m = re.match(r'^p\.([A-Z])\d+([A-Z])$', r.effect)
if not m:
continue
wildpep = r.peptide
index = int(r.mutation_start_in_peptide)
if wildpep[index] != m.group(2):
continue
wildpep = wildpep[:index] + m.group(1) + wildpep[(index+1):]
mutpeps[r.peptide + '\t' + r.allele] = wildpep
wildpeps.add(wildpep)
def run_netmhc():
shell.load_config(netmhc = netmhc)
mhcallele2 = params['mhc-alleles'].replace(':', '').replace('*', '')
wildfile = outfile.parent / 'wildtype.peptides.txt'
wildfile.write_text('\n'.join(wildpeps))
nparams = Diot(
a = mhcallele2, v = True, inptype = 1, f = wildfile, _prefix = '-',
_iter = True, _debug = True)
res = shell.netmhc(**nparams)
pos_hit = False
wildbindings = {allele: {} for allele in mhcallele2.split(',')}
for line in res:
if 'PEPLIST' not in line or line.startswith('Protein'):
continue
parts = line.split()
wildbindings[parts[1]][parts[2]] = parts[12]
writer = TsvWriter(outfile)
writer.cnames = ['HLA_allele', 'Peptide', 'Affinity', 'Gene', 'ENSG', 'ENST', 'Ref_peptide', 'Ref_affinity', 'Mutation', 'AAChange']
writer.writeHead()
writerall = TsvWriter(outfile.with_suffix('.all.txt'))
writerall.cnames = writer.cnames
writerall.writeHead()
tpreader.rewind()
for r in tpreader:
out = TsvRecord()
out.HLA_allele = r.allele
out.Peptide = r.peptide
out.Affinity = r.affinity
out.Gene = r.gene
out.ENSG = r.gene_id
out.ENST = r.transcript_id
wtpep = mutpeps.get(r.peptide + '\t' + r.allele, '-')
out.Ref_peptide = wtpep
out.Ref_affinity = wildbindings[r.allele.replace(':', '').replace('*', '')].get(wtpep, '>500')
out.Mutation = r.variant
out.AAChange = r.effect
writerall.write(out)
if float(out.Affinity) < 500 and ('>' in out.Ref_affinity or float(out.Ref_affinity) >= 2000):
writer.write(out)
def run_netmhcpan():
shell.load_config(netmhcpan = netmhcpan)
mhcallele2 = params['mhc-alleles'] if 'mhc-alleles' in params else ','.join(
allele for allele in Path(params['mhc-alleles-file']).read_text().splitlines() if allele
)
wildfile = outfile.parent / 'wildtype.peptides.txt'
wildfile.write_text('\n'.join(wildpeps))
xlsfile = outfile.parent / 'wildtype.binding.txt'
nparams = Diot(
a = mhcallele2, v = True, BA = True, inptype = 1, f = wildfile, _prefix = '-',
xls = True, xlsfile = xlsfile)
shell.fg.netmhcpan(**nparams)
if not xlsfile.is_file():
raise RuntimeError("Failed to run netmhcpan, output file not generated.")
# read the output
"""
HLA-A24:02 HLA-A29:02
Pos Peptide ID core icore 1-log50k nM Rank core icore 1-log50k nM Rank
0 LYLPALWFH PEPLIST LYLPALWFH LYLPALWFH 0.1353 11560.5488 6.1138 LYLPALWFH LYLPALWFH 0.4137 568.6087 1.1231
0 RRQRRQRRW PEPLIST RRQRRQRRW RRQRRQRRW 0.0788 21311.8301 12.3392 RRQRRQRRW RRQRRQRRW 0.0308 35829.9805 47.6206
"""
with xlsfile.open('r') as f:
alleles = [allele.replace('HLA-A', 'HLA-A*').replace('HLA-B', 'HLA-B*').replace('HLA-C', 'HLA-C*')
for allele in f.readline().strip().split('\t') if allele]
reader = TsvReader(xlsfile, comment = '\t\t\t')
wildbindings = {}
for r in reader:
peptide = r[1]
for i, hla in enumerate(alleles):
wildbindings[peptide + '\t' + hla] = float(r[7 + i*5])
writer = TsvWriter(outfile)
writer.cnames = tpreader.cnames + ['wildpeptide', 'wildaffinity', 'deltaaffinity']
writer.writeHead()
        # "neat" summary table; the file name below is an assumption (neatfile is not defined elsewhere in this script)
        neatfile = outfile.with_suffix('.neat.txt')
        nwriter = TsvWriter(neatfile)
nwriter.cnames = ['HLA_allele', 'mt_peptide', 'mt_affinity', 'wt_peptide', 'wt_affinity', 'delta_affinity', 'gene']
nwriter.writeHead()
tpreader.rewind()
for r in tpreader:
r.wildpeptide = mutpeps.get(r.peptide + '\t' + r.allele, '-')
r.wildaffinity = wildbindings.get(r.wildpeptide + '\t' + r.allele, '-')
if r.wildaffinity != '-':
r.deltaaffinity = float(r.affinity) - r.wildaffinity
else:
r.deltaaffinity = '-'
nwriter.write([ r.allele, r.peptide, r.affinity, r.wildpeptide,
r.wildaffinity, r.deltaaffinity, r.gene])
writer.write(r)
def run_netmhciipan():
pass
def run_netmhccons():
pass
def run_smm():
pass
def run_smm_pmbec():
pass
runner = {
'netmhc' : run_netmhc,
'netmhcpan' : run_netmhcpan,
'netmhciipan' : run_netmhciipan,
'netmhccons' : run_netmhccons,
'smm' : run_smm,
'smm-pmbec' : run_smm_pmbec,
}
runner.get(mhc_predictor)()
| 1.53125 | 2 |
auth_api/auth_api/urls.py | rwreynolds/auth-api | 0 | 12795410 | from django.conf.urls import url, include
from django.contrib import admin
from rest_framework.documentation import include_docs_urls
from auth_api import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^docs/', include_docs_urls(title='Todo API', description='RESTful API for Todo')),
url(r'^$', views.api_root),
url(r'^', include('users.urls', namespace='users')),
url(r'^', include('todos.urls', namespace='todos')),
]
| 1.78125 | 2 |
hot_crawler/utils.py | wf1314/hot-crawler | 1 | 12795411 | <filename>hot_crawler/utils.py
import redis
from werkzeug.routing import BaseConverter
class RegexConverter(BaseConverter):
"""
    URL route converter that matches path segments against a regular expression.
"""
def __init__(self, url_map, *args):
super().__init__(url_map)
self.regex = args[0]
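# Usage sketch (illustrative, not part of the original module): register the
# converter on a Flask app before declaring routes, e.g.
#   app.url_map.converters['re'] = RegexConverter
#   @app.route('/page/<re(r"\d+"):page_id>')
#   def page(page_id): ...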
def get_redis(host='localhost', port=6379):
"""
    Get a Redis client object backed by a connection pool.
    :param host: Redis server hostname
    :param port: Redis server port
    :return: redis.Redis client instance
"""
pool = redis.ConnectionPool(host=host, port=port, decode_responses=True)
redis_con = redis.Redis(connection_pool=pool)
return redis_con
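# Usage sketch (illustrative; key and value are placeholders):
#   r = get_redis()
#   r.set('hot:weibo', '...')
#   print(r.get('hot:weibo'))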
| 2.59375 | 3 |
app/tests/intergrations/test_opsgenie.py | cds-snc/sre-bot | 0 | 12795412 | <filename>app/tests/intergrations/test_opsgenie.py<gh_stars>0
from integrations import opsgenie
from unittest.mock import patch
@patch("integrations.opsgenie.api_get_request")
@patch("integrations.opsgenie.OPSGENIE_KEY", "OPSGENIE_KEY")
def test_get_on_call_users(api_get_request_mock):
api_get_request_mock.return_value = (
'{"data": {"onCallParticipants": [{"name": "test_user"}]}}'
)
assert opsgenie.get_on_call_users("test_schedule") == ["test_user"]
api_get_request_mock.assert_called_once_with(
"https://api.opsgenie.com/v2/schedules/test_schedule/on-calls",
{"name": "GenieKey", "token": "OPSGENIE_KEY"},
)
@patch("integrations.opsgenie.api_get_request")
def test_get_on_call_users_with_exception(api_get_request_mock):
api_get_request_mock.return_value = "{]"
assert opsgenie.get_on_call_users("test_schedule") == []
@patch("integrations.opsgenie.Request")
@patch("integrations.opsgenie.urlopen")
def test_api_get_request(urlopen_mock, request_mock):
urlopen_mock.return_value.read.return_value.decode.return_value = (
'{"data": {"onCallParticipants": [{"name": "test_user"}]}}'
)
assert (
opsgenie.api_get_request(
"test_url", {"name": "GenieKey", "token": "OPSGENIE_KEY"}
)
== '{"data": {"onCallParticipants": [{"name": "test_user"}]}}'
)
request_mock.assert_called_once_with("test_url")
request_mock.return_value.add_header.assert_called_once_with(
"Authorization", "GenieKey OPSGENIE_KEY"
)
urlopen_mock.assert_called_once_with(request_mock.return_value)
| 2.25 | 2 |
japanese2phoneme/exceptions.py | iory/japanese2phoneme | 0 | 12795413 | <reponame>iory/japanese2phoneme<filename>japanese2phoneme/exceptions.py<gh_stars>0
class UnidentifiedJapaneseText(Exception):
def __init__(self, sentence, word):
super(UnidentifiedJapaneseText, self).__init__()
self.sentence = sentence
self.word = word
def __str__(self):
return (u"No match in dictionary for word '%s' in sentence: \n'%s'" %
(self.word, self.sentence))
class ChunkingError(Exception):
"""Raised when a katakana string cannot be parsed correctly
"""
def __init__(self, txt):
super(ChunkingError, self).__init__()
self.textStr = txt
def __str__(self):
return u"Chunking error for string: \n %s" % self.textStr
class EmptyStrError(Exception):
def __str__(self):
return "Empty string passed in"
class NonKatakanaError(Exception):
def __init__(self, char, utterance):
super(NonKatakanaError, self).__init__()
self.char = char
self.utterance = utterance
def __str__(self):
return (u"Wrongly interpreted character '%s' as kana in utterance:\n%s"
% (self.char, self.utterance))
| 3.03125 | 3 |
N10189/main.py | carmocca/UVA | 3 | 12795414 | <gh_stars>1-10
import sys
def get_neighbours(row, col, rows, cols):
neighbours = []
for i in range(-1, 2):
for j in range(-1, 2):
if i == 0 and j == 0:
continue
elif -1 < row + i < rows and -1 < col + j < cols:
neighbours.append((row + i, col + j))
return neighbours
def solve(field, rows, cols):
res = []
for row in range(rows):
line = ''
for col in range(cols):
if field[row][col] == 1:
line += '*'
continue
neighbours = get_neighbours(row, col, rows, cols)
mines = sum(field[r][c] for r, c in neighbours)
line += str(mines)
res.append(line + '\n')
return res
def main(file):
res = []
field_num = 1
while True:
rows, cols = [int(x) for x in file.readline().split()]
if rows == cols == 0: break
field = [[0 for _ in range(cols)] for _ in range(rows)]
for row in range(rows):
for col, char in enumerate(file.readline()):
if char == '*':
field[row][col] = 1
res.append('Field #{}:\n'.format(field_num))
res.extend(solve(field, rows, cols))
res.append('\n')
field_num += 1
return res[0: -1]
if __name__ == '__main__':
print(''.join(main(sys.stdin)), end='')
| 3.21875 | 3 |
src/python/track_ic/corner_detection.py | SJungert/computer_gaze_tracking | 0 | 12795415 | <reponame>SJungert/computer_gaze_tracking
import cv2
import numpy as np
#filename = 'chessboard2.jpg'
#img = cv2.imread(filename)
#gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
def corner_detect(gray_input, img_input, row, col):
height, width = gray_input.shape
#print(height, width)
gray = gray_input
img = img_input
#crop_img = img[y:y+h, x:x+w]
#gray = gray_input[col+col:h, row+row:w]
#img = img_input[col:col+70, row:row+70]
# find Harris corners
gray = np.float32(gray)
dst = cv2.cornerHarris(gray,2,3,0.004) #0.04
dst = cv2.dilate(dst,None)
ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0) #0.01*dst.max() was original value
dst = np.uint8(dst)
# find centroids
ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
# define the criteria to stop and refine the corners
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria)
# Now draw them
res = np.hstack((centroids,corners))
res = np.int0(res)
img[res[:,1],res[:,0]]=[0,0,255]
img[res[:,3],res[:,2]] = [0,255,0]
print(corners)
corners = np.array(corners).astype(int)
centroids = np.array(centroids).astype(int)
    # cornerSubPix and connectedComponentsWithStats return (x, y) points, so the
    # markers are drawn at (x, y) rather than with swapped coordinates
    for corner in corners:
        x = corner[0]
        y = corner[1]
        cv2.rectangle(img,(x, y),(x + 2,y + 2),(0,0,255),2)
    for corner in centroids:
        x = corner[0]
        y = corner[1]
        cv2.rectangle(img,(x, y),(x + 2,y + 2),(0,255,0),2)
cv2.imwrite('subpixel5.png',img)
return img | 2.984375 | 3 |
code.py | kaushik0033/manipulating-data-with-numpy-code-along-practice | 0 | 12795416 | <filename>code.py
# --------------
import numpy as np
# Not every data format will be in csv; there are other file formats also.
# This exercise will help you deal with other file formats and how to read them.
from numpy import genfromtxt
my_data = genfromtxt(path, delimiter=',',skip_header=1)
# Number of unique matches
unique_team = np.unique(my_data[:,0],axis=0)
print("Uniue no of mataches=", unique_team.shape[0])
print("Set of unique_team which played match=", unique_team[:-1])
print("Sum of all extras in all delivery=",np.sum(my_data[:,17].astype(int), axis = 0))
print("Get all deliveries which given player is out,tell wickettype=",my_data[my_data[:,22]!=np.nan][:,11])
toss_won_by_mum=len(my_data[my_data[:,5]=='Mumbai Indians'])
print("Toss won by Mumbai indians=",toss_won_by_mum)
print("Batsman who scored 6 runs",my_data[my_data[:,16].astype(int)>=6].shape[0])
# How many matches were held in total we need to know so that we can analyze further statistics keeping that in mind.
# Number of unique teams
# this exercise deals with you getting to know that which are all those six teams that played in the tournament.
# Sum of all extras
# An exercise to make you familiar with indexing and slicing up within data.
# Delivery number when a given player got out
# Get the array of all delivery numbers when a given player got out. Also mention the wicket type.
# Number of times Mumbai Indians won the toss
# this exercise will help you get the statistics on one particular team
# Filter records where the batsman scored six, and find the player with the most number of sixes
# An exercise to know who is the most aggressive player or maybe the highest scoring player
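# A possible sketch for that last exercise (hypothetical column index 13 for the batsman
# name, which would have to be re-read as strings because genfromtxt turns text fields into NaN):
#   batsmen = genfromtxt(path, delimiter=',', skip_header=1, usecols=(13,), dtype=str)
#   sixes = batsmen[my_data[:, 16].astype(int) >= 6]
#   names, counts = np.unique(sixes, return_counts=True)
#   print("Most sixes:", names[np.argmax(counts)])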
| 3.8125 | 4 |
sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/_azure_machine_learning_workspaces.py | dubiety/azure-sdk-for-python | 1 | 12795417 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from msrest import Deserializer, Serializer
from ._configuration import AzureMachineLearningWorkspacesConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
class _SDKClient(object):
def __init__(self, *args, **kwargs):
"""This is a fake class to support current implemetation of MultiApiClientMixin."
Will be removed in final version of multiapi azure-core based client
"""
pass
class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient):
"""These APIs allow end users to operate on Azure Machine Learning Workspace resources.
    This client contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param api_version: API version to use if no profile is provided, or if missing in profile.
:type api_version: str
:param base_url: Service URL
:type base_url: str
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '2022-05-01'
_PROFILE_TAG = "azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
'assets': '1.0.0',
'async_operations': 'v1.0',
'batch_job_deployment': '2020-09-01-dataplanepreview',
'batch_job_endpoint': '2020-09-01-dataplanepreview',
'data_call': '1.5.0',
'data_container': '1.5.0',
'data_version': '1.5.0',
'dataset_containers': '2021-10-01',
'dataset_controller_v2': '1.5.0',
'dataset_v2': '1.5.0',
'dataset_versions': '2021-10-01',
'datasets_v1': '1.5.0',
'delete': 'v1.0',
'events': 'v1.0',
'experiments': 'v1.0',
'extensive_model': '1.0.0',
'get_operation_status': '1.5.0',
'metric': 'v1.0',
'migration': '1.0.0',
'models': '1.0.0',
'registry_management_non_workspace': 'v1.0',
'run': 'v1.0',
'run_artifacts': 'v1.0',
'runs': 'v1.0',
'spans': 'v1.0',
'temporary_data_references': '2021-10-01-dataplanepreview',
}},
_PROFILE_TAG + " latest"
)
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
api_version=None, # type: Optional[str]
base_url="https://management.azure.com", # type: str
profile=KnownProfiles.default, # type: KnownProfiles
**kwargs # type: Any
):
self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(AzureMachineLearningWorkspaces, self).__init__(
api_version=api_version,
profile=profile
)
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>`
* 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>`
* v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>`
* v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>`
* 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>`
* 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>`
* 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>`
* 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>`
* 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>`
* 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>`
"""
if api_version == '1.5.0':
from .dataset_dataplane import models
return models
elif api_version == '1.0.0':
from .model_dataplane import models
return models
elif api_version == 'v1.0':
from .registry_discovery import models
return models
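        # Note: the next branch tests the same 'v1.0' value, so it is unreachable
        # through this generated dispatch.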
elif api_version == 'v1.0':
from .runhistory import models
return models
elif api_version == '2020-09-01-dataplanepreview':
from .v2020_09_01_dataplanepreview import models
return models
elif api_version == '2021-10-01':
from .v2021_10_01 import models
return models
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview import models
return models
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview import models
return models
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview import models
return models
elif api_version == '2022-05-01':
from .v2022_05_01 import models
return models
raise ValueError("API version {} is not available".format(api_version))
@property
def assets(self):
"""Instance depends on the API version:
* 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.AssetsOperations>`
"""
api_version = self._get_api_version('assets')
if api_version == '1.0.0':
from .model_dataplane.operations import AssetsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'assets'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def async_operations(self):
"""Instance depends on the API version:
* v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.AsyncOperationsOperations>`
"""
api_version = self._get_api_version('async_operations')
if api_version == 'v1.0':
from .registry_discovery.operations import AsyncOperationsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'async_operations'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def batch_deployments(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchDeploymentsOperations>`
* 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchDeploymentsOperations>`
* 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchDeploymentsOperations>`
"""
api_version = self._get_api_version('batch_deployments')
if api_version == '2021-10-01':
from .v2021_10_01.operations import BatchDeploymentsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import BatchDeploymentsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import BatchDeploymentsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'batch_deployments'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def batch_endpoints(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.BatchEndpointsOperations>`
* 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.BatchEndpointsOperations>`
* 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.BatchEndpointsOperations>`
"""
api_version = self._get_api_version('batch_endpoints')
if api_version == '2021-10-01':
from .v2021_10_01.operations import BatchEndpointsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import BatchEndpointsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import BatchEndpointsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'batch_endpoints'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def batch_job_deployment(self):
"""Instance depends on the API version:
* 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobDeploymentOperations>`
"""
api_version = self._get_api_version('batch_job_deployment')
if api_version == '2020-09-01-dataplanepreview':
from .v2020_09_01_dataplanepreview.operations import BatchJobDeploymentOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'batch_job_deployment'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def batch_job_endpoint(self):
"""Instance depends on the API version:
* 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.operations.BatchJobEndpointOperations>`
"""
api_version = self._get_api_version('batch_job_endpoint')
if api_version == '2020-09-01-dataplanepreview':
from .v2020_09_01_dataplanepreview.operations import BatchJobEndpointOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'batch_job_endpoint'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def code_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeContainersOperations>`
* 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeContainersOperations>`
* 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeContainersOperations>`
* 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeContainersOperations>`
"""
api_version = self._get_api_version('code_containers')
if api_version == '2021-10-01':
from .v2021_10_01.operations import CodeContainersOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import CodeContainersOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import CodeContainersOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import CodeContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'code_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def code_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.CodeVersionsOperations>`
* 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.CodeVersionsOperations>`
* 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.CodeVersionsOperations>`
* 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.CodeVersionsOperations>`
"""
api_version = self._get_api_version('code_versions')
if api_version == '2021-10-01':
from .v2021_10_01.operations import CodeVersionsOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import CodeVersionsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import CodeVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import CodeVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'code_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def component_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentContainersOperations>`
* 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentContainersOperations>`
* 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentContainersOperations>`
* 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentContainersOperations>`
"""
api_version = self._get_api_version('component_containers')
if api_version == '2021-10-01':
from .v2021_10_01.operations import ComponentContainersOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import ComponentContainersOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import ComponentContainersOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import ComponentContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'component_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def component_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComponentVersionsOperations>`
* 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ComponentVersionsOperations>`
* 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ComponentVersionsOperations>`
* 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComponentVersionsOperations>`
"""
api_version = self._get_api_version('component_versions')
if api_version == '2021-10-01':
from .v2021_10_01.operations import ComponentVersionsOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import ComponentVersionsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import ComponentVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import ComponentVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'component_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def compute(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ComputeOperations>`
* 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.ComputeOperations>`
* 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ComputeOperations>`
"""
api_version = self._get_api_version('compute')
if api_version == '2021-10-01':
from .v2021_10_01.operations import ComputeOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import ComputeOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import ComputeOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'compute'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_call(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataCallOperations>`
"""
api_version = self._get_api_version('data_call')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DataCallOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_call'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_container(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataContainerOperations>`
"""
api_version = self._get_api_version('data_container')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DataContainerOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_container'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_containers(self):
"""Instance depends on the API version:
* 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataContainersOperations>`
* 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataContainersOperations>`
"""
api_version = self._get_api_version('data_containers')
if api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import DataContainersOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import DataContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_version(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DataVersionOperations>`
"""
api_version = self._get_api_version('data_version')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DataVersionOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_version'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_versions(self):
"""Instance depends on the API version:
* 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DataVersionsOperations>`
* 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DataVersionsOperations>`
"""
api_version = self._get_api_version('data_versions')
if api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import DataVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import DataVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dataset_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetContainersOperations>`
"""
api_version = self._get_api_version('dataset_containers')
if api_version == '2021-10-01':
from .v2021_10_01.operations import DatasetContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dataset_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dataset_controller_v2(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetControllerV2Operations>`
"""
api_version = self._get_api_version('dataset_controller_v2')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DatasetControllerV2Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dataset_controller_v2'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dataset_v2(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetV2Operations>`
"""
api_version = self._get_api_version('dataset_v2')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DatasetV2Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dataset_v2'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dataset_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatasetVersionsOperations>`
"""
api_version = self._get_api_version('dataset_versions')
if api_version == '2021-10-01':
from .v2021_10_01.operations import DatasetVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dataset_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def datasets_v1(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DatasetsV1Operations>`
"""
api_version = self._get_api_version('datasets_v1')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DatasetsV1Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'datasets_v1'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def datastores(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.DatastoresOperations>`
* 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.DatastoresOperations>`
* 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.DatastoresOperations>`
"""
api_version = self._get_api_version('datastores')
if api_version == '2021-10-01':
from .v2021_10_01.operations import DatastoresOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import DatastoresOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import DatastoresOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'datastores'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def delete(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.DeleteOperations>`
* v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.operations.DeleteOperations>`
"""
api_version = self._get_api_version('delete')
if api_version == '1.5.0':
from .dataset_dataplane.operations import DeleteOperations as OperationClass
elif api_version == 'v1.0':
from .runhistory.operations import DeleteOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'delete'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def environment_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentContainersOperations>`
* 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentContainersOperations>`
* 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentContainersOperations>`
* 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentContainersOperations>`
"""
api_version = self._get_api_version('environment_containers')
if api_version == '2021-10-01':
from .v2021_10_01.operations import EnvironmentContainersOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import EnvironmentContainersOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import EnvironmentContainersOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import EnvironmentContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'environment_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def environment_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.EnvironmentVersionsOperations>`
* 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.EnvironmentVersionsOperations>`
* 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.EnvironmentVersionsOperations>`
* 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.EnvironmentVersionsOperations>`
"""
api_version = self._get_api_version('environment_versions')
if api_version == '2021-10-01':
from .v2021_10_01.operations import EnvironmentVersionsOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import EnvironmentVersionsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import EnvironmentVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import EnvironmentVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'environment_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def events(self):
"""Instance depends on the API version:
* v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.operations.EventsOperations>`
"""
api_version = self._get_api_version('events')
if api_version == 'v1.0':
from .runhistory.operations import EventsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'events'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def experiments(self):
"""Instance depends on the API version:
* v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.operations.ExperimentsOperations>`
"""
api_version = self._get_api_version('experiments')
if api_version == 'v1.0':
from .runhistory.operations import ExperimentsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'experiments'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def extensive_model(self):
"""Instance depends on the API version:
* 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ExtensiveModelOperations>`
"""
api_version = self._get_api_version('extensive_model')
if api_version == '1.0.0':
from .model_dataplane.operations import ExtensiveModelOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'extensive_model'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def get_operation_status(self):
"""Instance depends on the API version:
* 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.operations.GetOperationStatusOperations>`
"""
api_version = self._get_api_version('get_operation_status')
if api_version == '1.5.0':
from .dataset_dataplane.operations import GetOperationStatusOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'get_operation_status'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def jobs(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.JobsOperations>`
* 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.JobsOperations>`
* 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.JobsOperations>`
"""
api_version = self._get_api_version('jobs')
if api_version == '2021-10-01':
from .v2021_10_01.operations import JobsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import JobsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import JobsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'jobs'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def metric(self):
"""Instance depends on the API version:
* v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.operations.MetricOperations>`
"""
api_version = self._get_api_version('metric')
if api_version == 'v1.0':
from .runhistory.operations import MetricOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'metric'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def migration(self):
"""Instance depends on the API version:
* 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.MigrationOperations>`
"""
api_version = self._get_api_version('migration')
if api_version == '1.0.0':
from .model_dataplane.operations import MigrationOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'migration'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def model_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelContainersOperations>`
* 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelContainersOperations>`
* 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelContainersOperations>`
* 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelContainersOperations>`
"""
api_version = self._get_api_version('model_containers')
if api_version == '2021-10-01':
from .v2021_10_01.operations import ModelContainersOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import ModelContainersOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import ModelContainersOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import ModelContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'model_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def model_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.ModelVersionsOperations>`
* 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.ModelVersionsOperations>`
* 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.ModelVersionsOperations>`
* 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.ModelVersionsOperations>`
"""
api_version = self._get_api_version('model_versions')
if api_version == '2021-10-01':
from .v2021_10_01.operations import ModelVersionsOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import ModelVersionsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import ModelVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import ModelVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'model_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def models(self):
"""Instance depends on the API version:
* 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.operations.ModelsOperations>`
"""
api_version = self._get_api_version('models')
if api_version == '1.0.0':
from .model_dataplane.operations import ModelsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'models'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def online_deployments(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineDeploymentsOperations>`
* 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineDeploymentsOperations>`
* 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineDeploymentsOperations>`
"""
api_version = self._get_api_version('online_deployments')
if api_version == '2021-10-01':
from .v2021_10_01.operations import OnlineDeploymentsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import OnlineDeploymentsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import OnlineDeploymentsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'online_deployments'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def online_endpoints(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.OnlineEndpointsOperations>`
* 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.operations.OnlineEndpointsOperations>`
* 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.OnlineEndpointsOperations>`
"""
api_version = self._get_api_version('online_endpoints')
if api_version == '2021-10-01':
from .v2021_10_01.operations import OnlineEndpointsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from .v2022_02_01_preview.operations import OnlineEndpointsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import OnlineEndpointsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'online_endpoints'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def operations(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.operations.Operations>`
* 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.Operations>`
* 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.operations.Operations>`
"""
api_version = self._get_api_version('operations')
if api_version == '2021-10-01':
from .v2021_10_01.operations import Operations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import Operations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'operations'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_endpoint_connections(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateEndpointConnectionsOperations>`
* 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateEndpointConnectionsOperations>`
* 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateEndpointConnectionsOperations>`
"""
api_version = self._get_api_version('private_endpoint_connections')
if api_version == '2021-10-01':
from .v2021_10_01.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import PrivateEndpointConnectionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'private_endpoint_connections'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_link_resources(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.PrivateLinkResourcesOperations>`
* 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.PrivateLinkResourcesOperations>`
* 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.PrivateLinkResourcesOperations>`
"""
api_version = self._get_api_version('private_link_resources')
if api_version == '2021-10-01':
from .v2021_10_01.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import PrivateLinkResourcesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'private_link_resources'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def quotas(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.QuotasOperations>`
* 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.QuotasOperations>`
* 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.QuotasOperations>`
"""
api_version = self._get_api_version('quotas')
if api_version == '2021-10-01':
from .v2021_10_01.operations import QuotasOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import QuotasOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import QuotasOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'quotas'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def registry_management_non_workspace(self):
"""Instance depends on the API version:
* v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.operations.RegistryManagementNonWorkspaceOperations>`
"""
api_version = self._get_api_version('registry_management_non_workspace')
if api_version == 'v1.0':
from .registry_discovery.operations import RegistryManagementNonWorkspaceOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'registry_management_non_workspace'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def run(self):
"""Instance depends on the API version:
* v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunOperations>`
"""
api_version = self._get_api_version('run')
if api_version == 'v1.0':
from .runhistory.operations import RunOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'run'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def run_artifacts(self):
"""Instance depends on the API version:
* v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunArtifactsOperations>`
"""
api_version = self._get_api_version('run_artifacts')
if api_version == 'v1.0':
from .runhistory.operations import RunArtifactsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'run_artifacts'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def runs(self):
"""Instance depends on the API version:
* v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.operations.RunsOperations>`
"""
api_version = self._get_api_version('runs')
if api_version == 'v1.0':
from .runhistory.operations import RunsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'runs'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def spans(self):
"""Instance depends on the API version:
* v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.operations.SpansOperations>`
"""
api_version = self._get_api_version('spans')
if api_version == 'v1.0':
from .runhistory.operations import SpansOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'spans'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def temporary_data_references(self):
"""Instance depends on the API version:
* 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.operations.TemporaryDataReferencesOperations>`
"""
api_version = self._get_api_version('temporary_data_references')
if api_version == '2021-10-01-dataplanepreview':
from .v2021_10_01_dataplanepreview.operations import TemporaryDataReferencesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'temporary_data_references'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def usages(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.UsagesOperations>`
* 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.UsagesOperations>`
* 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.UsagesOperations>`
"""
api_version = self._get_api_version('usages')
if api_version == '2021-10-01':
from .v2021_10_01.operations import UsagesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import UsagesOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import UsagesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'usages'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def virtual_machine_sizes(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.VirtualMachineSizesOperations>`
* 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.VirtualMachineSizesOperations>`
* 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.VirtualMachineSizesOperations>`
"""
api_version = self._get_api_version('virtual_machine_sizes')
if api_version == '2021-10-01':
from .v2021_10_01.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import VirtualMachineSizesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_sizes'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def workspace_connections(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceConnectionsOperations>`
* 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceConnectionsOperations>`
* 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceConnectionsOperations>`
"""
api_version = self._get_api_version('workspace_connections')
if api_version == '2021-10-01':
from .v2021_10_01.operations import WorkspaceConnectionsOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import WorkspaceConnectionsOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import WorkspaceConnectionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'workspace_connections'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def workspace_features(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspaceFeaturesOperations>`
* 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspaceFeaturesOperations>`
* 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspaceFeaturesOperations>`
"""
api_version = self._get_api_version('workspace_features')
if api_version == '2021-10-01':
from .v2021_10_01.operations import WorkspaceFeaturesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import WorkspaceFeaturesOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import WorkspaceFeaturesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'workspace_features'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def workspaces(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.operations.WorkspacesOperations>`
* 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.operations.WorkspacesOperations>`
* 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.operations.WorkspacesOperations>`
"""
api_version = self._get_api_version('workspaces')
if api_version == '2021-10-01':
from .v2021_10_01.operations import WorkspacesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from .v2022_01_01_preview.operations import WorkspacesOperations as OperationClass
elif api_version == '2022-05-01':
from .v2022_05_01.operations import WorkspacesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'workspaces'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def close(self):
self._client.close()
def __enter__(self):
self._client.__enter__()
return self
def __exit__(self, *exc_details):
self._client.__exit__(*exc_details)
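# Example usage sketch (not part of the generated client; the subscription id is a placeholder):
#   from azure.identity import DefaultAzureCredential
#   client = AzureMachineLearningWorkspaces(
#       credential=DefaultAzureCredential(),
#       subscription_id="00000000-0000-0000-0000-000000000000",
#   )
#   for ws in client.workspaces.list_by_subscription():
#       print(ws.name)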
| 1.609375 | 2 |
test.py | olivetree123/Winney | 0 | 12795418 | <filename>test.py
from winney.winney import Address
from winney import Winney, retry
from winney.mock import Mock
class UserMock(Mock):
data = {"name": "olivetree"}
class UserCenter(object):
def __init__(self):
addr = Address(host="localhost", port=5000)
self.winney = Winney(host="localhost", port=5000, addrs=[addr])
self.init_functions()
def init_functions(self):
self.winney.register(method="post",
name="login",
uri="/api/login",
mock=False,
mock_data=None)
self.winney.register(method="get",
name="get_user",
uri="/api/user",
mock=True,
mock_data=UserMock())
@retry
def login(self, account, password):
r = self.winney.login(json={"account": account, "password": password})
return r.json()
@retry
def get_user(self, user_id):
r = self.winney.get_user(data={"user_id": user_id})
return r.json()
if __name__ == "__main__":
uc = UserCenter()
uc.login("hello", "123456")
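    # get_user was registered with mock=True, so a call like the one below would be
    # answered from UserMock rather than a live HTTP request (sketch, not in the original):
    #   print(uc.get_user(user_id=1))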
| 2.84375 | 3 |
AWSteria/src_Testbench_AWS/Top/Gen_Bytevec/Gen_Bytevec_Mux.py | zwadood/BESSPIN-CloudGFE | 0 | 12795419 | #!/usr/bin/python3 -B
# Copyright (c) 2020 <NAME>
# See README for details
# ================================================================
import sys
import os
import stat
import importlib
import pprint
from Gen_Bytevec_Mux_BSV import *
from Gen_Bytevec_Mux_C import *
pp = pprint.PrettyPrinter()
# ================================================================
def mkHelp_text (argv):
return "Usage: " + argv [0] + " <spec_file.py>" + '''
<spec_file.py> should be a Python source file defining three variables:
C_to_BSV_structs
BSV_to_C_structs
package_name
The first two are lists of 'struct specs', each of which has the following form:
{ 'struct_name': "Foo",
'fields' : [ { 'field_name' : 'fieldfoo', 'width_bits': width },
...
{ 'field_name' : 'fieldfoo', 'width_bits': width } ]}
Struct names should be globally unique.
Field names should be unique within a struct.
It is ok for a field-width to be 0 (e.g., unused 'user' field in an AXI channel).
Generates three output files:
package_name.bsv
package_name.h
package_name.c
The C/BSV code contains:
Struct defs for each struct, where each field has type:
BSV: Bit #(w) where w is the specified bit-width
C: uint8_t, uint16_t, uint32_t or uint64_t, as appropriate, if width <= 64 bits,
uint8_t [..] if wider
A 'state' struct containing queues and communication 'credits' for each struct type,
Functions for C application code to enqueue each type of send-struct into a pending queue
Functions for C application code to dequeue each type of receive-struct from a pending queue
A function for the C application code to encode an already
queued send-struct into a bytevec ready for transmission
A function for the C application code to decode a received
bytevec into a queued receive-struct
'''
# ================================================================
def main (argv = None):
if ((len (argv) != 2)
or (argv [1] == "-h")
or (argv [1] == "--help")):
sys.stdout.write (mkHelp_text (argv))
return 0
spec_filename = argv [1]
if spec_filename.endswith (".py"):
spec_filename = spec_filename [:-3]
try:
# Warning:
# This dynamic import of the spec_filename spec file is fragile (only works if both
# this Python executable and spec_filename.py are in the current dir.
# Study importlib examples where there is some notion of 'finding' from a path etc.
spec = importlib.import_module (spec_filename) # ("type_specs")
except:
sys.stdout.write ("ERROR: unable to import module '{:s}'\n".format (spec_filename))
sys.exit (1)
sys.stdout.write ("Spec file imported: '{:s}'\n".format (spec_filename))
package_name = spec.package_name
sys.stdout.write ("Package name: '{:s}'\n".format (package_name))
# Compute all necessary byte-widths for transmission and C structs
# Each of the 'field' structs extends with 'width_bytes' and 'dimension'
sys.stdout.write ("Computing all necessary byte-widths for packet formats and C structs.\n")
C_to_BSV_structs = [compute_width_bytes (s) for s in spec.C_to_BSV_structs]
BSV_to_C_structs = [compute_width_bytes (s) for s in spec.BSV_to_C_structs]
# Data structure for different parts of a packet: C to BSV
max_C_to_BSV_struct_bytes = max ([ s ['size_bytes'] for s in C_to_BSV_structs ])
C_to_BSV_packet_bytes = { 'packet_len' : 1,
'num_credits' : len (BSV_to_C_structs),
'channel_id' : 1,
'payload' : max_C_to_BSV_struct_bytes }
# Data structure for different parts of a packet: BSV to C
max_BSV_to_C_struct_bytes = max ([ s ['size_bytes'] for s in BSV_to_C_structs ])
BSV_to_C_packet_bytes = { 'packet_len' : 1,
'num_credits' : len (C_to_BSV_structs),
'channel_id' : 1,
'payload' : max_BSV_to_C_struct_bytes }
# Generate the .bsv file
Gen_BSV (spec_filename,
package_name,
C_to_BSV_structs, C_to_BSV_packet_bytes,
BSV_to_C_structs, BSV_to_C_packet_bytes)
# Generate .h and .c files
Gen_C (spec_filename,
package_name,
C_to_BSV_structs, C_to_BSV_packet_bytes,
BSV_to_C_structs, BSV_to_C_packet_bytes)
return 0
# ================================================================
# This is a struct spec -> struct spec function
# In struct_spec_in, each field spec has attributes 'field_name' and 'width_bits'
# In struct_spec_out, we add attributes 'width_bytes' and 'dimension'
# and we add struct attribute 'size_bytes' for total # of bytes
# Fields <= 64b wide, fit in C scalars (uint8_t/uint16_t/uint32_t/uint64_t)
# have dimension 1 and width_bytes of 1,2,4 or 8
# Larger fields are represented in C as uint8_t [N]
# have dimension N and width_bytes 1
def compute_width_bytes (struct_spec_in):
fields_out = []
size_bytes = 0
for f in struct_spec_in ['fields']:
field_name = f ['field_name']
width_bits = f ['width_bits']
width_bytes = 0
dimension = 1;
if (width_bits == 0):
width_bytes = 0
elif (width_bits <= 8):
width_bytes = 1
elif (width_bits <= 16):
width_bytes = 2
elif (width_bits <= 32):
width_bytes = 4
elif (width_bits <= 64):
width_bytes = 8
else:
width_bytes = 1
dimension = (width_bits + 7) // 8
field_out = {'field_name' : field_name,
'width_bits' : width_bits,
'width_bytes': width_bytes,
'dimension' : dimension}
fields_out.append (field_out)
size_bytes += width_bytes * dimension
struct_spec_out = {'struct_name': struct_spec_in ['struct_name'],
'fields' : fields_out,
'size_bytes' : size_bytes}
return struct_spec_out
# ================================================================
# For non-interactive invocations, call main() and use its return value
# as the exit code.
if __name__ == '__main__':
sys.exit (main (sys.argv))
| 2.234375 | 2 |
worker/statistics/StatisticsPrinter.py | Larcius/gta5-modder-utils | 3 | 12795420 | <reponame>Larcius/gta5-modder-utils
import os
import re
from natsort import natsorted
from common.ymap.LodLevel import LodLevel
from common.ytyp.YtypItem import YtypItem
from common.ytyp.YtypParser import YtypParser
class StatisticsPrinter:
countProps: dict[str, dict[str, int]]
inputDir: str
ytypItems: dict[str, YtypItem]
def __init__(self, inputDir: str):
self.inputDir = inputDir
def run(self):
self.readYtypItems()
self.countProps = {}
self.processFiles()
def readYtypItems(self):
self.ytypItems = YtypParser.readYtypDirectory(os.path.join(os.path.dirname(__file__), "..", "..", "resources", "ytyp"))
def processFiles(self):
for filename in natsorted(os.listdir(self.inputDir)):
if not filename.endswith(".ymap.xml") or filename.endswith("_lod.ymap.xml"):
continue
f = open(os.path.join(self.inputDir, filename), 'r')
content = f.read()
expression = '<Item type="CEntityDef">' + \
'\\s*<archetypeName>([^<]+)</archetypeName>' + \
'(?:\\s*<[^/].*>)*?' + \
'\\s*<lodLevel>(?:' + LodLevel.HD + "|" + LodLevel.ORPHAN_HD + ')</lodLevel>' + \
'(?:\\s*<[^/].*>)*?' + \
'\\s*</Item>'
for match in re.finditer(expression, content):
archetypeName = match.group(1).lower()
if archetypeName in self.ytypItems:
ytypName = self.ytypItems[archetypeName].parent
else:
ytypName = "others"
# if not tree.startswith("prop_s_pine_") and not tree.startswith("prop_tree_") and not tree.startswith("prop_w_r_cedar_") and not tree.startswith("test_tree_"):
# continue
if ytypName not in self.countProps:
self.countProps[ytypName] = {}
if archetypeName not in self.countProps[ytypName]:
self.countProps[ytypName][archetypeName] = 0
self.countProps[ytypName][archetypeName] += 1
totalCount = 0
ytypCounts = {}
for ytyp in natsorted(list(self.countProps.keys())):
ytypCounts[ytyp] = 0
print(ytyp + ":")
for prop in natsorted(list(self.countProps[ytyp])):
num = self.countProps[ytyp][prop]
ytypCounts[ytyp] += num
print("\t" + prop + ":\t\t" + str(num))
totalCount += ytypCounts[ytyp]
print("\t----------------------------------------------")
print("\t" + ytyp + " total:\t\t" + str(ytypCounts[ytyp]) + "\n")
print("\nsummary:")
for ytyp in natsorted(list(ytypCounts.keys())):
print(ytyp + ":\t\t" + str(ytypCounts[ytyp]))
print("----------------------------------------------")
print("total:\t\t" + str(totalCount))
| 2.171875 | 2 |
data.py | ilyakava/tfST | 0 | 12795421 | <gh_stars>0
import os
import numpy as np
import scipy.io as sio
def get_dataset(opt):
if opt.dataset == 'IP':
mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_corrected.mat'))
data = mat_contents['indian_pines_corrected'].astype(np.float32)
data /= np.max(np.abs(data))
mat_contents = sio.loadmat(os.path.join(opt.data_root, 'Indian_pines_gt.mat'))
labels = mat_contents['indian_pines_gt']
else:
raise NotImplementedError('dataset: %s' % opt.dataset)
return data, labels
| 2.5 | 2 |
callflow/modules/histogram_rank.py | jarusified/CallFlow | 2 | 12795422 | # Copyright 2017-2020 Lawrence Livermore National Security, LLC and other
# CallFlow Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import pandas as pd
class RankHistogram:
def __init__(self, state, name):
self.graph = state.new_gf.graph
self.df = state.new_gf.df
self.entire_df = state.new_entire_gf.df
self.name = name
self.entry_funcs = {}
self.result = self.run()
def run(self):
ret = []
module = self.name.split("=")[0]
func_in_module = self.df[self.df.module == module]["name"].unique().tolist()
for idx, func in enumerate(func_in_module):
ret.append(
{
"name": func,
"time (inc)": self.df.loc[self.df["name"] == func][
"time (inc)"
].tolist(),
"time": self.df.loc[self.df["name"] == func]["time"].tolist(),
"rank": self.df.loc[self.df["name"] == func]["rank"].tolist(),
"dataset": self.df.loc[self.df["name"] == func]["dataset"].tolist(),
}
)
ret_df = pd.DataFrame(ret)
return ret_df.to_json(orient="columns")
| 2.40625 | 2 |
test_loader.py | hasancaslan/CountingBeautifulStrings | 0 | 12795423 | import os
def read_test_case(file_path):
"""
reads one test case from file.
returns contents of test case
Parameters
----------
file_path : str
the path of the test case file to read.
Returns
-------
list
a list of contents of the test case.
"""
file = open(file_path, "r")
number = int(file.readline().strip())
case = list()
for i in range(number):
case.append(file.readline().strip())
file.close()
return case
def load_test_cases(dir, file_name):
"""
loads one test case from file.
returns a map contents of all test cases.
Parameters
----------
dir : str
directory of the files to load.
file_name : str
the name of the file that contains all
test case files name to read.
Returns
-------
dict
a dict of contents of all test cases.
"""
path = os.path.join(dir, file_name)
test_cases_files = open(path, "r")
test_cases = dict()
for file_name in test_cases_files.readlines():
case_name = file_name.strip().split(".")[0]
file_path = os.path.join(dir, file_name.strip())
test_cases[case_name] = read_test_case(file_path)
test_cases_files.close()
return test_cases
| 3.65625 | 4 |
Q/questionnaire/views/views_errors.py | trubliphone/esdoc-test | 0 | 12795424 | <filename>Q/questionnaire/views/views_errors.py
####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2017 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
from django.shortcuts import render
from Q.questionnaire import q_logger
def q_error(request, error_msg="", status_code=400):
# print error_msg...
q_logger.error(error_msg)
# gather all the extra information required by the template...
context = {
"error_msg": error_msg,
"status_code": status_code,
}
return render(request, "questionnaire/q_error.html", context=context, status=status_code)
# def q_400(request):
# context = {
# "error_msg": "bad request",
# }
# return render(request, "questionnaire/q_error.html", context=context, status=400)
#
#
# def q_403(request):
# context = {
# "error_msg": "permission_denied",
# }
# return render(request, "questionnaire/q_error.html", context=context, status=403)
def q_404(request):
context = {}
return render(request, "questionnaire/q_404.html", context=context, status=404)
def q_500(request):
context = {}
return render(request, "questionnaire/q_500.html", context=context, status=404)
| 2.03125 | 2 |
python/lusol.py | Rioghasarig/sr-cur | 0 | 12795425 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 8 12:18:32 2022
@author: oekenta
"""
from ctypes import c_ulonglong, c_double, cdll, byref
import numpy as np
class lusol:
liblusol = 0
@classmethod
def loadlibrary(cls):
cls.liblusol = cdll.LoadLibrary('/home/grad/oekenta/sr-cur/src/libclusol.so')
def __init__(self, A : np.array ):
# LUSOL input parameters
self.rank = 0
self.maxcol = 0
self.pivot = 0
self.keepLU = 0
self.Ltol1 = 0
self.Ltol2 = 0
self.small = 0
self.Utol1 = 0
self.Utol2 = 0
self.Uspace = 0
self.dens1 = 0
self.dens2 = 0
#LU1FAC Inputs
self.m = c_ulonglong(A.shape[0])
self.n = c_ulonglong(A.shape[1])
self.nelem = c_ulonglong(np.count_nonzero(A))
self.lena = c_ulonglong(10000)
self.ap = c_ulonglong*A.shape[0]
self.aq = c_ulonglong*A.shape[1]
def factorize():
A = np.array([[1,2],[3,4]])
l = lusol(A)
l.loadlibrary()
| 2.1875 | 2 |
reboot_required/tests/test_reboot_required.py | divyamamgai/integrations-extras | 158 | 12795426 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from os.path import isfile
def test_ok(aggregator, check, instance_ok):
assert isfile(instance_ok['created_at_file'])
check.check(instance_ok)
aggregator.assert_service_check('system.reboot_required', status=check.OK)
def test_not_present_ok(aggregator, check, instance_not_present):
assert not isfile(instance_not_present['created_at_file'])
check.check(instance_not_present)
aggregator.assert_service_check('system.reboot_required', status=check.OK)
def test_warning(aggregator, check, instance_warning):
check.check(instance_warning)
aggregator.assert_service_check('system.reboot_required', status=check.WARNING)
def test_critical(aggregator, check, instance_critical):
check.check(instance_critical)
aggregator.assert_service_check('system.reboot_required', status=check.CRITICAL)
| 2.0625 | 2 |
src/network/assemble.py | BeholdersEye/PyBitmessage | 1,583 | 12795427 | <gh_stars>1000+
"""
Create bitmessage protocol command packets
"""
import struct
import addresses
from network.constants import MAX_ADDR_COUNT
from network.node import Peer
from protocol import CreatePacket, encodeHost
def assemble_addr(peerList):
"""Create address command"""
if isinstance(peerList, Peer):
peerList = [peerList]
if not peerList:
return b''
retval = b''
for i in range(0, len(peerList), MAX_ADDR_COUNT):
payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT]))
for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]:
# 64-bit time
payload += struct.pack('>Q', timestamp)
payload += struct.pack('>I', stream)
# service bit flags offered by this node
payload += struct.pack('>q', 1)
payload += encodeHost(peer.host)
# remote port
payload += struct.pack('>H', peer.port)
retval += CreatePacket('addr', payload)
return retval
| 2.53125 | 3 |
Swin-Transformer/Model.py | HzcIrving/DLRL_PlayGround | 27 | 12795428 | #! /usr/bin/enc python
# -*- coding: utf-8 -*-
# author: <NAME>
# email: <EMAIL>
"""
Swin Transformer
1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍;
这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。
2. 使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head
Self-Attention只在每个Windows内部进行。相对于ViT直接对整个Global进行MSA,计算复杂度更低;但是会隔绝不同
窗口之间的信息传递,通过Shifted Windows Multi-head Self-Atten来让信息在相邻窗口进行传递。
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`
- https://arxiv.org/pdf/2103.14030
Code/weights from https://github.com/microsoft/Swin-Transformer
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import numpy as np
from typing import Optional
from BasicModule import PatchMerging, DropPath, PatchEmbed
from BasicModule import Mlp
from BasicModule import window_partition, window_reverse
"""SwinT
window_size = 7
img_size = 224
Trained ImageNet-1k
depths->2,2,6,2
"""
def swin_tiny_patch4_window7_224(num_classes: int = 1000, **kwargs):
# trained ImageNet-1K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=96,
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
num_classes=num_classes,
**kwargs)
return model
"""Swin-S
depths->2,2,18,2
"""
def swin_small_patch4_window7_224(num_classes: int = 1000, **kwargs):
# trained ImageNet-1K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=96,
depths=(2, 2, 18, 2),
num_heads=(3, 6, 12, 24),
num_classes=num_classes,
**kwargs)
return model
"""Swin-B"""
def swin_base_patch4_window7_224(num_classes: int = 1000, **kwargs):
# trained ImageNet-1K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
num_classes=num_classes,
**kwargs)
return model
def swin_base_patch4_window12_384(num_classes: int = 1000, **kwargs):
# trained ImageNet-1K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=12,
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
num_classes=num_classes,
**kwargs)
return model
def swin_base_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs):
# trained ImageNet-22K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
num_classes=num_classes,
**kwargs)
return model
def swin_base_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs):
# trained ImageNet-22K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=12,
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
num_classes=num_classes,
**kwargs)
return model
"""Swin-Large"""
def swin_large_patch4_window7_224_in22k(num_classes: int = 21841, **kwargs):
# trained ImageNet-22K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=7,
embed_dim=192,
depths=(2, 2, 18, 2),
num_heads=(6, 12, 24, 48),
num_classes=num_classes,
**kwargs)
return model
def swin_large_patch4_window12_384_in22k(num_classes: int = 21841, **kwargs):
# trained ImageNet-22K
# https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth
model = SwinTransformer(in_chans=3,
patch_size=4,
window_size=12,
embed_dim=192,
depths=(2, 2, 18, 2),
num_heads=(6, 12, 24, 48),
num_classes=num_classes,
**kwargs)
return model
"""Swin Transformer"""
class SwinTransformer(nn.Module):
"""Swin Transformer结构
这里有个不同之处,就是每个Stage Layer中,
"""
def __init__(self, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),
window_size=7, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, patch_norm=True,
use_checkpoint=False, **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.patch_norm = patch_norm
        # number of channels (C) of the output feature map
# H/4 x W/4 x 48 -> H/4 x W/4 x C(Stage1) -> H/8 x W/8 x 2C(Stage2) -> H/16 x W/16 x 4C(stage3) ...
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
        # Split the image into non-overlapping patches.
        # input: (Bs, 224, 224, 3)
        # output (e.g. patch_size=4): (Bs, 56x56, 4x4x3)
self.patch_embed = PatchEmbed(
patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
# Drop Path
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
        # build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
            # Note: the stage built here differs slightly from the figure in the paper:
            # a stage does not contain its own patch_merging layer, but the one of the next stage.
layers = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layers)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self,x):
# x:[B, L, C]
x,H,W = self.patch_embed(x)
x = self.pos_drop(x)
        # multi-scale hierarchical stages
for layer in self.layers:
x,H,W = layer(x,H,W)
x = self.norm(x) # [B, L, C]
x = self.avgpool(x.transpose(1, 2)) # [B, C, 1]
x = torch.flatten(x, 1)
        x = self.head(x)  # classification head
return x
"""一个Stage内的基本SwinTransformer模块"""
class BasicLayer(nn.Module):
"""
    A single-stage Swin Transformer layer: a stack of Swin Transformer blocks (alternating
    W-MSA and SW-MSA), optionally followed by a patch-merging downsampling layer.
"""
def __init__(self, dim, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
"""
Args:
dim (int): Number of input channels.
            depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
super(BasicLayer, self).__init__()
self.dim = dim
self.depth = depth
self.window_size = window_size
        self.use_checkpoint = use_checkpoint  # gradient checkpointing to save memory
self.shift_size = window_size // 2
        # build the Swin Transformer blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(
dim=dim,
num_heads=num_heads,
window_size=window_size,
                shift_size=0 if (i % 2 == 0) else self.shift_size,  # even i: W-MSA, odd i: SW-MSA (as in the paper), so neighbouring windows can exchange information
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
        # patch merging layer, similar to pooling-style downsampling
if downsample is not None:
self.downsample = downsample(dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def create_mask(self,x,H,W):
"""
        After the cyclic shift used by SW-MSA, the shifted top-left window (the central window
        before the shift) still contains patches that are true neighbours, so they may attend to
        each other freely. The remaining windows mix patches that were moved in from far-away
        parts of the image; such patches should not attend to each other, so a mask is built to
        suppress those pairs. (Easiest to picture on a 14x14-patch feature map.)
H: Feature Map Height
W: Feature Map Width
x: Feature Map
"""
        # Compute the attention mask for SW-MSA.
        # Make sure Hp and Wp are integer multiples of window_size.
Hp = int(np.ceil(H / self.window_size)) * self.window_size
Wp = int(np.ceil(W / self.window_size)) * self.window_size
        # same channel ordering as the feature map, so window_partition can be reused below
img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # [1, Hp, Wp, 1]
        # slice the padded map into regions; the region labels are used to build the mask
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
        # give each region a distinct integer label
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
        # partition the region-label map into (shifted) windows
mask_windows = window_partition(img_mask, self.window_size) # [nW, Mh, Mw, 1]
mask_windows = mask_windows.view(-1, self.window_size * self.window_size) # [nW, Mh*Mw]
        # build the mask from the region labels
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) # [nW, 1, Mh*Mw] - [nW, Mh*Mw, 1]
# [nW, Mh*Mw, Mh*Mw]
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
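
    # Worked example (illustrative): with window_size=7 and shift_size=3 on a padded 14x14 map,
    # the slices above label a 3x3 grid of regions (ids 0..8). Windows that mix different region
    # ids get -100.0 at the corresponding (query, key) positions, which drives those attention
    # weights to ~0 after the softmax.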
def forward(self,x,H,W):
        # [nW, Mh*Mw, Mh*Mw], where nW is the number of windows
attn_mask = self.create_mask(x,H,W)
for blk in self.blocks:
blk.H, blk.W = H, W # self.H = H, self.W = W
if not torch.jit.is_scripting() and self.use_checkpoint:
x = checkpoint.checkpoint(blk, x, attn_mask)
else:
x = blk(x, attn_mask)
if self.downsample is not None:
x = self.downsample(x, H, W)
            H, W = (H + 1) // 2, (W + 1) // 2  # after downsampling, H and W are halved
return x, H, W
"""一个基本的SwinTransformerBlock的构成Model"""
class SwinTransformerBlock(nn.Module):
"""
    A Swin Transformer block consists of (with a residual connection around each half):
Feature Map Input -> LayerNorm -> SW-MSA/W-MSA -> LayerNorm-> MLP -------->
|--------------------------------------||----------------------|
"""
def __init__(self, dim, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
"""
        Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
super(SwinTransformerBlock, self).__init__()
self.dim = dim
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
        # shift_size must be smaller than window_size
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0~window_size"
# LN1
self.norm1 = norm_layer(dim)
# Windows_Multi-head Self Attention
self.attn = WindowsAttention(
dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# LN2
self.norm2 = norm_layer(dim)
# MLP Layer
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, attn_mask):
        # height & width of the feature map
H, W = self.H, self.W
# Batch, length, channel
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
# Skip Connect
shortcut = x
x = self.norm1(x)
# reshape feature map
x = x.view(B, H, W, C)
        # pad the feature map so H and W become integer multiples of window_size
pad_l = 0
pad_t = 0
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
x = F.pad(x,(0,0,pad_l,pad_r,pad_t,pad_b))
        # Hp, Wp are the height and width of the padded feature map
_, Hp, Wp, _ = x.shape
        # W-MSA or SW-MSA?
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
attn_mask = None
        # window partition
x_windows = window_partition(shifted_x,self.window_size) #[nW*B, Mh, Mw, C]
x_windows = x_windows.view(-1, self.window_size*self.window_size,C) # [nW*B, Mh*Mw, C]
# W-MSA / SW-MSA
attn_windows = self.attn(x_windows, mask=attn_mask) # [nW*B, Mh*Mw, C]
        # merge the windows back into a full feature map
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # [nW*B, Mh, Mw, C]
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # [B, H', W', C]
        # for SW-MSA, undo the cyclic shift
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
        # remove the padding
if pad_r > 0 or pad_b > 0:
            # strip the rows/columns added by the padding above
x = x[:, :H, :W, :].contiguous()
x = x.view(B,H*W,C)
# FFN
        # two residual (skip) connections
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class WindowsAttention(nn.Module):
"""
Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
    In ViT attention is global, so its cost grows quadratically with the image size; for vision
    downstream tasks, especially dense prediction, or for very large images, global self-attention
    becomes very expensive. Swin Transformer instead uses window-based attention, which makes the
    computational cost linear in the image size.
    General model: W-MSA / SW-MSA.
    Without the shift, each patch can only interact with the other patches inside its own window.
    With the shift, a patch can interact with patches of a new window, and that new window contains
    patches coming from several different windows of the previous layer; this is the cross-window
    connection the authors describe, i.e. windows can now exchange information.
    Combined with the later patch-merging layers, by the last few stages each patch already has a
    very large receptive field.
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.):
"""
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
# Mh: Windows Size Height
# Mw: Windows Size Width
# nH: num_heads
super(WindowsAttention, self).__init__()
self.dim = dim
self.window_size = window_size # [Mh, Mw]
self.num_heads = num_heads
        head_dim = dim // num_heads  # dimension of each attention head
self.scale = head_dim ** -0.5 # scale
        # learnable parameter table holding the relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # [2*Mh-1 * 2*Mw-1, nH]
        # get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing="ij")) # [2, Mh, Mw]
coords_flatten = torch.flatten(coords, 1) # [2, Mh*Mw]
# [2, Mh*Mw, 1] - [2, 1, Mh*Mw]
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # [2, Mh*Mw, Mh*Mw]
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # [Mh*Mw, Mh*Mw, 2]
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # [Mh*Mw, Mh*Mw]
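        # Illustration: with Mh = Mw = 7 the per-axis offsets lie in [-6, 6]; after the shifts and
        # scaling above, every (query, key) pair maps to a unique index in [0, 13*13 - 1], which
        # selects one row of relative_position_bias_table.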
        # register_buffer: stores a non-learnable tensor on the module; it is saved and loaded
        # with the state_dict but receives no gradient updates.
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
nn.init.trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self,x,mask=None):
"""
Args:
x: input features with shape of (num_windows*B, Mh*Mw, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
            Note: the batch dimension of x is (num_windows * batch_size); attention is computed
            independently inside each window.
"""
# [batch_size*num_windows, Mh*Mw, total_embed_dim]
B_, N, C = x.shape
# qkv(): -> [batch_size*num_windows, Mh*Mw, 3 * total_embed_dim]
# reshape: -> [batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head]
# permute: -> [3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
# [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
q,k,v = qkv.unbind(0)
# QK^T/sqrt(d)
# transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw]
# @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw]
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
# QK^T/sqrt(d) + B
# B:
# relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH]
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # [nH, Mh*Mw, Mh*Mw]
# [Bs*nW, nH, Mh*Mw, Mh*Mw]
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
            # SW-MSA needs the attention mask
            # mask: [nW, Mh*Mw, Mh*Mw]
            # attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw]
            # mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
# @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
# transpose: -> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head]
# reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim]
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
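
# Hedged usage sketch (added for illustration; not part of the original file):
def _demo_forward():
    # Build the tiny variant and push one dummy 224x224 RGB image through it.
    model = swin_tiny_patch4_window7_224(num_classes=10)
    x = torch.randn(1, 3, 224, 224)
    logits = model(x)
    return logits.shape  # expected: torch.Size([1, 10])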
if __name__ == "__main__":
pass | 2.328125 | 2 |
day04/test12.py | jaywoong/python | 0 | 12795429 | <reponame>jaywoong/python
st1 = '<EMAIL>'
print(len(st1))
print(st1.find('.'))
print(st1.rfind('.'))
print(st1.count('.'))
id = st1[:st1.find('@')]
print(id)
domain = st1[st1.find('@')+1:st1.find('.')]
print(domain) | 3.234375 | 3 |
src/python/reeborg_en.py | aroberge/reeborg-docs | 2 | 12795430 | """This module contains functions, classes and exceptions that can be
included in a Python program for Reeborg's World.
"""
# When generating documentation using sphinx, these modules are both
# unavailable and not needed
try:
from browser import window
RUR = window.RUR
except:
print("\n --> Skipping importing from browser for sphinx.\n")
# All functions from Javascript used below should have names of the form
# RUR._xyz_ and be defined in commands.js and methods should have names of
# the form RUR._UR.xyz_; functions and methods should appear
# alphabetically in this English version, with the exception of Python-specific
# functions or classes that should appear near the end.
def at_goal(): #py:at_goal
"""Indicate if Reeborg has reached the desired location.
Returns:
True if Reeborg has reached its goal, False otherwise.
"""
return RUR._at_goal_()
def build_wall(): #py:build_wall
"""Instructs Reeborg to build a wall at the location in front of itself."""
RUR._build_wall_()
def carries_object(obj=None): #py:carries_object
"""Indicates whether Reeborg carries an object or not.
Args:
obj: optional parameter which is the name of an object as a string.
Returns:
a list of the type of objects carried by Reeborg.
If Reeborg carries no object, or not the specified one,
the result is an empty list.
Examples:
>>> carries_object()
["token", "apple"]
>>> carries_object("token")
["token"]
>>> carries_object("banana")
[]
"""
if obj is not None:
ans = RUR._carries_object_(obj)
else:
ans = RUR._carries_object_()
return list(ans)
def clear_print(): #py:clear_print
"""Erase all the text previously written using a call to print()."""
RUR._clear_print_()
def color_here(): #py:color_here
return RUR._color_here_()
def default_robot(): #py:default_robot
"""Returns a recreated version of the default robot."""
class Robot(UsedRobot):
def __init__(self):
self.body = RUR._default_robot_body_()
return Robot()
def dir_js(obj): #py:dir_js
"""Lists attributes and methods of a Javascript object."""
# do not translate the name of this function
RUR._dir_js_(obj)
def done(): #py:done
"""Causes a program's execution to end."""
RUR._done_()
def front_is_clear(): #py:front_is_clear
"""Indicates if an obstacle (wall, fence, water, etc.) blocks the path.
Returns:
True if the path is clear (not blocked), False otherwise.
"""
return RUR._front_is_clear_()
def is_facing_north(): #py:is_facing_north
"""Indicates if Reeborg is facing North (top of the screen) or not."""
return RUR._is_facing_north_()
def in_the_bag(): #py:in_the_bag
return dict(RUR._in_the_bag_())
def move(): #py:move
"""Move forward, by one grid position."""
RUR._move_()
def new_robot_images(images): #py:new_robot_images
"""Allow to replace the images used for the robot. More details will
be provided soon.
"""
RUR._new_robot_images_(images)
def no_highlight(): #py:no_highlight
"""Prevents code highlighting from occurring.
This function has a similar effect to clicking the corresponding
button in Reeborg's World.
Code highlighting occurs thanks to some extra code inserted in a
user's program prior to execution. When disabling highlighting
using this function, the extra instructions are still present,
but they will not be if the program is run a second time.
"""
RUR._no_highlight_()
def object_here(obj=None): #py:object_here
"""Indicates whether any type of objects are present at Reeborg's location.
Args:
obj: optional parameter which is the name of an object as a string.
Returns:
a list of the type of objects found. If no object is present,
or if the specified object is not found, the result is an empty list.
Examples:
>>> object_here()
["token", "apple"]
>>> object_here("token")
["token"]
>>> object_here("banana")
[]
"""
if obj is not None:
ans = RUR._object_here_(obj)
else:
ans = RUR._object_here_()
return list(ans) # convert from JS list-like object to proper Python list
def paint_square(color): #py:paint_square
RUR._paint_square_(color)
def pause(ms=None): #py:pause
"""Pauses a program's execution (playback).
If an argument (time in milliseconds) is given, the execution
automatically resumes after this time has elapsed.
"""
if ms is None:
RUR._pause_()
else:
RUR._pause_(ms)
def print_html(html, append=False): #py:print_html
"""Intended primarily for world creators, this function is similar to
print() except it can make use of html input.
"""
RUR._print_html_(html, append)
window['print_html'] = print_html # No translation needed
def put(obj=None): #py:put
"""Puts down an object. If Reeborg carries more than one type of objects,
the type must be specified as an argument, otherwise an exception
will be raised.
"""
if obj is None:
RUR._put_()
else:
RUR._put_(obj)
def recording(bool): #py:recording
"""Stops or starts recording changes occuring in the world.
Args:
bool: True if recording is desired, False otherwise.
"""
RUR._recording_(bool)
def remove_robots(): #py:remove_robots
"""Remove all robots found in the world."""
RUR._remove_robots_()
def right_is_clear(): #py:right_is_clear
"""Indicates if an obstacle (wall, fence, water, etc.) is on the
immediate right of Reeborg.
Returns:
True if an obstacle is on Reeborg's right, False otherwise.
"""
return RUR._right_is_clear_()
def set_max_nb_instructions(nb): #py:set_max_nb_instructions
"""Intended primarily for world creators, this function allows
to change the default maximum number of instructions executed in a
program (1000) by a different value.
"""
RUR._set_max_nb_instructions_(nb)
def set_max_nb_robots(nb): #py:set_max_nb_robots
"""Intended primarily for world creators, this function
allows to set the maximum number of robots allowed in a given world.
"""
RUR._set_max_nb_robots_(nb)
def set_trace_color(color): #py:set_trace_color
"""Change the color of the trace (oil leak).
Args:
color (string): four formats are possible: named color,
rgb and rgba, and hexadecimal notation.
Examples::
>>> set_trace_color("red")
>>> set_trace_color("rgb(125, 0, 0)")
>>> set_trace_color("rgba(125, 0, 0, 0.5)")
>>> set_trace_color("#FF00FF")
"""
RUR._set_trace_color_(color)
def set_trace_style(style="default"): #py:set_trace_style
"""Change the trace style of the robot.
Args:
style: "thick", "invisible" and "default" are the three possible
arguments. "invisible" is equivalent to
set_trace_color("rgba(0, 0, 0, 0)"), that is it sets
the colour to a completely transparent value.
The "thick" style is centered on the path followed,
so that it is impossible to distinguish between motion
to the left or to the right, and right handed turns
appear to be done all at once, if one only looks at the
trace.
"""
if style not in ["thick", "default", "invisible"]:
raise ReeborgError("Unrecognized style in set_trace_style().")
RUR._set_trace_style_(style)
def sound(bool): #py:sound
"""Activate or deactivate sound effects."""
RUR._sound_(bool)
def take(obj=None): #py:take
"""Takes an object. If more than one type of objects is at Reeborg's location,
the type must be specified as an argument, otherwise an exception
will be raised.
"""
if obj is None:
RUR._take_()
else:
RUR._take_(obj)
def think(ms): #py:think
"""Set a time delay (in milliseconds) between Reeborg's actions
played back.
"""
RUR._think_(ms)
def turn_left(): #py:turn_left
"""Reeborg turns to its left."""
RUR._turn_left_()
def view_source_js(fn): #py:view_source_js
"""Shows the source code of a Javascript function."""
RUR._view_source_js_(fn)
def wall_in_front(): #py:wall_in_front
"""Indicates if a wall blocks the way.
Returns:
True if the path blocked by a wall, False otherwise.
"""
return RUR._wall_in_front_()
def wall_on_right(): #py:wall_on_right
"""Indicates if an wall is on the immediate right of Reeborg.
Returns:
True if a wall is on Reeborg's right, False otherwise.
"""
return RUR._wall_on_right_()
def MakeCustomMenu(content): #py:MakeCustomMenu
"""Designed for use by educators. Makes it possible to create custom world
menus. See the documentation for more details.
"""
RUR._MakeCustomMenu_(content)
def World(url, shortname=None): #py:World
"""Allow to select a specific world within a program.
If the world currently shown is different than the one selected by
using this function, the result of running the program will simply
be to change the world - the rest of the program will be ignored.
If the desired world is already selected, this command is ignored
and the rest of the program is executed.
If the world is not already present in the html selector,
it will be added.
Args:
url: two possible choices: either a name appearing in the html
selector, or a URL ("link") to a world defined on some
website.
shortname: Optional parameter; if specified, this will be the
name shown in the html selector.
Examples:
>>> World("Home 1") # world included by default
>>> World("http://reeborg.ca/my_world") # fictitious example
# the name http://reeborg.ca/my_world will be added to the selector
>>> World("http://reeborg.ca/my_world", "Hello")
# The name "Hello" will be shown in the selector instead
# of the full url
"""
if shortname is None:
RUR._World_(url)
else:
RUR._World_(url, shortname)
class UsedRobot(object): #py:UR
def __init__(self, x=1, y=1, orientation='e', tokens=None): #py:UR.__init__
"""Creates a UsedRobot.
Args:
x: horizontal coordinate; an integer greater or equal to 1.
y: vertical coordinate; an integer greater or equal to 1.
orientation (string):,
one of "e" or "east",
"w" or "west", "n" or "north", "s" or "south".
tokens: Initial number of tokens to give to the robot;
its value must be a positive integer, or the string
"inf" to indicate an infinite quantity.
"""
if tokens is None:
robot = RUR.robot.create_robot(x, y, orientation)
else:
robot = RUR.robot.create_robot(x, y, orientation, tokens)
self.body = robot
RUR.world.add_robot(self.body)
def __str__(self): #py:UR.__str__
location = "({}, {})".format(self.body.x, self.body.y)
if self.body._orientation == RUR.EAST:
facing = "facing East"
elif self.body._orientation == RUR.WEST:
facing = "facing West"
elif self.body._orientation == RUR.NORTH:
facing = "facing North"
elif self.body._orientation == RUR.SOUTH:
facing = "facing South"
if 'token' in self.body.objects:
if self.body.objects['token'] == 'inf':
carries = "carries an infinite number of tokens."
else:
carries = 'carries %s tokens' % self.body.objects['token']
else:
carries = 'carries no tokens'
return "UsedRobot at {} {} {}.".format(location, facing, carries)
def at_goal(self): #py:UR.at_goal
"""Indicate if Reeborg has reached the desired location.
Returns:
True if Reeborg has reached its goal, False otherwise.
"""
return RUR._UR.at_goal_(self.body)
def build_wall(self): #py:UR.build_wall
"""Instructs Reeborg to build a wall at the location in
front of itself.
"""
RUR._UR.build_wall_(self.body)
def carries_object(self, obj=''): #py:UR.carries_object
"""Indicates whether Reeborg carries an object or not.
Args:
obj: optional parameter which is the name of an object as a string.
Returns:
a list of the type of objects carried by Reeborg.
If Reeborg carries no object, or not the specified one,
the result is an empty list.
Examples:
>>> reeborg = UsedRobot()
>>> reeborg.carries_object()
["token", "apple"]
>>> reeborg.carries_object("token")
["token"]
>>> reeborg.carries_object("banana")
[]
"""
if obj is not None:
return list(RUR._UR.carries_object_(self.body, obj))
else:
return list(RUR._UR.carries_object_(self.body))
def front_is_clear(self): #py:UR.front_is_clear
"""Indicates if an obstacle (wall, fence, water, etc.) blocks the path.
Returns:
True if the path is clear (not blocked), False otherwise.
"""
return RUR._UR.front_is_clear_(self.body)
def in_the_bag(self): #py:UR.in_the_bag
return dict(RUR._UR.in_the_bag_(self.body))
def is_facing_north(self): #py:UR.is_facing_north
"""Indicates if Reeborg is facing North (top of the screen) or not."""
return RUR._UR.is_facing_north_(self.body)
def move(self): #py:UR.move
"""Move forward, by one grid position."""
RUR._UR.move_(self.body)
def object_here(self, obj=None): #py:UR.object_here
"""Indicates whether any type of objects are present at Reeborg's location.
Args:
obj: optional parameter which is the name of an object as a string.
Returns:
a list of the type of objects found. If no object is present,
or if the specified object is not found, the result is an
empty list.
Examples:
>>> reeborg = UsedRobot()
>>> reeborg.object_here()
["token", "apple"]
>>> reeborg.object_here("token")
["token"]
>>> reeborg.object_here("banana")
[]
"""
if obj is not None:
return list(RUR._UR.object_here_(self.body, obj))
else:
return list(RUR._UR.object_here_(self.body))
def put(self, obj=None): #py:UR.put
"""Puts down an object. If Reeborg carries more than one type of objects,
the type must be specified as an argument, otherwise an exception
will be raised.
"""
if obj is None:
RUR._UR.put_(self.body)
else:
RUR._UR.put_(self.body, obj)
def right_is_clear(self): #py:UR.right_is_clear
"""Indicates if an obstacle (wall, fence, water, etc.) is on the
immediate right of Reeborg.
Returns:
True if an obstacle is on Reeborg's right, False otherwise.
"""
return RUR._UR.right_is_clear_(self.body)
def set_model(self, model): #py:UR.set_model
"""Select the model (images) for the robot.
Args:
model: a number between 0 and 3.
"""
RUR._UR.set_model_(self.body, model)
def set_trace_color(self, color): #py:UR.set_trace_color
"""Change the color of the trace (oil leak).
Args:
color (string): four formats are possible: named color,
rgb and rgba, and hexadecimal notation.
Examples::
>>> reeborg = UsedRobot()
>>> reeborg.set_trace_color("red")
>>> reeborg.set_trace_color("rgb(125, 0, 0)")
>>> reeborg.set_trace_color("rgba(125, 0, 0, 0.5)")
>>> reeborg.set_trace_color("#FF00FF")
"""
RUR._UR.set_trace_color_(self.body, color)
def set_trace_style(self, style): #py:UR.set_trace_style
"""Change the trace style of the robot.
Args:
style: "thick", "invisible" and "default" are the three
possible arguments. "invisible" is equivalent to
set_trace_color("rgba(0, 0, 0, 0)"), that is it sets
the colour to a completely transparent value.
The "thick" style is centered on the path followed,
so that it is impossible to distinguish between motion
to the left or to the right, and right handed turns
appear to be done all at once, if one only looks at the
trace.
"""
if style not in ["thick", "default", "invisible"]:
raise ReeborgError("Unrecognized style in set_trace_style().")
RUR._UR.set_trace_style_(self.body, style)
def take(self, obj=None): #py:UR.take
"""Takes an object. If more than one type of objects is at Reeborg's location,
the type must be specified as an argument, otherwise an exception
will be raised.
"""
if obj is None:
RUR._UR.take_(self.body)
else:
RUR._UR.take_(self.body, obj)
def turn_left(self): #py:UR.turn_left
"""Reeborg turns to its left."""
RUR._UR.turn_left_(self.body)
def wall_in_front(self): #py:UR.wall_in_front
"""Indicates if a wall blocks the way.
Returns:
True if the path blocked by a wall, False otherwise.
"""
return RUR._UR.wall_in_front_(self.body)
def wall_on_right(self): #py:UR.wall_on_right
"""Indicates if an wall is on the immediate right of Reeborg.
Returns:
True if a wall is on Reeborg's right, False otherwise.
"""
return RUR._UR.wall_on_right_(self.body)
#py:python_specific
def add_watch(expr): #py:add_watch
"""Adds a valid Python expression (given as a string) to
the watch list.
"""
RUR.add_watch(expr)
def dir_py(obj): #py:dir_py
"""Lists attributes and methods of a Python object, excluding
those whose name start with a double underscore and are
considered to be private.
"""
# do not translate the name of this function
attrs = []
for attr in dir(obj):
if attr.startswith("__"):
continue
if callable(getattr(obj, attr)):
attr += "()"
attrs.append(attr)
print_html(str("\n".join(attrs)).replace("&", "&").replace("<", "<"
).replace(">", ">").replace("\n", "<br>"))
class ReeborgError(Exception): #py:RE
"""Exceptions specific to Reeborg's World.
Examples::
def done(): #py:
message = "You can not use done() for this task."
raise ReeborgError(message)
#---- or ------
try:
move()
except ReeborgError: # ignore a collision
turn_left()
"""
def __init__(self, message): #py:RE.__init__
self.reeborg_shouts = message
def __str__(self): #py:RE.__str__
return repr(self.reeborg_shouts)
try:
window['ReeborgError'] = ReeborgError
except:
pass
class WallCollisionError(ReeborgError): #py:WCE
"""Exceptions specific to Reeborg's World.
Is raised when Reeborg hits a wall.
"""
pass
try:
window['WallCollisionError'] = WallCollisionError
except:
pass
class SatelliteInfo(): #py:SI
@property
def world_map(self): #py:SI.world_map
'''Returns a dict containing information about world.
'''
import json
return json.loads(RUR.control.get_world_map())
def print_world_map(self): #py:SI.print_world_map
'''Prints a formatted copy of the world'''
print(RUR.control.get_world_map())
#py:obsolete
# Do not tranlate the following
def narration(html):
raise ReeborgError("narration is obsolete; use print_html().")
def say():
raise ReeborgError("say() is no longer supported; use print() instead.")
| 3.078125 | 3 |
beluga/continuation/ContinuationSolution.py | Rapid-Design-of-Systems-Laboratory/beluga-legacy | 1 | 12795431 | class ContinuationSolution(list):
pass
| 0.960938 | 1 |
Dataset/Leetcode/valid/110/1057.py | kkcookies99/UAST | 0 | 12795432 | class Solution:
def XXX(self, root: 'TreeNode') -> 'bool':
if not root:
return True
if abs(self.maxDepth(root.left)-self.maxDepth(root.right))<=1:
# 这一点原本想错了
return self.XXX(root.left) and self.XXX(root.right)
return False
def maxDepth(self, p):
if not p:
return 0
else:
return max(self.maxDepth(p.left),self.maxDepth(p.right))+1
| 3.203125 | 3 |
src/data_structure/queue/queue.py | sujeek/python_base | 0 | 12795433 | <reponame>sujeek/python_base
class Queue:
def __init__(self):
self.queue = []
def insert(self, data):
if data is not None:
self.queue.insert(0,data)
return True
return False
def size(self):
return len(self.queue)
def pop(self):
if len(self.queue) <=0:
return "No element in the Queue!"
return self.queue.pop()
| 3.59375 | 4 |
tests/test_pp_inbox.py | KonnexionsGmbH/dcr | 2 | 12795434 | <reponame>KonnexionsGmbH/dcr<gh_stars>1-10
# pylint: disable=unused-argument
"""Testing Module pp.inbox."""
import os.path
import pathlib
import shutil
import cfg.cls_setup
import cfg.glob
import db.cls_db_core
import db.cls_run
import pytest
import utils
import dcr
# -----------------------------------------------------------------------------
# Constants & Globals.
# -----------------------------------------------------------------------------
# pylint: disable=W0212
# @pytest.mark.issue
# -----------------------------------------------------------------------------
# Test RUN_ACTION_PROCESS_INBOX - accepted - duplicate.
# -----------------------------------------------------------------------------
def test_run_action_process_inbox_accepted_duplicate(fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_PROCESS_INBOX - accepted duplicate."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
stem_name_1: str = "pdf_text_ok"
file_ext: str = "pdf"
pytest.helpers.copy_files_4_pytest_2_dir(
source_files=[
(stem_name_1, file_ext),
],
target_path=cfg.glob.setup.directory_inbox,
)
stem_name_2: str = "pdf_text_ok_1"
pytest.helpers.copy_files_4_pytest_2_dir(
source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_accepted
)
os.rename(
utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_1 + "." + file_ext),
utils.get_full_name(cfg.glob.setup.directory_inbox_accepted, stem_name_2 + "." + file_ext),
)
# -------------------------------------------------------------------------
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX])
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_process_inbox_accepted_duplicate <=========")
pytest.helpers.verify_content_of_inboxes(
inbox=(
[],
[
stem_name_1 + "." + file_ext,
],
),
inbox_accepted=(
[],
[
stem_name_2 + "." + file_ext,
],
),
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_PROCESS_INBOX - french.
# -----------------------------------------------------------------------------
def test_run_action_process_inbox_french(fxtr_setup_empty_inbox):
"""Test RUN_ACTION_PROCESS_INBOX - French."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data)
initial_database_data_path_directory = os.path.dirname(initial_database_data_path)
initial_database_data_path_file_name = os.path.basename(initial_database_data_path)
initial_database_data_path_file_name_test = "initial_database_data_french.json"
# copy test file
shutil.copy(
utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test),
utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name),
)
cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True)
cfg.glob.db_core.create_database()
# -------------------------------------------------------------------------
# Copy language subdirectory
pytest.helpers.copy_directories_4_pytest_2_dir(
source_directories=["french"], target_dir=str(cfg.glob.setup.directory_inbox)
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
[
(cfg.cls_setup.Setup._DCR_CFG_VERBOSE, "false"),
],
)
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX])
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_process_inbox_french <=========")
pytest.helpers.verify_content_of_inboxes(
inbox=(
["french"],
[],
),
inbox_accepted=(
[],
[
"docx_french_ok_1.docx",
"pdf_french_ok_2.jpg",
"pdf_french_ok_3.pdf",
"pdf_french_scanned_4.pdf",
],
),
)
# -------------------------------------------------------------------------
base_directory = str(cfg.glob.setup.directory_inbox)
language_directory_name = str(utils.get_full_name(base_directory, pathlib.Path("french")))
assert os.path.isdir(utils.get_os_independent_name(base_directory)), (
"base directory '" + base_directory + "' after processing missing"
)
assert os.path.isdir(utils.get_os_independent_name(language_directory_name)), (
"language directory '" + language_directory_name + "' after processing missing"
)
assert 0 == len(os.listdir(language_directory_name)), (
str(len(os.listdir(language_directory_name))) + " files still found after processing"
)
# -------------------------------------------------------------------------
# Check empty language subdirectory
# TBD
# -------------------------------------------------------------------------
# Test not language English in document
# TBD
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_PROCESS_INBOX - ignore duplicates.
# -----------------------------------------------------------------------------
def test_run_action_process_inbox_ignore_duplicates(fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_PROCESS_INBOX - ignore duplicates."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
source_files=[
("pdf_text_ok", "pdf"),
("pdf_text_ok_protected", "pdf"),
],
target_path=cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
[
(cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, "true"),
],
)
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX])
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_process_inbox_ignore_duplicates <=========")
pytest.helpers.verify_content_of_inboxes(
inbox_accepted=(
[],
[
"pdf_text_ok_1.pdf",
"pdf_text_ok_protected_2.pdf",
],
),
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_PROCESS_INBOX - rejected.
# -----------------------------------------------------------------------------
def test_run_action_process_inbox_rejected(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_PROCESS_INBOX - rejected."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted)
fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected)
pytest.helpers.copy_files_4_pytest_2_dir(
source_files=[
("pdf_text_ok", "pdf"),
("pdf_text_ok_protected", "pdf"),
("pdf_wrong_format", "pdf"),
],
target_path=cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
[
(cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, "false"),
],
)
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX])
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_process_inbox_rejected <=========")
pytest.helpers.verify_content_of_inboxes(
inbox=(
[],
[],
),
inbox_accepted=(
[],
[
"pdf_text_ok_1.pdf",
],
),
inbox_rejected=(
[],
[
"pdf_text_ok_protected_2.pdf",
"pdf_wrong_format_3.pdf",
],
),
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_PROCESS_INBOX - rejected - duplicate.
# -----------------------------------------------------------------------------
def test_run_action_process_inbox_rejected_duplicate(fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_PROCESS_INBOX - rejected duplicate."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
stem_name_1: str = "pdf_wrong_format"
file_ext: str = "pdf"
pytest.helpers.copy_files_4_pytest_2_dir(
source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox
)
stem_name_2: str = "pdf_wrong_format_1"
pytest.helpers.copy_files_4_pytest_2_dir(
source_files=[(stem_name_1, file_ext)], target_path=cfg.glob.setup.directory_inbox_rejected
)
os.rename(
utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_1 + "." + file_ext),
utils.get_full_name(cfg.glob.setup.directory_inbox_rejected, stem_name_2 + "." + file_ext),
)
# -------------------------------------------------------------------------
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX])
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_process_inbox_rejected_duplicate <=========")
pytest.helpers.verify_content_of_inboxes(
inbox=(
[],
[
stem_name_1 + "." + file_ext,
],
),
inbox_rejected=(
[],
[
stem_name_2 + "." + file_ext,
],
),
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_PROCESS_INBOX - rejected - 901.
# -----------------------------------------------------------------------------
def test_run_action_process_inbox_rejected_901(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_PROCESS_INBOX - rejected - 901."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_accepted)
fxtr_rmdir_opt(cfg.glob.setup.directory_inbox_rejected)
pytest.helpers.copy_files_4_pytest_2_dir(
source_files=[
("unknown_file_extension", "xxx"),
("unknown_file_extension_protected", "xxx"),
],
target_path=cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
[
(cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, "false"),
],
)
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX])
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
# -------------------------------------------------------------------------
    cfg.glob.logger.info("=========> test_run_action_process_inbox_rejected_901 <=========")
pytest.helpers.verify_content_of_inboxes(
inbox=(
[],
[],
),
inbox_accepted=(
[],
[],
),
inbox_rejected=(
[],
[
"unknown_file_extension_1.xxx",
"unknown_file_extension_protected_2.xxx",
],
),
)
| 1.5625 | 2 |
smarkets/tests/streaming_api/utils.py | smarkets/smk_python_sdk | 20 | 12795435 | <reponame>smarkets/smk_python_sdk
from __future__ import absolute_import, division, print_function, unicode_literals
from nose.tools import eq_
from smarkets.streaming_api.seto import OrderCreate, Payload, PAYLOAD_ORDER_CREATE
from smarkets.streaming_api.utils import set_payload_message
def test_set_payload_message():
payload = Payload()
assert payload.type != PAYLOAD_ORDER_CREATE
oc = OrderCreate(quantity=123456)
set_payload_message(payload, oc)
eq_(payload.type, PAYLOAD_ORDER_CREATE)
eq_(payload.order_create, oc)
| 2.0625 | 2 |
financial/calc_engines/factor_per_share_indicators_cal.py | wangjiehui11235/panther | 0 | 12795436 | # -*- coding: utf-8 -*-
import pdb, importlib, inspect, time, datetime, json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from financial import factor_per_share_indicators
from data.model import BalanceMRQ, BalanceTTM, BalanceReport
from data.model import CashFlowTTM, CashFlowReport
from data.model import IndicatorReport
from data.model import IncomeReport, IncomeTTM
from vision.table.valuation import Valuation
from vision.db.signletion_engine import *
from data.sqlengine import sqlEngine
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data
class CalcEngine(object):
def __init__(self, name, url,
methods=[{'packet': 'financial.factor_pre_share_indicators', 'class': 'FactorPerShareIndicators'}, ]):
self._name = name
self._methods = methods
self._url = url
def get_trade_date(self, trade_date, n, days=365):
"""
        Get the date n years before the given trade date, restricted to trading days; if it falls on a non-trading day, take the most recent earlier trading day.
:param days:
        :param trade_date: current trading day
:param n:
:return:
"""
syn_util = SyncUtil()
trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)
trade_date_sets = trade_date_sets['TRADEDATE'].values
time_array = datetime.strptime(str(trade_date), "%Y%m%d")
time_array = time_array - timedelta(days=days) * n
date_time = int(datetime.strftime(time_array, "%Y%m%d"))
if str(date_time) < min(trade_date_sets):
# print('date_time %s is out of trade_date_sets' % date_time)
return str(date_time)
else:
while str(date_time) not in trade_date_sets:
date_time = date_time - 1
# print('trade_date pre %s year %s' % (n, date_time))
return str(date_time)
def _func_sets(self, method):
        # Filter out private and protected functions
return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method)))
def loading_data(self, trade_date):
"""
        Fetch the base data.
        Fetch, for one day at a time, the base data of all stocks on the given trading day.
        :param trade_date: trading day
:return:
"""
        # Convert the date format
time_array = datetime.strptime(trade_date, "%Y-%m-%d")
trade_date = datetime.strftime(time_array, '%Y%m%d')
        # Load the factors currently involved
engine = sqlEngine()
columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']
# Report data
cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport,
[CashFlowReport.FINALCASHBALA, # 期末现金及现金等价物余额
], dates=[trade_date])
for col in columns:
if col in list(cash_flow_sets.keys()):
cash_flow_sets = cash_flow_sets.drop(col, axis=1)
cash_flow_sets = cash_flow_sets.rename(columns={'FINALCASHBALA': 'cash_and_equivalents_at_end', # 期末现金及现金等价物余额
})
income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
[IncomeReport.BIZINCO, # 营业收入
IncomeReport.BIZTOTINCO, # 营业总收入
IncomeReport.PERPROFIT, # 营业利润
IncomeReport.DILUTEDEPS, # 稀释每股收益
], dates=[trade_date])
for col in columns:
if col in list(income_sets.keys()):
income_sets = income_sets.drop(col, axis=1)
income_sets = income_sets.rename(columns={'BIZINCO': 'operating_revenue', # 营业收入
'BIZTOTINCO': 'total_operating_revenue', # 营业总收入
'PERPROFIT': 'operating_profit', # 营业利润
'DILUTEDEPS': 'diluted_eps', # 稀释每股收益
})
balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport,
[BalanceReport.PARESHARRIGH, # 归属于母公司的所有者权益
BalanceReport.CAPISURP,
BalanceReport.RESE,
BalanceReport.UNDIPROF,
], dates=[trade_date])
for col in columns:
if col in list(balance_sets.keys()):
balance_sets = balance_sets.drop(col, axis=1)
balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'total_owner_equities', # 归属于母公司的所有者权益
'CAPISURP': 'capital_reserve_fund', # 资本公积
'RESE': 'surplus_reserve_fund', # 盈余公积
'UNDIPROF': 'retained_profit', # 未分配利润
})
indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport,
[IndicatorReport.FCFE, # 股东自由现金流量
IndicatorReport.FCFF, # 企业自由现金流量
IndicatorReport.EPSBASIC, # 基本每股收益
IndicatorReport.DPS, # 每股股利(税前)
], dates=[trade_date])
for col in columns:
if col in list(indicator_sets.keys()):
indicator_sets = indicator_sets.drop(col, axis=1)
indicator_sets = indicator_sets.rename(columns={'FCFE': 'shareholder_fcfps', # 股东自由现金流量
'FCFF': 'enterprise_fcfps', # 企业自由现金流量
'EPSBASIC': 'basic_eps', # 基本每股收益
'DPS': 'dividend_receivable', # 每股股利(税前)
})
# TTM data
cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM,
[CashFlowTTM.CASHNETI, # 现金及现金等价物净增加额
CashFlowTTM.MANANETR, # 经营活动现金流量净额
], dates=[trade_date])
for col in columns:
if col in list(cash_flow_ttm_sets.keys()):
cash_flow_ttm_sets = cash_flow_ttm_sets.drop(col, axis=1)
cash_flow_ttm_sets = cash_flow_ttm_sets.rename(
columns={'CASHNETI': 'cash_equivalent_increase_ttm', # 现金及现金等价物净增加额
'MANANETR': 'net_operate_cash_flow_ttm', # 经营活动现金流量净额
})
income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
[IncomeTTM.PARENETP, # 归属于母公司所有者的净利润
IncomeTTM.PERPROFIT, # 营业利润
IncomeTTM.BIZINCO, # 营业收入
IncomeTTM.BIZTOTINCO, # 营业总收入
], dates=[trade_date])
for col in columns:
if col in list(income_ttm_sets.keys()):
income_ttm_sets = income_ttm_sets.drop(col, axis=1)
income_ttm_sets = income_ttm_sets.rename(columns={'PARENETP': 'np_parent_company_owners_ttm', # 归属于母公司所有者的净利润
'PERPROFIT': 'operating_profit_ttm', # 营业利润
'BIZINCO': 'operating_revenue_ttm', # 营业收入
'BIZTOTINCO': 'total_operating_revenue_ttm', # 营业总收入
})
column = ['trade_date']
valuation_data = get_fundamentals(query(Valuation.security_code,
Valuation.trade_date,
Valuation.capitalization,
).filter(Valuation.trade_date.in_([trade_date])))
for col in column:
if col in list(valuation_data.keys()):
valuation_data = valuation_data.drop(col, axis=1)
valuation_sets = pd.merge(cash_flow_sets, income_sets, on='security_code').reindex()
valuation_sets = pd.merge(balance_sets, valuation_sets, on='security_code').reindex()
valuation_sets = pd.merge(indicator_sets, valuation_sets, on='security_code').reindex()
valuation_sets = pd.merge(cash_flow_ttm_sets, valuation_sets, on='security_code').reindex()
valuation_sets = pd.merge(income_ttm_sets, valuation_sets, on='security_code').reindex()
valuation_sets = pd.merge(valuation_data, valuation_sets, on='security_code').reindex()
return valuation_sets
def process_calc_factor(self, trade_date, valuation_sets):
per_share = factor_per_share_indicators.FactorPerShareIndicators()
factor_share_indicators = pd.DataFrame()
factor_share_indicators['security_code'] = valuation_sets['security_code']
valuation_sets = valuation_sets.set_index('security_code')
factor_share_indicators = factor_share_indicators.set_index('security_code')
factor_share_indicators = per_share.EPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.DilutedEPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.CashEquPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.DivPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.EPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.NetAssetPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.TotalRevPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.TotalRevPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.OptRevPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.OptRevPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.OptProfitPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.OptProfitPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.CapticalSurplusPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.SurplusReservePS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.UndividedProfitPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.RetainedEarningsPS(factor_share_indicators, factor_share_indicators)
factor_share_indicators = per_share.OptCFPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.CFPSTTM(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.EnterpriseFCFPS(valuation_sets, factor_share_indicators)
factor_share_indicators = per_share.ShareholderFCFPS(valuation_sets, factor_share_indicators)
factor_share_indicators = factor_share_indicators.reset_index()
factor_share_indicators['trade_date'] = str(trade_date)
factor_share_indicators.replace([-np.inf, np.inf, None], np.nan, inplace=True)
return factor_share_indicators
def local_run(self, trade_date):
print('当前交易日: %s' % trade_date)
tic = time.time()
valuation_sets = self.loading_data(trade_date)
print('data load time %s' % (time.time() - tic))
storage_engine = StorageEngine(self._url)
result = self.process_calc_factor(trade_date, valuation_sets)
print('cal_time %s' % (time.time() - tic))
storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result)
# storage_engine.update_destdb('factor_pre_share_indicators', trade_date, result)
# def remote_run(self, trade_date):
# total_data = self.loading_data(trade_date)
    # # store the data
# session = str(int(time.time() * 1000000 + datetime.datetime.now().microsecond))
# cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records'))
# distributed_factor.delay(session, json.dumps(self._methods), self._name)
#
# def distributed_factor(self, total_data):
# mkt_df = self.calc_factor_by_date(total_data,trade_date)
# result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date)
# @app.task
# def distributed_factor(session, trade_date, packet_sets, name):
# calc_engines = CalcEngine(name, packet_sets)
# content = cache_data.get_cache(session, factor_name)
# total_data = json_normalize(json.loads(content))
# calc_engines.distributed_factor(total_data)
#
# # @app.task()
# def factor_calculate(**kwargs):
# print("per_share_kwargs: {}".format(kwargs))
# date_index = kwargs['date_index']
# session = kwargs['session']
# content = cache_data.get_cache(session + str(date_index), date_index)
# total_pre_share_data = json_normalize(json.loads(str(content, encoding='utf8')))
# print("len_total_per_share_data {}".format(len(total_pre_share_data)))
# calculate(date_index, total_pre_share_data)
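# --- Illustrative usage (added sketch; not part of the original module) ------
# Drives the engine for a single trading day. The connection URL and the date
# below are placeholders; CalcEngine's default `methods` argument is reused.
if __name__ == '__main__':
    _demo_engine = CalcEngine('per_share', 'mysql+pymysql://user:password@host/db')
    _demo_engine.local_run('2020-01-02')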
| 2.25 | 2 |
wack/scenes.py | tjusticelee/textytextgame | 0 | 12795437 | <gh_stars>0
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from werkzeug.exceptions import abort
bp = Blueprint('scenes', __name__)
@bp.route('/')
def index():
return render_template('scenes/index.html')
@bp.route('/home', methods=('GET', 'POST'))
def home():
sceneboi = {
'scenario': """Your alarm wakes you up. You lay in bed and decide
whether or not to skip class. \n
1. Stay in and sleep
\n
2. Get ready for class"""
}
if request.method == 'POST':
choice = request.form['action']
if choice == "1":
return redirect(url_for('scenes.bus'))
if choice == "2":
return redirect(url_for('scenes.walk'))
return render_template('scenes/play.html', scene=sceneboi)
@bp.route('/bus', methods=('GET', 'POST'))
def bus():
sceneboi = {
'scenario': """You lay in bed and close your eyes. Your mom comes through
your air vent and yells at you to get up you yell at her
\"You don't understand me mom!\" She dropkicks you
into the bus from your room.
You land in the driver's seat of the bus and realize you have to drive
the bus. do you?
1. Drift dat boi
2. Drive like a civilized person"""
}
if request.method == 'POST':
choice = request.form['action']
return render_template('scenes/play.html', scene=sceneboi)
| 2.53125 | 3 |
app/mysqltojson.py | imosudi/graphql | 1 | 12795438 | <filename>app/mysqltojson.py
from sqlalchemy import create_engine, inspect
import os, json
#import requests
import decimal, datetime
from .dbconnect import engine, alchemyencoder
#from .dbconnect import engine, alchemyencoder
pwd = os.path.dirname(os.path.abspath(__file__))
class flatToCascadedJson(object):
def __init__(self, dbtable, *args):
super(flatToCascadedJson, self).__init__(*args)
self.dbtable =dbtable
if not os.path.exists(f'{pwd}/table_json/'):
os.makedirs(f'{pwd}/table_json/')
def reformatjson(self):
dbtable = self.dbtable
if dbtable not in ['patients', 'labtests', 'transactions', 'user']:
return {'response':'Not available in database'}, inspect(engine)
dbtableData = engine.execute('SELECT * FROM {dbtable}' .format(dbtable=dbtable))
#engine.dispose()
dataList = json.dumps([dict(row) for row in dbtableData], default=alchemyencoder, indent=4)
with open(f'{pwd}/table_json/{dbtable}.json', 'w+') as file:
file.write(dataList)
file.close()
if dbtable == 'patients':
patientList = json.loads(dataList)
with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file:
for i in range(0, len(patientList)) :
data2 = json.dumps(
{
'patient_row_id' : patientList[i]['patient_id'],
'patient_unique_ID': patientList[i]['patientID'],
'labsessioncount' : '',
'PatientPersonalDetails' :[
{
'patientSex': patientList[i]['patientSex'],
'patientStatus': patientList[i]['patientStatus'],
'patientType': patientList[i]['patientType'],
'ageGrade': patientList[i]['ageGrade'],
'patientDateofBirth': patientList[i]['patientDateofBirth'],
'patientTitle': patientList[i]['patientTitle'],
'patientFirstname': patientList[i]['patientFirstname'],
'patientLastname': patientList[i]['patientLastname'],
'patientMiddlename': patientList[i]['patientMiddlename'],
'patientEmail': patientList[i]['patientEmail'],
'patientAltEmail': patientList[i]['patientAltEmail'],
'patientPhonenumber': patientList[i]['patientPhonenumber'],
'patientAltPhonenumber': patientList[i]['patientAltPhonenumber'],
'patientwhatsappnumber': patientList[i]['patientwhatsappnumber'],
'patientAddress': patientList[i]['patientAddress'],
'patientCity': patientList[i]['patientCity'],
'patientState': patientList[i]['patientState'],
'patientCountry': patientList[i]['patientCountry'],
'patientpersonalEnroledby': patientList[i]['patientpersonalEnroledby']
}
],
'PatientCorporateDetails' :[
{
'patientCompanyname': patientList[i]['patientCompanyname'],
'patientCorporateContactperson': patientList[i]['patientCorporateContactperson'],
'patientCorporateEmail': patientList[i]['patientCorporateEmail'],
'patientCorporatePhone': patientList[i]['patientCorporatePhone'],
'patientCorporatewhatsappnumber': patientList[i]['patientCorporatewhatsappnumber'],
'patientCorporateAddress': patientList[i]['patientCorporateAddress'],
'patientCorporateCity': patientList[i]['patientCorporateCity'],
'patientCorporateState': patientList[i]['patientCorporateState'],
'patientCorporateCountry': patientList[i]['patientCorporateCountry'],
'patientCorporateEnroledby': patientList[i]['patientCorporateEnroledby'],
'enrolment_Time': patientList[i]['enrolment_Time']
}
]
},
indent=2
)
#print(data2)
file.write(data2)
file.close()
#print(patientList)
return data2, dataList
elif dbtable == 'labtests' :
testList = json.loads(dataList)
with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file:
for i in range(0, len(testList)) :
data2 = json.dumps(
{
'test_id': testList[i]['test_id'],
'testType': testList[i]['testType'],
'testBottleType': testList[i]['testBottleType'],
'testName': testList[i]['testName'],
'testmnemonics': testList[i]['testmnemonics'],
'testDetails': testList[i]['testDetails'],
'testTAT': testList[i]['testTAT'],
'testPrice': testList[i]['testPrice']
},
indent=2
)
#print(data2)
file.write(data2)
file.close()
return data2, dataList
elif dbtable == 'transactions' :
transactionList = json.loads(dataList)
with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file:
for i in range(0, len(transactionList)) :
data2 = json.dumps(
{
'transaction_id': 1,
'transactTime': transactionList[i]['transactTime'],
'labSessionTestDetails' : [
{
'invoicemnemonics': transactionList[i]['invoicemnemonics'],
'invoicetestname': transactionList[i]['invoicetestname'],
'invoiceprice': transactionList[i]['invoiceprice'],
'invoicetat': transactionList[i]['invoicetat']
}
],
'PatientDetails' : [
{
'CurrentpatientID': transactionList[i]['CurrentpatientID'],
'fullName': transactionList[i]['fullName'],
'sex': transactionList[i]['sex'],
'billto': transactionList[i]['billto'],
'testspriority': transactionList[i]['testspriority'],
'testscheduletype': transactionList[i]['testscheduletype']
}
],
'Payment_Reference' : [
{
'subtotal': transactionList[i]['subtotal'],
'discount': transactionList[i]['discount'],
'equalltax': transactionList[i]['equalltax'],
'total': transactionList[i]['total'],
'paymentmethod': transactionList[i]['paymentmethod'],
'payment': transactionList[i]['payment'],
'referenceOrchange': transactionList[i]['referenceOrchange'],
'sessionconfirm': transactionList[i]['sessionconfirm'],
'paymentconfirm': transactionList[i]['paymentconfirm'],
'barcode': transactionList[i]['barcode'],
'phlebotomy_processed': transactionList[i]['phlebotomy_processed']
}
],
'PaymentPtocessor' : [
{
'regtype': transactionList[i]['regtype'],
'cashier': transactionList[i]['cashier'],
'paymentupdateamount': transactionList[i][ 'paymentupdateamount'],
'paymentupdateby': transactionList[i]['paymentupdateby'],
'paymentupdateTime': transactionList[i]['paymentupdateTime']
}
]
},
indent=2
)
#print(data2)
file.write(data2)
file.close()
#print(transactionList[0])
return data2, dataList
elif dbtable == 'user' :
userList = json.loads(dataList)
with open(f'{pwd}/table_json/{dbtable}_casded.json', 'w+') as file:
for i in range(0, len(userList)) :
data2 =json.dumps(
{
'userID': userList[i]['id'],
'loginDetails' :[{
'username': userList[i]['email'],
'password': userList[i]['password']
}],
'designation': userList[i]['designation'],
'userDetails' :[{
'firstname' : userList[i]['firstname'],
'lastname': userList[i]['lastname'],
'email': userList[i]['email'],
'phonenumber': userList[i]['phonenumber'],
'AlternatePhonenumber' : userList[i]['altnumber'],
'location' :[{
'location': userList[i]['location'],
'city' : userList[i]['city'],
'state': userList[i]['state'],
'country': userList[i]['country']
}],
'zip_code' : userList[i]['zip_code']
}],
'Analytics' :[{
'last_login_at': userList[i]['last_login_at'],
'current_login_at': userList[i]['current_login_at'],
'last_login_ip': userList[i]['last_login_ip'],
'current_login_ip': userList[i]['current_login_ip'],
'login_count': userList[i]['login_count'],
'confirmed_at': userList[i]['confirmed_at'],
'active': userList[i]['active']
}]
}, indent=2
)
#print(data2)
file.write(data2)
file.close()
# End for statement'
return data2, dataList
#print(userList[0])
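# --- Illustrative usage (added sketch; not part of the original module) ------
# 'patients' is one of the whitelisted table names checked in reformatjson();
# running this assumes the MySQL connection configured in .dbconnect and a
# writable table_json/ directory next to this file.
if __name__ == "__main__":
    cascaded_json, flat_json = flatToCascadedJson("patients").reformatjson()
    print(flat_json[:200])  # preview of the flat per-row JSON dump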
| 2.515625 | 3 |
bfs/0261_graph_valid_tree.py | adwardlee/leetcode_solutions | 0 | 12795439 | '''
Given n nodes labeled from 0 to n - 1 and a list of undirected edges (each edge is a pair of nodes), write a function to check whether these edges make up a valid tree.
样例
Example 1:
Input: n = 5 edges = [[0, 1], [0, 2], [0, 3], [1, 4]]
Output: true.
Example 2:
Input: n = 5 edges = [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]]
Output: false.
注意事项
You can assume that no duplicate edges will appear in edges. Since all edges are undirected, [0, 1] is the same as [1, 0] and thus will not appear together in edges.
'''
from collections import defaultdict,deque
class Solution:
"""
@param n: An integer
@param edges: a list of undirected edges
@return: true if it's a valid tree, or false
"""
def validTree(self, n, edges):
# write your code here
if len(edges) != n - 1:
return False
if len(edges) == 0:
return n == 1
neighbor = defaultdict()
for edge in edges:
if edge[0] not in neighbor:
neighbor[edge[0]] = 1
else:
neighbor[edge[0]] += 1
if edge[1] not in neighbor:
neighbor[edge[1]] = 1
else:
neighbor[edge[1]] += 1
queue = deque()
for x in range(n):
if x not in neighbor:
return False
elif neighbor[x] == 1:
neighbor[x] -= 1
queue.append(x)
count = 0
while queue:
node = queue.popleft()
count += 1
for edge in edges:
if node in edge:
neighbor[edge[0]] -= 1
neighbor[edge[1]] -= 1
if len(queue) == 0:
for key in neighbor:
if neighbor[key] == 1 or neighbor[key] == 0:
queue.append(key)
if count < n:
return False
return True | 4.15625 | 4 |
flask_mailing/__init__.py | jfkinslow/flask-mailing | 0 | 12795440 | <reponame>jfkinslow/flask-mailing
from .mail import Mail
from .config import ConnectionConfig
from .schemas import (
Message as Message,
MultipartSubtypeEnum as MultipartSubtypeEnum
)
from . import utils
version_info = (0, 0, 6)
__version__ = ".".join([str(v) for v in version_info])
__author__ = "<EMAIL>"
__all__ = [
"Mail", "ConnectionConfig", "Message", "utils", "MultipartSubtypeEnum"
] | 1.96875 | 2 |
books/books.py | rossgk2/cs257 | 0 | 12795441 | '''
books.py
Written by <NAME> and <NAME> for cs257
Revised by <NAME>
A command line interface for searching the 'books.csv' file.
'''
import csv
import argparse
def get_parsed_arguments():
# Set up command line arguments.
with open("prolog.txt", "r") as prolog, open("epilog.txt", "r") as epilog:
parser = argparse.ArgumentParser(description = prolog.read(), epilog = epilog.read())
parser.add_argument("-b", "--books", nargs="+", help="One or more substrings to search for in the titles of books. "
"If one of the substrings contains a space, surround that substring"
" with quotes \"\".")
parser.add_argument("-a", "--authors", nargs="+",
help="One or more substrings to search for in the names of authors. If one of the substrings contains "
"a space, surround that substring with quotes \"\".")
# may need to fix, see python3 books.py books.csv -b 'the' 1800 1899 for example
parser.add_argument("year1", nargs = "?", help="One of the years in the time "
"interval [min(year1, year2), max(year1, year2)] "
"within which to search for books.")
parser.add_argument("year2", nargs = "?", help="One of the years in the time "
"interval [min(year1, year2), max(year1, year2)] "
"within which to search for books.")
# Parse the command line.
parsed_arguments = parser.parse_args()
# Handle the years.
year1 = parsed_arguments.year1
if parsed_arguments.year2 is None:
parsed_arguments.year2 = year1
# Note that year1 or year2 might still be None, which is fine.
return parsed_arguments
def filterBooks(filtered, books) -> list:
return list(filter(lambda p: any(sub.lower() in p[0].lower() for sub in books), filtered))
def filterAuthors(filtered, authors) -> list:
return list(filter(lambda p: any(sub.lower() in p[2].lower() for sub in authors), filtered))
def filterYears(filtered, year1, year2) -> list:
return list(filter(lambda p: year1 <= p[1] and year2 >= p[1], filtered))
def getAuthorSet(filtered, authors) -> set:
authorSet = set()
if authors:
for row in filtered:
authorSet.add(row[2])
return authorSet
def main():
# Get arguments from the command line.
arguments = get_parsed_arguments()
filtered = csv.reader(open('books.csv', 'r'))
# Filter by years, books, or authors.
if arguments.year1:
filtered = filterYears(filtered, arguments.year1, arguments.year2)
if arguments.books:
filtered = filterBooks(filtered, arguments.books)
if arguments.authors:
filtered = filterAuthors(filtered, arguments.authors)
authorSet = getAuthorSet(filtered, arguments.authors)
# If authorSet is nonempty, print authors and their books.
if authorSet != set():
tab = " " * 4
for auth in authorSet:
print(auth)
for row in list(filtered):
if row[2] == auth:
print(tab + row[0] + ", " + row[1])
# Otherwise, print all book/author/year information in "filtered".
else:
for row in filtered:
print(row[0] + ", " + row[1] + ", " + row[2])
if __name__ == "__main__":
main()
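# Example invocations (illustrative only; they assume books.csv, prolog.txt and
# epilog.txt are present in the working directory):
#   python3 books.py -b "sense" "pride"    # search title substrings
#   python3 books.py -a austen bronte      # search author substrings
#   python3 books.py -b the 1800 1899      # titles containing "the" published 1800-1899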
| 3.28125 | 3 |
crosspm/contracts/contract.py | devopshq/crosspm2 | 3 | 12795442 | <reponame>devopshq/crosspm2
class Contract:
def __init__(self, name, values):
self.name = name
self.values = values
def __hash__(self):
return hash((self.name, self.values))
def __str__(self):
return "{}{}".format(self.name, self.values)
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.name == other.name and (set(self.values) & set(other.values))
def __ne__(self, other):
return not (self == other)
class PackageContracts:
def __init__(self, contracts):
self._contracts = contracts
def __getitem__(self, key):
for c in self._contracts:
if c.name == key.name:
return c
return None
def is_lower(self, contract):
c = self[contract]
return c and c.value < contract.value
def is_equal(self, contract):
c = self[contract]
return c and c.value == contract.value
| 2.859375 | 3 |
grid_utils/gridder.py | claydodo/grid_utils | 0 | 12795443 | # -*- coding:utf-8 -*-
import six
import numpy as np
from pyproj import Proj
import operator
from .exceptions import *
class NullProj(object):
"""
Similar to pyproj.Proj, but NullProj does not do actual conversion.
"""
@property
def srs(self):
return ''
def __call__(self, x, y, **kwargs):
return x, y
class GridderBase(object):
"""Gridder is a helper for i, j <-> x, y conversion, etc."""
def i2x(self, *args):
"""Convert i, j, ... -> x, y, ..."""
raise NotImplementedError
def x2i(self, *args, **kwargs):
"""Convert x, y, ... -> i, j, ..."""
raise NotImplementedError
def copy(self, **kwargs):
kws = self.dump()
kws.update(kwargs)
new_gridder = self.__class__(**kws)
return new_gridder
def calibrate(self, x0, y0, x1=None, y1=None):
return
def dump(self):
return {}
class XYGridderBase(GridderBase):
"""
Requires self.X & self.Y.
"""
@property
def bbox(self):
return (np.min(self.X), np.min(self.Y), np.max(self.X), np.max(self.Y))
def get_bounding_ij(self, x1, y1, x2, y2, **kwargs):
bbox = self.bbox
if x1 is None:
x1 = bbox[0]
if y1 is None:
y1 = bbox[1]
if x2 is None:
x2 = bbox[2]
if y2 is None:
y2 = bbox[3]
bad = ~((self.X >= x1) & (self.X <= x2) & (self.Y >= y1) & (self.Y <= y2))
x_bad = np.alltrue(bad, axis=0)
y_bad = np.alltrue(bad, axis=1)
x_points = np.argwhere(np.diff(np.r_[True, x_bad, True])).reshape(-1, 2)
y_points = np.argwhere(np.diff(np.r_[True, y_bad, True])).reshape(-1, 2)
i1, i2 = (-1, -1) if x_points.shape[0] == 0 else x_points[0]
j1, j2 = (-1, -1) if y_points.shape[0] == 0 else y_points[0]
return i1, j1, i2, j2
def check_bound(self, i, j, int_index=True):
start = -0.5
subtracted = 1
if int_index:
start = 0
if int_index in ('lowerleft', 'll'):
subtracted = 2
if np.isscalar(i):
if (i >= start and i <= self.nx-subtracted) and (j >= start and j <= self.ny-subtracted):
return i, j
else:
raise OutOfGridBound("i: {}, j: {} is out of bound!".format(i, j))
else:
i = np.where((i >= start) & (i <= self.nx - subtracted), i, np.nan)
j = np.where((j >= start) & (j <= self.ny - subtracted), j, np.nan)
return i, j
class XYProjGridder(XYGridderBase):
def __init__(self, proj=None, x=None, y=None, nx=None, ny=None, dx=None, dy=None, x_orig=0.0, y_orig=0.0, **kwargs):
self.proj = proj
self._reset_raw_xy()
if x is not None and y is not None:
self.set_xy(x, y)
else:
self._init_with_para(nx, ny, dx, dy, x_orig, y_orig)
@property
def proj(self):
return self._proj
@proj.setter
def proj(self, p):
if p is None:
self._proj = NullProj()
elif isinstance(p, (Proj, NullProj)):
self._proj = p
elif isinstance(p, dict):
self._proj = Proj(**p)
else: # Treat as proj_string
self._proj = Proj(str(p)) # TODO: check PY3 compatibility.
self._reset_raw_xy()
if all([hasattr(self, attr) for attr in ('_nx', '_ny', '_dx', '_dy', '_x_orig', '_y_orig')]):
self._updateXY()
@property
def X(self):
return self._X
@X.setter
def X(self, x):
if self._raw_y is None:
raise ValueError("Cannot set x alone when no raw y presents.")
ndim_x = np.ndim(x)
if ndim_x == 1 and np.ndim(self._raw_y) == 1:
self.set_xy(x, self._raw_y)
elif ndim_x == 2 and np.shape(x) == np.shape(self.Y):
self.set_xy(x, self.Y)
else:
self._raise_invalid_shape(x, self.Y)
@property
def Y(self):
return self._Y
@Y.setter
def Y(self, y):
if self._raw_x is None:
raise ValueError("Cannot set y alone when no raw x presents.")
ndim_y = np.ndim(y)
if ndim_y == 1 and np.ndim(self._raw_x) == 1:
self.set_xy(self._raw_x, y)
elif ndim_y == 2 and np.shape(y) == np.shape(self.X):
self.set_xy(self.X, y)
else:
self._raise_invalid_shape(self.X, y)
@property
def CX(self):
return self._CX
@property
def CY(self):
return self._CY
@property
def x(self):
return self._raw_x if self._raw_x is not None else self._X
@property
def y(self):
return self._raw_y if self._raw_y is not None else self._Y
@property
def cx(self):
return self._raw_cx if self._raw_cx is not None else self._CX
@property
def cy(self):
return self._raw_cy if self._raw_cy is not None else self._CY
@property
def nx(self):
return self._nx
@nx.setter
def nx(self, value):
self._nx = value
self._reset_raw_xy()
self._updateXY()
@property
def ny(self):
return self._ny
@ny.setter
def ny(self, value):
self._ny = value
self._reset_raw_xy()
self._updateXY()
@property
def dx(self):
return self._dx
@dx.setter
def dx(self, value):
self._dx = value
self._reset_raw_xy()
self._updateXY()
@property
def dy(self):
return self._dy
@dy.setter
def dy(self, value):
self._dy = value
self._reset_raw_xy()
self._updateXY()
@property
def x_orig(self):
return self._x_orig
@x_orig.setter
def x_orig(self, value):
self._x_orig = value
self._reset_raw_xy()
self._updateXY()
@property
def y_orig(self):
return self._y_orig
@y_orig.setter
def y_orig(self, value):
self._y_orig = value
self._reset_raw_xy()
self._updateXY()
@property
def bbox(self):
return self._bbox
@property
def cbox(self):
"""corner box"""
return self._cbox
def _init_with_para(self, nx, ny, dx, dy, x_orig, y_orig):
self._nx = nx
self._ny = ny
self._dx = dx
self._dy = dy
self._x_orig = x_orig
self._y_orig = y_orig
self._updateXY()
@property
def has_null_proj(self):
return isinstance(self.proj, NullProj)
def set_xy(self, x, y):
ndim_x, ndim_y = np.ndim(x), np.ndim(y)
if ndim_x == 1 and ndim_y == 1:
self._nx, self._ny = len(x), len(y)
elif ndim_x == 2 and ndim_y == 2:
self._ny, self._nx = np.shape(x)
else:
self._raise_invalid_shape(x, y)
self._raw_x, self._raw_y = np.asarray(x), np.asarray(y)
self.calibrate(x, y)
def _raise_invalid_shape(self, x, y):
raise ValueError("Invalid x, y shape: {}, {}".format(np.shape(x), np.shape(y)))
def _reset_raw_xy(self):
self._raw_x, self._raw_y = None, None
def _updateXY(self):
jj, ii = np.mgrid[0:self.ny, 0:self.nx]
cjj, cii = np.mgrid[-0.5:self.ny, -0.5:self.nx]
xx, yy = self.i2x(ii, jj)
cxx, cyy = self.i2x(cii, cjj)
self._X, self._Y = xx, yy
self._CX, self._CY = cxx, cyy
if self._raw_x is not None and self._raw_x.ndim == 1:
self._raw_cx = self._CX[0]
else:
self._raw_cx = None
if self._raw_y is not None and self._raw_y.ndim == 1:
self._raw_cy = self._CY[:, 0]
else:
self._raw_cy = None
self._bbox = (np.min(self._X), np.min(self._Y), np.max(self._X), np.max(self._Y))
self._cbox = (np.min(self._CX), np.min(self._CY), np.max(self._CX), np.max(self._CY))
return xx, yy
def i2x(self, i, j):
px = i * self.dx + self.x_orig
py = j * self.dy + self.y_orig
return self.proj(px, py, inverse=True)
def x2i(self, x, y, int_index=True, check_bound=None):
px, py = self.proj(x, y)
i = (px - self.x_orig) / self.dx
j = (py - self.y_orig) / self.dy
if int_index:
if int_index in ('lowerleft', 'll'):
i = np.floor(i)
j = np.floor(j)
else:
i = np.round(i)
j = np.round(j)
if np.isscalar(i):
i = int(i)
j = int(j)
else:
i = i.astype('i')
j = j.astype('i')
if check_bound:
return self.check_bound(i, j, int_index=int_index)
else:
return i, j
def calibrate(self, x, y, x1=None, y1=None):
ndim_x, ndim_y = np.ndim(x), np.ndim(y)
if ndim_x == 0 and ndim_y == 0:
x0, y0 = x, y
if ndim_x == 1 and ndim_y == 1:
x0, x1 = x[0], x[1]
y0, y1 = y[0], y[1]
elif ndim_x == 2 and ndim_y == 2:
x0, x1 = x[0, 0], x[1, 1]
y0, y1 = y[0, 0], y[1, 1]
else:
self._raise_invalid_shape(x, y)
px0, py0 = self.proj(x0, y0)
self._x_orig = px0
self._y_orig = py0
if x1 is not None and y1 is not None:
px1, py1 = self.proj(x1, y1)
self._dx = px1 - px0
self._dy = py1 - py0
self._updateXY()
def dump(self):
return {
"proj": self.proj.srs,
"nx": self.nx, "ny": self.ny, "dx": self.dx, "dy": self.dy,
"x_orig": self.x_orig, "y_orig": self.y_orig
}
class LonLatSurroundingGridder(XYGridderBase):
def __init__(self, lon0, lat0, rmin, rmax, nr, ntheta, theta0=0.0, r_earth=6371):
self.lon0 = lon0
self.lat0 = lat0
self.rmin = rmin
self.rmax = rmax
self.nr = nr
self.ntheta = ntheta
self.theta0 = theta0
self.r_earth = r_earth
self.dtheta = np.pi * 2 / self.ntheta
self.dr = (self.rmax - self.rmin) / (self.nr - 1)
self._updateXY()
def _updateXY(self):
r = np.linspace(self.rmin, self.rmax, self.nr)
theta = np.arange(self.ntheta) * self.dtheta + self.theta0
THETA, R = np.meshgrid(theta, r)
LON, LAT = self.r_theta_to_lon_lat(R, THETA)
self._X = LON
self._Y = LAT
return self._X, self._Y
def r_theta_to_lon_lat(self, r, theta):
r_ = r / self.r_earth
sin_r = np.sin(r_)
cos_r = np.cos(r_)
lat0_ = np.deg2rad(self.lat0)
lon0_ = np.deg2rad(self.lon0)
sin_lat0 = np.sin(lat0_)
cos_lat0 = np.cos(lat0_)
sin_lat = sin_lat0 * cos_r + cos_lat0 * sin_r * np.cos(theta)
lat_ = np.arcsin(sin_lat)
lon_ = lon0_ + np.arctan2(np.sin(theta) * sin_r * cos_lat0, cos_r - sin_lat0 * sin_lat)
lon = np.rad2deg(lon_)
lat = np.rad2deg(lat_)
return lon, lat
@property
def nx(self):
return self.ntheta
@property
def ny(self):
return self.nr
@property
def X(self):
return self._X
@property
def Y(self):
return self._Y
@property
def x(self):
return self._X
@property
def y(self):
return self._Y
def i2x(self, i, j):
theta = self.theta0 + i * self.dtheta
r = self.rmin + j * self.dr
lon, lat = self.r_theta_to_lon_lat(r, theta)
return lon, lat
def x2i(self, x, y, int_index=True, check_bound=None):
lon2, lat2 = np.deg2rad(x), np.deg2rad(y)
lon1, lat1 = np.deg2rad(self.lon0), np.deg2rad(self.lat0)
dlon = lon2 - lon1
dlat = lat2 - lat1
sin_dlon = np.sin(dlon)
cos_dlon = np.cos(dlon)
sin_lat1 = np.sin(lat1)
cos_lat1 = np.cos(lat1)
sin_lat2 = np.sin(lat2)
cos_lat2 = np.cos(lat2)
a = cos_lat2 * sin_dlon
b = cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * cos_dlon
theta = np.arctan2(a, b)
c = np.sin(dlat / 2) ** 2 + cos_lat1 * cos_lat2 * np.sin(dlon / 2) ** 2
d = 2 * np.arcsin(np.sqrt(c))
r = d * self.r_earth
i = (theta - self.theta0) / self.dtheta % self.ntheta
j = (r - self.rmin) / self.dr
if int_index:
i = np.round(i)
j = np.round(j)
if np.isscalar(i):
i = int(i)
j = int(j)
else:
i = i.astype('i')
j = j.astype('i')
if check_bound:
return self.check_bound(i, j, int_index=int_index)
else:
return i, j
class XYIrregularGridder(XYGridderBase):
# TODO: use kdtree.
def __init__(self, X, Y):
X = np.array(X)
Y = np.array(Y)
if X.ndim == 1:
self.X, self.Y = np.meshgrid(X, Y)
else:
self.X, self.Y = X, Y
        self.ny, self.nx = self.X.shape
def i2x(self, i, j, *args, **kwargs):
return self.X[j, i], self.Y[j, i]
def x2i(self, x, y, *args, **kwargs):
distances = np.hypot(self.X-x, self.Y-y)
flat_i = np.argmin(distances)
nx = self.X.shape[1]
        return flat_i % self.nx, flat_i // self.nx  # i (column), j (row)
def dump(self):
return {
"X": self.X,
"Y": self.Y,
"nx": self.nx,
"ny": self.ny,
}
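# --- Illustrative usage (added sketch; not part of the original module) ------
# Round trip on a regular grid with the default NullProj (no map projection
# involved); the grid parameters are arbitrary.
if __name__ == "__main__":
    _g = XYProjGridder(nx=5, ny=4, dx=1000.0, dy=500.0, x_orig=0.0, y_orig=0.0)
    _x, _y = _g.i2x(2, 3)      # grid index -> coordinates
    _i, _j = _g.x2i(_x, _y)    # coordinates -> nearest grid index
    print(_x, _y, _i, _j)      # expected: 2000.0 1500.0 2 3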
| 2.484375 | 2 |
unitvelo/__init__.py | StatBiomed/UniTVelo | 0 | 12795444 | <filename>unitvelo/__init__.py
#%%
import os
from time import gmtime, strftime
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
try:
from setuptools_scm import get_version
__version__ = get_version(root="..", relative_to=__file__)
del get_version
except (LookupError, ImportError):
try:
from importlib_metadata import version
except:
from importlib.metadata import version
__version__ = version(__name__)
del version
print (f'(Running UniTVelo {__version__})')
print (strftime("%Y-%m-%d %H:%M:%S", gmtime()))
from .main import run_model
from .config import Configuration
from .eval_utils import evaluate
from .gene_influence import influence | 1.84375 | 2 |
mfgp/task1_new/utils/has_duplicates.py | kunalghosh/Multi_Fidelity_Prediction_GP | 0 | 12795445 | <reponame>kunalghosh/Multi_Fidelity_Prediction_GP<gh_stars>0
def has_duplicates(seq):
return len(seq) != len(set(seq))
| 1.789063 | 2 |
scan_service/tests/hardware_config_tests.py | kkkkv/tgnms | 12 | 12795446 | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import json
import unittest
from typing import List
from bidict import bidict
from scan_service.utils.hardware_config import HardwareConfig
class HardwareConfigTests(unittest.TestCase):
def setUp(self) -> None:
with open("tests/hardware_config.json") as f:
hardware_config = json.load(f)
HardwareConfig.set_config(hardware_config)
def test_class_variables(self) -> None:
self.assertDictEqual(
HardwareConfig.BEAM_ORDER,
{
"0": {
"-18": [0, 1, 2, 3, 4, 5, 6, 7],
"18": [8, 9, 10, 11, 12, 13, 14, 15],
},
"1": {
"0": [30, 29, 28, 27, 26, 25, 24, 16, 17, 18, 19, 20, 21, 22, 23]
},
},
)
self.assertDictEqual(
HardwareConfig.TXPOWERIDX_TO_TXPOWER,
{
"2": {
"10": {0: 19, 1: 20, 2: 21, 3: 22, 4: 23, 5: 24, 6: 25, 7: 26},
"6": {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17},
},
"3": {"5": {0: 11, 1: 12, 2: 13, 3: 14, 4: 15, 5: 16, 6: 17, 7: 18}},
"default_channel": {
"default_mcs": {
0: 16,
1: 17,
2: 18,
3: 19,
4: 20,
5: 21,
6: 22,
7: 23,
}
},
},
)
self.assertEqual(HardwareConfig.BORESIDE_BW_IDX, 10)
self.assertEqual(HardwareConfig.MINIMUM_SNR_DB, -10)
self.assertEqual(HardwareConfig.SNR_SATURATE_THRESH_DB, 25)
self.assertEqual(HardwareConfig.BEAM_SEPERATE_IDX, 3)
self.assertEqual(HardwareConfig.MAX_SIDELOBE_LEVEL_DB, 12)
self.assertEqual(HardwareConfig.MAX_POWER, 23)
def test_get_adjacent_beam_index(self) -> None:
self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, 1), 1)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(0, -1), 0)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, 1), 9)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(8, -1), 8)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, 1), 15)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(15, -1), 14)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, 1), 17)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(16, -1), 24)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, 1), 23)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(23, -1), 22)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, 1), 29)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(30, -1), 30)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, 1), 60)
self.assertEqual(HardwareConfig.get_adjacent_beam_index(60, -1), 60)
def test_get_pwr_offset(self) -> None:
self.assertEqual(HardwareConfig.get_pwr_offset(channel="2", mcs="6"), 0)
self.assertEqual(
HardwareConfig.get_pwr_offset(target_pwr_idx=4, channel="2", mcs="6"), -9
)
self.assertEqual(
HardwareConfig.get_pwr_offset(ref_pwr_idx=4, channel="2", mcs="6"), 9
)
self.assertEqual(
HardwareConfig.get_pwr_offset(ref_pwr_idx=5, channel="3", mcs="5"), 7
)
self.assertEqual(
HardwareConfig.get_pwr_offset(ref_pwr_idx=7, channel="2", mcs="10"), -3
)
self.assertEqual(HardwareConfig.get_pwr_offset(target_pwr_idx=5), -2)
| 2.609375 | 3 |
task-library/efficientip/EipGetSubnetName.py | mlavi/blueprints | 60 | 12795447 | <reponame>mlavi/blueprints
#region headers
# * authors: <EMAIL>
# * date: 30/03/2020
# task_name: EipGetSubnets
# description: Get available networks attached to a site on EfficientIP
# input vars: eip_site_name, eip_min_free_ip
# output vars: subnet_lists
#endregion
# this script is used to retreive a list of available subnets on EIP
# this list is provided during at the application launch using dynaminy variable
# all print are commented
#region capture Calm variables
username = "@@{eip_username}@@"
password = <PASSWORD>}@@"
api_server = "@@{eip_endpoint}@@"
site_name = "@@{eip_site_name}@@"
min_free_ip = "@@{eip_min_free_ip}@@"
is_terminal = "1" #means that the subnet cannot contains others subnets as children
#endregion
# region prepare api call
api_server_port = "443"
api_server_endpoint = "/rest"
method = "GET"
base_url = "https://{}:{}{}".format(api_server, api_server_port, api_server_endpoint)
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
# endregion
#region API call function
def process_request(url, method, headers, payload=None):
if (payload is not None):
payload = json.dumps(payload)
r = urlreq(url, verb=method, auth='BASIC', user=username, passwd=password, params=payload, verify=False, headers=headers)
if not r.ok:
print("Request failed")
exit(1)
return r
#endregion
#region main processing
# make the api call
url = "{0}/ip_block_subnet_list?WHERE={1}='{2}'&WHERE={3}='{4}'".format(base_url, "is_terminal", is_terminal, "parent_site_name", site_name)
#print("Making a {} API call to {}".format(method, url))
resp = process_request(url, method, headers)
# parsing the response
subnets_list = []
subnets = json.loads(resp.content)
for subnet in subnets:
if subnet['subnet_ip_free_size'] != int(min_free_ip):
subnets_list.append(format(subnet['subnet_name']))
# return array use for dynamic variable input
print(", ".join(subnets_list))
#endregion | 2.328125 | 2 |
lib_client/src/d1_client/tests/test_session.py | DataONEorg/d1_python | 15 | 12795448 | <filename>lib_client/src/d1_client/tests/test_session.py<gh_stars>10-100
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import freezegun
import pytest
import requests
import requests.exceptions
import responses
import d1_common.logging_context
import d1_client.session
import d1_test.d1_test_case
import d1_test.mock_api.get
import d1_test.mock_api.post
import d1_test.sample
logger = logging.getLogger(__name__)
@d1_test.d1_test_case.reproducible_random_decorator("TestSession")
@freezegun.freeze_time("1945-01-02")
class TestSession(d1_test.d1_test_case.D1TestCase):
def _get_hash(self, pid):
d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL)
response = s.GET(["object", pid])
return hashlib.sha1(response.content).hexdigest()
def _get_response(self, pid, header_dict=None):
d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL)
return s.GET(["object", pid], headers=header_dict or {})
def _post(self, query_dict, header_dict, body):
d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
s = d1_client.session.Session(
d1_test.d1_test_case.MOCK_MN_BASE_URL, query={"default_query": "test"}
)
return s.POST(["post"], query=query_dict, headers=header_dict, data=body)
def _post_fields(self, fields_dict):
d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL)
return s.POST(["post"], fields=fields_dict)
@responses.activate
def test_1000(self):
"""HTTP GET is successful. Mocked GET returns object bytes uniquely tied to given
PID."""
a_pid = "pid_hy7tf83453y498"
b_pid = "pid_09y68gh73n60"
c_pid = "pid_987i075058679589060"
a_hash = self._get_hash(a_pid)
b_hash = self._get_hash(b_pid)
c_hash = self._get_hash(c_pid)
assert a_hash != b_hash
assert b_hash != c_hash
assert a_hash != c_hash
a1_hash = self._get_hash(a_pid)
c1_hash = self._get_hash(c_pid)
c2_hash = self._get_hash(c_pid)
a2_hash = self._get_hash(a_pid)
assert a_hash == a1_hash
assert a_hash == a2_hash
assert c_hash == c1_hash
assert c_hash == c2_hash
@responses.activate
def test_1010(self):
"""Successful HTTP GET returns 200 OK."""
response = self._get_response("pid1")
assert response.status_code == 200
@responses.activate
def test_1020(self):
"""HTTP GET 404."""
response = self._get_response("valid_pid", header_dict={"trigger": "404"})
assert response.status_code == 404
self.sample.assert_equals(response.text, "get_404")
@responses.activate
def test_1030(self):
"""HTTP GET against http://some.bogus.address/ raises ConnectionError."""
s = d1_client.session.Session("http://some.bogus.address")
with d1_common.logging_context.LoggingContext(logger):
logger.setLevel(logging.ERROR)
with pytest.raises(requests.exceptions.ConnectionError):
s.GET("/")
@responses.activate
def test_1040(self):
"""HTTP POST is successful Roundtrip for body, headers and query params."""
body_bytes = b"test_body"
header_dict = {"ijkl": "9876", "mnop": "5432"}
response = self._post({}, header_dict, body_bytes)
r_dict = response.json()
d1_test.sample.assert_equals(r_dict, "post_roundtrip")
@responses.activate
def test_1050(self):
"""Query params passed to Session() and individual POST are correctly
combined."""
d1_test.mock_api.post.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
body_bytes = b"test_body"
query_dict = {"abcd": "1234", "efgh": "5678"}
header_dict = {"ijkl": "9876", "mnop": "5432"}
response = self._post(query_dict, header_dict, body_bytes)
r_dict = response.json()
d1_test.sample.assert_equals(r_dict, "post_roundtrip_query")
@responses.activate
def test_1060(self):
"""Roundtrip for HTML Form fields."""
field_dict = {"post_data_1": "1234", "post_data_2": "5678"}
response = self._post_fields(field_dict)
r_dict = response.json()
d1_test.sample.assert_equals(r_dict, "post_roundtrip_form_fields")
@responses.activate
def test_1070(self):
"""cURL command line retains query parameters and headers."""
query_dict = {"abcd": "1234", "efgh": "5678"}
header_dict = {"ijkl": "9876", "mnop": "5432"}
s = d1_client.session.Session(d1_test.d1_test_case.MOCK_MN_BASE_URL)
curl_str = s.get_curl_command_line(
"POST", "http://some.bogus.address", query=query_dict, headers=header_dict
)
d1_test.sample.assert_equals(curl_str, "curl_command_line")
| 1.875 | 2 |
src/myrl/environments/environment.py | erwanlecarpentier/myrl | 0 | 12795449 | """
Abstract environment class
"""
class Environment(object):
def __init__(self, name, actions, gamma):
self.name = name
self.actions = actions
self.gamma = gamma
def get_state_dimension(self):
return None
def get_state_dtype(self):
return None
def get_state_magnitude(self):
return None
def get_initial_state(self):
return None
def step(self, s, a):
"""
:param s: state
:param a: actionSmall
:return: r, s_p, is_terminal(s_p)
"""
return 0.0, None, False
def get_info(self):
"""
Get general information to be saved on disk.
"""
return {
'name': self.name,
'actions': self.actions,
'gamma': self.gamma
}
| 3.171875 | 3 |
backend/common/routes.py | dogzz9445/TAWeb | 0 | 12795450 | <filename>backend/common/routes.py
from common.api.views.base import RestViewSet
from common.api.views.analyzed import AnalyzedRestViewSet
routes = [
{'regex': r'rest', 'viewset': RestViewSet, 'basename': 'Rest'},
{'regex': r'analyzed', 'viewset': AnalyzedRestViewSet, 'basename': 'Analyzed'}
]
| 1.625 | 2 |