Dataset schema (one row per source file; ⌀ marks a nullable column):

column | type | range
---|---|---
hexsha | string | length 40
size | int64 | 1 to 1.03M
ext | string | 10 classes
lang | string | 1 class
max_stars_repo_path | string | length 3 to 239
max_stars_repo_name | string | length 5 to 130
max_stars_repo_head_hexsha | string | length 40 to 78
max_stars_repo_licenses | sequence | length 1 to 10
max_stars_count ⌀ | int64 | 1 to 191k
max_stars_repo_stars_event_min_datetime ⌀ | string | length 24
max_stars_repo_stars_event_max_datetime ⌀ | string | length 24
max_issues_repo_path | string | length 3 to 239
max_issues_repo_name | string | length 5 to 130
max_issues_repo_head_hexsha | string | length 40 to 78
max_issues_repo_licenses | sequence | length 1 to 10
max_issues_count ⌀ | int64 | 1 to 67k
max_issues_repo_issues_event_min_datetime ⌀ | string | length 24
max_issues_repo_issues_event_max_datetime ⌀ | string | length 24
max_forks_repo_path | string | length 3 to 239
max_forks_repo_name | string | length 5 to 130
max_forks_repo_head_hexsha | string | length 40 to 78
max_forks_repo_licenses | sequence | length 1 to 10
max_forks_count ⌀ | int64 | 1 to 105k
max_forks_repo_forks_event_min_datetime ⌀ | string | length 24
max_forks_repo_forks_event_max_datetime ⌀ | string | length 24
content | string | length 1 to 1.03M
avg_line_length | float64 | 1 to 958k
max_line_length | int64 | 1 to 1.03M
alphanum_fraction | float64 | 0 to 1
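Each record below is one row of this schema printed inline: a metadata line, then the file's content, then the per-row statistics. A minimal sketch of inspecting such rows programmatically, assuming the rows are available locally as a Parquet file (the file name below is a placeholder, not part of this dump) and the Hugging Face `datasets` library is installed:

from datasets import load_dataset

# Hypothetical local file; substitute the actual data files for this dataset.
ds = load_dataset("parquet", data_files="code_rows.parquet", split="train")

row = ds[0]
print(row["hexsha"], row["size"], row["max_stars_repo_path"], row["max_stars_repo_licenses"])
print(row["avg_line_length"], row["max_line_length"], row["alphanum_fraction"])
print(row["content"][:200])  # first 200 characters of the stored source file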
79443627b2173b936471c2159718d2cc5536b029 | 1,000 | py | Python | jinstagram/users/admin.py | cjy1521/jinstagram | head 82e08388418cb787996e119b73bf70c98c349283 | licenses ["MIT"] | stars: null | issues: null | forks: null
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from jinstagram.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (
(None, {"fields": ("username", "password")}),
(_("Personal info"), {"fields": ("name", "email")}),
(
_("Permissions"),
{
"fields": (
"is_active",
"is_staff",
"is_superuser",
"groups",
"user_permissions",
),
},
),
(_("Important dates"), {"fields": ("last_login", "date_joined")}),
)
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"]
| avg_line_length: 28.571429 | max_line_length: 74 | alphanum_fraction: 0.557 |

794436946adcbc3165bb4d9c0d708b88f9aad200 | 13,122 | py | Python | src/sage/functions/transcendental.py | felix-salfelder/sage | head 5d8b2ff4794c44c7fa7a9d86ec567ecfa337e566 | licenses ["BSL-1.0"] | stars: 2 (2021-08-20T00:30:35.000Z to 2021-11-17T10:54:00.000Z) | issues: null | forks: null
"""
Transcendental Functions
"""
#*****************************************************************************
# Copyright (C) 2005 William Stein <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
import sys
import sage.libs.pari.all
from sage.libs.pari.all import pari
import sage.rings.complex_field as complex_field
import sage.rings.real_double as real_double
from sage.gsl.integration import numerical_integral
from sage.structure.parent import Parent
from sage.structure.coerce import parent
from sage.symbolic.expression import Expression
from sage.functions.log import exp
from sage.rings.all import (is_RealNumber, RealField,
is_ComplexNumber, ComplexField,
ZZ, RR, RDF, CDF, prime_range)
from sage.symbolic.function import GinacFunction, BuiltinFunction, is_inexact
CC = complex_field.ComplexField()
I = CC.gen(0)
class Function_zeta(GinacFunction):
def __init__(self):
r"""
Riemann zeta function at s with s a real or complex number.
INPUT:
- ``s`` - real or complex number
If s is a real number the computation is done using the MPFR
library. When the input is not real, the computation is done using
the PARI C library.
EXAMPLES::
sage: zeta(x)
zeta(x)
sage: zeta(2)
1/6*pi^2
sage: zeta(2.)
1.64493406684823
sage: RR = RealField(200)
sage: zeta(RR(2))
1.6449340668482264364724151666460251892189499012067984377356
sage: zeta(I)
zeta(I)
sage: zeta(I).n()
0.00330022368532410 - 0.418155449141322*I
It is possible to use the ``hold`` argument to prevent
automatic evaluation::
sage: zeta(2,hold=True)
zeta(2)
To then evaluate again, we currently must use Maxima via
:meth:`sage.symbolic.expression.Expression.simplify`::
sage: a = zeta(2,hold=True); a.simplify()
1/6*pi^2
TESTS::
sage: latex(zeta(x))
\zeta(x)
sage: a = loads(dumps(zeta(x)))
sage: a.operator() == zeta
True
sage: zeta(1)
Infinity
sage: zeta(x).subs(x=1)
Infinity
"""
GinacFunction.__init__(self, "zeta")
zeta = Function_zeta()
class Function_zetaderiv(GinacFunction):
def __init__(self):
r"""
Derivatives of the Riemann zeta function.
EXAMPLES::
sage: zetaderiv(1, x)
zetaderiv(1, x)
sage: zetaderiv(1, x).diff(x)
zetaderiv(2, x)
sage: var('n')
n
sage: zetaderiv(n,x)
zetaderiv(n, x)
TESTS::
sage: latex(zetaderiv(2,x))
\zeta^\prime\left(2, x\right)
sage: a = loads(dumps(zetaderiv(2,x)))
sage: a.operator() == zetaderiv
True
"""
GinacFunction.__init__(self, "zetaderiv", nargs=2)
zetaderiv = Function_zetaderiv()
def zeta_symmetric(s):
r"""
Completed function `\xi(s)` that satisfies
`\xi(s) = \xi(1-s)` and has zeros at the same points as the
Riemann zeta function.
INPUT:
- ``s`` - real or complex number
If s is a real number the computation is done using the MPFR
library. When the input is not real, the computation is done using
the PARI C library.
More precisely,
.. math::
\xi(s) = \gamma(s/2 + 1) * (s-1) * \pi^{-s/2} * \zeta(s).
EXAMPLES::
sage: zeta_symmetric(0.7)
0.497580414651127
sage: zeta_symmetric(1-0.7)
0.497580414651127
sage: RR = RealField(200)
sage: zeta_symmetric(RR(0.7))
0.49758041465112690357779107525638385212657443284080589766062
sage: C.<i> = ComplexField()
sage: zeta_symmetric(0.5 + i*14.0)
0.000201294444235258 + 1.49077798716757e-19*I
sage: zeta_symmetric(0.5 + i*14.1)
0.0000489893483255687 + 4.40457132572236e-20*I
sage: zeta_symmetric(0.5 + i*14.2)
-0.0000868931282620101 + 7.11507675693612e-20*I
REFERENCE:
- I copied the definition of xi from
http://www.math.ubc.ca/~pugh/RiemannZeta/RiemannZetaLong.html
"""
if not (is_ComplexNumber(s) or is_RealNumber(s)):
s = ComplexField()(s)
R = s.parent()
if s == 1: # deal with poles, hopefully
return R(0.5)
return (s/2 + 1).gamma() * (s-1) * (R.pi()**(-s/2)) * s.zeta()
import math
from sage.rings.polynomial.polynomial_real_mpfr_dense import PolynomialRealDense
class DickmanRho(BuiltinFunction):
r"""
Dickman's function is the continuous function satisfying the
differential equation
.. math::
x \rho'(x) + \rho(x-1) = 0
with initial conditions `\rho(x)=1` for
`0 \le x \le 1`. It is useful in estimating the frequency
of smooth numbers as asymptotically
.. math::
\Psi(a, a^{1/s}) \sim a \rho(s)
where `\Psi(a,b)` is the number of `b`-smooth
numbers less than `a`.
ALGORITHM:
Dickman's function is analytic on the interval
`[n,n+1]` for each integer `n`. To evaluate at
`n+t, 0 \le t < 1`, a power series is recursively computed
about `n+1/2` using the differential equation stated above.
As high precision arithmetic may be needed for intermediate results
the computed series are cached for later use.
Simple explicit formulas are used for the intervals [0,1] and
[1,2].
EXAMPLES::
sage: dickman_rho(2)
0.306852819440055
sage: dickman_rho(10)
2.77017183772596e-11
sage: dickman_rho(10.00000000000000000000000000000000000000)
2.77017183772595898875812120063434232634e-11
sage: plot(log(dickman_rho(x)), (x, 0, 15))
AUTHORS:
- Robert Bradshaw (2008-09)
REFERENCES:
- G. Marsaglia, A. Zaman, J. Marsaglia. "Numerical
Solutions to some Classical Differential-Difference Equations."
Mathematics of Computation, Vol. 53, No. 187 (1989).
"""
def __init__(self):
"""
Constructs an object to represent Dickman's rho function.
TESTS::
sage: dickman_rho(x)
dickman_rho(x)
sage: dickman_rho(3)
0.0486083882911316
sage: dickman_rho(pi)
0.0359690758968463
"""
self._cur_prec = 0
BuiltinFunction.__init__(self, "dickman_rho", 1)
def _eval_(self, x):
"""
EXAMPLES::
sage: [dickman_rho(n) for n in [1..10]]
[1.00000000000000, 0.306852819440055, 0.0486083882911316, 0.00491092564776083, 0.000354724700456040, 0.0000196496963539553, 8.74566995329392e-7, 3.23206930422610e-8, 1.01624828273784e-9, 2.77017183772596e-11]
sage: dickman_rho(0)
1.00000000000000
"""
if not is_RealNumber(x):
try:
x = RR(x)
except (TypeError, ValueError):
return None #PrimitiveFunction.__call__(self, SR(x))
if x < 0:
return x.parent()(0)
elif x <= 1:
return x.parent()(1)
elif x <= 2:
return 1 - x.log()
n = x.floor()
if self._cur_prec < x.parent().prec() or not self._f.has_key(n):
self._cur_prec = rel_prec = x.parent().prec()
# Go a bit beyond so we're not constantly re-computing.
max = x.parent()(1.1)*x + 10
abs_prec = (-self.approximate(max).log2() + rel_prec + 2*max.log2()).ceil()
self._f = {}
if sys.getrecursionlimit() < max + 10:
sys.setrecursionlimit(int(max) + 10)
self._compute_power_series(max.floor(), abs_prec, cache_ring=x.parent())
return self._f[n](2*(x-n-x.parent()(0.5)))
def power_series(self, n, abs_prec):
"""
This function returns the power series about `n+1/2` used
to evaluate Dickman's function. It is scaled such that the interval
`[n,n+1]` corresponds to x in `[-1,1]`.
INPUT:
- ``n`` - the lower endpoint of the interval for which
this power series holds
- ``abs_prec`` - the absolute precision of the
resulting power series
EXAMPLES::
sage: f = dickman_rho.power_series(2, 20); f
-9.9376e-8*x^11 + 3.7722e-7*x^10 - 1.4684e-6*x^9 + 5.8783e-6*x^8 - 0.000024259*x^7 + 0.00010341*x^6 - 0.00045583*x^5 + 0.0020773*x^4 - 0.0097336*x^3 + 0.045224*x^2 - 0.11891*x + 0.13032
sage: f(-1), f(0), f(1)
(0.30685, 0.13032, 0.048608)
sage: dickman_rho(2), dickman_rho(2.5), dickman_rho(3)
(0.306852819440055, 0.130319561832251, 0.0486083882911316)
"""
return self._compute_power_series(n, abs_prec, cache_ring=None)
def _compute_power_series(self, n, abs_prec, cache_ring=None):
"""
Compute the power series giving Dickman's function on [n, n+1], by
recursion in n. For internal use; self.power_series() is a wrapper
around this intended for the user.
INPUT:
- ``n`` - the lower endpoint of the interval for which
this power series holds
- ``abs_prec`` - the absolute precision of the
resulting power series
- ``cache_ring`` - for internal use, caches the power
series at this precision.
EXAMPLES::
sage: f = dickman_rho.power_series(2, 20); f
-9.9376e-8*x^11 + 3.7722e-7*x^10 - 1.4684e-6*x^9 + 5.8783e-6*x^8 - 0.000024259*x^7 + 0.00010341*x^6 - 0.00045583*x^5 + 0.0020773*x^4 - 0.0097336*x^3 + 0.045224*x^2 - 0.11891*x + 0.13032
"""
if n <= 1:
if n <= -1:
return PolynomialRealDense(RealField(abs_prec)['x'])
if n == 0:
return PolynomialRealDense(RealField(abs_prec)['x'], [1])
elif n == 1:
nterms = (RDF(abs_prec) * RDF(2).log()/RDF(3).log()).ceil()
R = RealField(abs_prec)
neg_three = ZZ(-3)
coeffs = [1 - R(1.5).log()] + [neg_three**-k/k for k in range(1, nterms)]
f = PolynomialRealDense(R['x'], coeffs)
if cache_ring is not None:
self._f[n] = f.truncate_abs(f[0] >> (cache_ring.prec()+1)).change_ring(cache_ring)
return f
else:
f = self._compute_power_series(n-1, abs_prec, cache_ring)
# integrand = f / (2n+1 + x)
# We calculate this way because the most significant term is the constant term,
# and so we want to push the error accumulation and remainder out to the least
# significant terms.
integrand = f.reverse().quo_rem(PolynomialRealDense(f.parent(), [1, 2*n+1]))[0].reverse()
integrand = integrand.truncate_abs(RR(2)**-abs_prec)
iintegrand = integrand.integral()
ff = PolynomialRealDense(f.parent(), [f(1) + iintegrand(-1)]) - iintegrand
i = 0
while abs(f[i]) < abs(f[i+1]):
i += 1
rel_prec = int(abs_prec + abs(RR(f[i])).log2())
if cache_ring is not None:
self._f[n] = ff.truncate_abs(ff[0] >> (cache_ring.prec()+1)).change_ring(cache_ring)
return ff.change_ring(RealField(rel_prec))
def approximate(self, x, parent=None):
r"""
Approximate using de Bruijn's formula
.. math::
\rho(x) \sim \frac{exp(-x \xi + Ei(\xi))}{\sqrt{2\pi x}\xi}
which is asymptotically equal to Dickman's function, and is much
faster to compute.
REFERENCES:
- N. De Bruijn, "The Asymptotic behavior of a function
occurring in the theory of primes." J. Indian Math Soc. v 15.
(1951)
EXAMPLES::
sage: dickman_rho.approximate(10)
2.41739196365564e-11
sage: dickman_rho(10)
2.77017183772596e-11
sage: dickman_rho.approximate(1000)
4.32938809066403e-3464
"""
log, exp, sqrt, pi = math.log, math.exp, math.sqrt, math.pi
x = float(x)
xi = log(x)
y = (exp(xi)-1.0)/xi - x
while abs(y) > 1e-12:
dydxi = (exp(xi)*(xi-1.0) + 1.0)/(xi*xi)
xi -= y/dydxi
y = (exp(xi)-1.0)/xi - x
return (-x*xi + RR(xi).eint()).exp() / (sqrt(2*pi*x)*xi)
dickman_rho = DickmanRho()
| avg_line_length: 32.641791 | max_line_length: 143 | alphanum_fraction: 0.691054 |

79443a7995e20178f45494c1b2d3b3686dee92de | 344 | py | Python | ceml/backend/jax/preprocessing/normalizer.py | andreArtelt/ceml | head 364d4630d6a01592c2ab86f2d53dbb7feb682381 | licenses ["MIT"] | stars: 24 (2019-07-12T08:11:25.000Z to 2021-12-09T19:24:53.000Z) | issues: 6 (2020-09-28T04:42:44.000Z to 2021-09-27T06:59:36.000Z) | forks: 7 (2019-09-11T13:54:57.000Z to 2021-01-14T13:13:26.000Z)
# -*- coding: utf-8 -*-
import jax.numpy as npx
from ....model import Model
class Normalizer(Model):
"""
Wrapper for the normalizer.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
def predict(self, x):
"""
Computes the forward pass.
"""
return x / npx.linalg.norm(x)
| avg_line_length: 19.111111 | max_line_length: 37 | alphanum_fraction: 0.540698 |

794440aa4ffba94e194f66673d4484a422fe9fd8 | 1,988 | py | Python | accounts/migrations/0027_auto_20200407_1928.py | Revibe-Music/core-services | head 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | licenses ["MIT"] | stars: 2 (2022-01-24T23:30:18.000Z to 2022-01-26T00:21:22.000Z) | issues: null | forks: null
# Generated by Django 3.0 on 2020-04-08 00:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0026_auto_20200402_1822'),
]
operations = [
migrations.CreateModel(
name='Friendship',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('accepted', models.BooleanField(blank=True, default=False, help_text='Whether or not the friend request has been accepted', verbose_name='accepted status')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('accepted_datetime', models.DateTimeField(blank=True, help_text='The date & time the request was accepted', null=True, verbose_name='accepted datetime')),
('first', models.ForeignKey(help_text='First user, the one that instigated the friendship', limit_choices_to={'programmatic_account': False}, on_delete=django.db.models.deletion.CASCADE, related_name='friends_first', to=settings.AUTH_USER_MODEL, verbose_name='first')),
('second', models.ForeignKey(help_text='Second user', limit_choices_to={'programmatic_account': False}, on_delete=django.db.models.deletion.CASCADE, related_name='friends_second', to=settings.AUTH_USER_MODEL, verbose_name='second')),
],
options={
'verbose_name': 'friendship',
'verbose_name_plural': 'friendships',
},
),
migrations.AddField(
model_name='customuser',
name='friends',
field=models.ManyToManyField(blank=True, help_text="User's friends", related_name='_customuser_friends_+', through='accounts.Friendship', to=settings.AUTH_USER_MODEL, verbose_name='friends'),
),
]
| avg_line_length: 53.72973 | max_line_length: 285 | alphanum_fraction: 0.666499 |

79443bfd01d78429bbfe256fd754a73ff796e553 | 2,574 | py | Python | python/common/acllite/cameracapture.py | Ascend/samples | head 5e060ddf8c502cf0e248ecbe1c8986e95351cbbd | licenses ["Apache-2.0"] | stars: 25 (2020-11-20T09:01:35.000Z to 2022-03-29T10:35:38.000Z) | issues: 5 (2021-02-28T20:49:37.000Z to 2022-03-04T21:50:27.000Z) | forks: 16 (2020-12-06T07:26:13.000Z to 2022-03-01T07:51:55.000Z)
# !/usr/bin/env python
# -*- coding:utf-8 -*-
#
from ctypes import *
import os
import time
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
from lib.acllite_so import libacllite
import constants as const
from acllite_image import AclLiteImage
from acllite_logger import log_error, log_info
CAMERA_OK = 0
CAMERA_ERROR = 1
CAMERA_CLOSED = 0
CAMERA_OPENED = 1
class CameraOutputC(Structure):
"""Ctypes parameter object for frame data"""
_fields_ = [
('size', c_int),
('data', POINTER(c_ubyte))
]
class CameraCapture(object):
"""Atlas200dk board camera access class"""
def __init__(self, camera_id, fps=20, size=(1280, 720)):
"""Create camera instance
Args:
camera_id: camera slot
fps: frame per second
size: frame resolution
"""
self._id = camera_id
self._fps = fps
self._width = size[0]
self._height = size[1]
self._size = int(self._width * self._height * 3 / 2)
self._status = CAMERA_CLOSED
if CAMERA_OK == self._open():
self._status = CAMERA_OPENED
else:
log_error("Open camera %d failed" % (camera_id))
def _open(self):
ret = libacllite.OpenCameraEx(self._id, self._fps,
self._width, self._height)
if (ret != CAMERA_OK):
log_error("Open camera %d failed ,ret = %d" % (self._id, ret))
return CAMERA_ERROR
self._status = CAMERA_OPENED
return CAMERA_OK
def is_opened(self):
"""Camera is opened or not"""
return (self._status == CAMERA_OPENED)
def read(self):
"""Read frame from camera"""
frame_data = CameraOutputC()
ret = libacllite.ReadCameraFrame(self._id, byref(frame_data))
if (ret != CAMERA_OK):
log_error("Read camera %d failed" % (self._id))
return None
return AclLiteImage(
addressof(frame_data.data.contents),
self._width,
self._height,
self._size,
const.MEMORY_DVPP)
def close(self):
"""Close camera"""
log_info("Close camera ", self._id)
libacllite.CloseCameraEx(self._id)
def __del__(self):
self.close()
if __name__ == "__main__":
cap = CameraCapture(camera_id=0, fps=20, size=(1280, 720))
start = time.time()
for i in range(0, 100):
image = cap.read()
print("Read 100 frame exhaust ", time.time() - start)
| avg_line_length: 27.978261 | max_line_length: 74 | alphanum_fraction: 0.590132 |

79443c67244ee4bb21ce0697a64ea89949999116 | 3,786 | py | Python | test/model/test_deepvar.py | stars: 1 (2020-02-16T12:05:43.000Z to 2020-02-16T12:05:43.000Z) from tqdo/gluon-ts (head 4628cd928177ca47d173f7da780b8aa03e545238, licenses ["Apache-2.0"]) | issues: null and forks: null from naftalic/gluon-ts (head be7125321db1d39fced8b26a36f5f4ffe5989747, licenses ["Apache-2.0"])
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# First-party imports
import pytest
from gluonts.dataset.artificial import constant_dataset
from gluonts.distribution import (
MultivariateGaussianOutput,
LowrankMultivariateGaussianOutput,
)
from gluonts.evaluation.backtest import backtest_metrics
from gluonts.model.deepvar import DeepVAREstimator
from gluonts.dataset.common import TrainDatasets
from gluonts.trainer import Trainer
from gluonts.dataset.multivariate_grouper import MultivariateGrouper
from gluonts.evaluation import MultivariateEvaluator
def load_multivariate_constant_dataset():
dataset_info, train_ds, test_ds = constant_dataset()
grouper_train = MultivariateGrouper(max_target_dim=10)
grouper_test = MultivariateGrouper(num_test_dates=1, max_target_dim=10)
metadata = dataset_info.metadata
metadata.prediction_length = dataset_info.prediction_length
return TrainDatasets(
metadata=dataset_info.metadata,
train=grouper_train(train_ds),
test=grouper_test(test_ds),
)
dataset = load_multivariate_constant_dataset()
target_dim = int(dataset.metadata.feat_static_cat[0].cardinality)
metadata = dataset.metadata
estimator = DeepVAREstimator
@pytest.mark.timeout(10)
@pytest.mark.parametrize(
"distr_output, num_batches_per_epoch, Estimator, hybridize, "
"use_marginal_transformation",
[
(
LowrankMultivariateGaussianOutput(dim=target_dim, rank=2),
10,
estimator,
True,
True,
),
(
LowrankMultivariateGaussianOutput(dim=target_dim, rank=2),
10,
estimator,
False,
False,
),
(
LowrankMultivariateGaussianOutput(dim=target_dim, rank=2),
10,
estimator,
True,
False,
),
(None, 10, estimator, True, True),
(
MultivariateGaussianOutput(dim=target_dim),
10,
estimator,
False,
True,
),
(
MultivariateGaussianOutput(dim=target_dim),
10,
estimator,
True,
True,
),
],
)
def test_deepvar(
distr_output,
num_batches_per_epoch,
Estimator,
hybridize,
use_marginal_transformation,
):
estimator = Estimator(
num_cells=20,
num_layers=1,
pick_incomplete=True,
target_dim=target_dim,
prediction_length=metadata.prediction_length,
# target_dim=target_dim,
freq=metadata.freq,
distr_output=distr_output,
scaling=False,
use_marginal_transformation=use_marginal_transformation,
trainer=Trainer(
epochs=1,
batch_size=8,
learning_rate=1e-10,
num_batches_per_epoch=num_batches_per_epoch,
hybridize=hybridize,
),
)
agg_metrics, _ = backtest_metrics(
train_dataset=dataset.train,
test_dataset=dataset.test,
forecaster=estimator,
evaluator=MultivariateEvaluator(
quantiles=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)
),
)
assert agg_metrics["ND"] < 1.5
| avg_line_length: 29.123077 | max_line_length: 75 | alphanum_fraction: 0.656894 |

79443d66d212c7e94cb999a0db1d6b68227922a6 | 2,703 | py | Python | _assignments/basics/types/type_bool_b.py | sages-pl/2022-01-pythonsqlalchemy-aptiv | head 1d6d856608e9dbe25b139e8968c48b7f46753b84 | licenses ["MIT"] | stars: null | issues: null | forks: null
"""
* Assignment: Type Bool Simple
* Required: yes
* Complexity: easy
* Lines of code: 9 lines
* Time: 5 min
English:
1. What you need to put in expressions to get the expected outcome?
2. In place of ellipsis (`...`) insert only `True` or `False`
3. Run doctests - all must succeed
Polish:
1. Co należy podstawić w wyrażeniach aby otrzymać wartość oczekiwaną?
2. W miejsce trzech kropek (`...`) wstawiaj tylko `True` lub `False`
3. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> assert a is not Ellipsis, 'Assign result to variable: `a`'
>>> assert b is not Ellipsis, 'Assign result to variable: `b`'
>>> assert c is not Ellipsis, 'Assign result to variable: `c`'
>>> assert d is not Ellipsis, 'Assign result to variable: `d`'
>>> assert e is not Ellipsis, 'Assign result to variable: `e`'
>>> assert f is not Ellipsis, 'Assign result to variable: `f`'
>>> assert g is not Ellipsis, 'Assign result to variable: `g`'
>>> assert h is not Ellipsis, 'Assign result to variable: `h`'
>>> assert i is not Ellipsis, 'Assign result to variable: `i`'
>>> assert type(a) is bool, 'Variable `a` has invalid type, should be bool'
>>> assert type(b) is bool, 'Variable `b` has invalid type, should be bool'
>>> assert type(c) is bool, 'Variable `c` has invalid type, should be bool'
>>> assert type(d) is bool, 'Variable `d` has invalid type, should be bool'
>>> assert type(e) is bool, 'Variable `e` has invalid type, should be bool'
>>> assert type(f) is bool, 'Variable `f` has invalid type, should be bool'
>>> assert type(g) is bool, 'Variable `g` has invalid type, should be bool'
>>> assert type(h) is bool, 'Variable `h` has invalid type, should be bool'
>>> assert type(i) is bool, 'Variable `i` has invalid type, should be bool'
>>> bool(a)
True
>>> bool(b)
True
>>> bool(c)
False
>>> bool(d)
True
>>> bool(e)
True
>>> bool(f)
False
>>> bool(g)
True
>>> bool(h)
True
>>> bool(i)
False
"""
# bool: the expression result must be True
a = True == ...
# bool: the expression result must be True
b = True != ...
# bool: the expression result must be False
c = not ...
# bool: the expression result must be True
d = bool(...) == True
# bool: the expression result must be True
e = bool(...) == False
# bool: the expression result must be False
f = ... or ...
# bool: the expression result must be True
g = ... and ...
# bool: the expression result must be True
h = bool(bool(...) == ...) or False
# bool: the expression result must be False
i = bool(...) is not bool(False)
| avg_line_length: 31.068966 | max_line_length: 79 | alphanum_fraction: 0.627081 |

79443e5a4e57574603ae76c742f8ad7e238b0053 | 814 | py | Python | countess/tests/data/plugins/bad_options.py | head 5f8534c8c9259d90d99d70e5bd9140fd0fdc8ea4 | licenses ["BSD-3-Clause"] | stars: 4 (2020-01-14T19:24:07.000Z to 2020-01-16T18:11:35.000Z) from VariantEffect/Enrich2-py3 | issues: 3 (2020-01-01T10:38:15.000Z to 2020-01-03T09:45:41.000Z) and forks: 1 (2022-02-20T00:35:24.000Z to 2022-02-20T00:35:24.000Z) from VariantEffect/CountESS
from countess.plugins.scoring import BaseScorerPlugin
from countess.plugins.options import Options
options = Options()
options.add_option(
name="Normalization Method",
varname="logr_method",
dtype=bool,
default="Wild Type",
choices={"Wild Type": "wt", "Full": "full", "Complete": "complete"},
hidden=False,
)
options.add_option(
name="Weighted",
varname="logr_method",
dtype=str,
default=True,
choices=None,
hidden=False,
)
class CountsScorer(BaseScorerPlugin):
name = "Counts Only"
version = "1.0"
author = "Alan Rubin, Daniel Esposito"
def __init__(self, store_manager, options):
super().__init__(store_manager, options)
def compute_scores(self):
return
def row_apply_function(self, *args, **kwargs):
return
| 21.421053 | 72 | 0.668305 |
79443eaba1fb1360fd7e47eea2b5e06967557ab2 | 1,796 | py | Python | src/backend/common/environment/environment.py | guineawheek/ftc-data-take-2 | 337bff2077eadb3bd6bbebd153cbb6181c99516f | [
"MIT"
] | null | null | null | src/backend/common/environment/environment.py | guineawheek/ftc-data-take-2 | 337bff2077eadb3bd6bbebd153cbb6181c99516f | [
"MIT"
] | null | null | null | src/backend/common/environment/environment.py | guineawheek/ftc-data-take-2 | 337bff2077eadb3bd6bbebd153cbb6181c99516f | [
"MIT"
] | null | null | null | import enum
import os
from typing import Optional
from backend.common.environment.tasks import TasksRemoteConfig
@enum.unique
class EnvironmentMode(enum.Enum):
LOCAL = "local"
REMOTE = "remote"
# Mostly GAE env variables
# See https://cloud.google.com/appengine/docs/standard/python3/runtime#environment_variables
class Environment(object):
@staticmethod
def is_dev() -> bool:
return os.environ.get("GAE_ENV") == "localdev"
@staticmethod
def is_prod() -> bool:
env = os.environ.get("GAE_ENV")
return env is not None and env.startswith("standard")
@staticmethod
def service() -> Optional[str]:
return os.environ.get("GAE_SERVICE", None)
@staticmethod
def service_for_current_service() -> str:
# Get current service - otherwise, fallback on default service
service = Environment.service()
return service if service else "default"
@staticmethod
def project() -> Optional[str]:
return os.environ.get("GOOGLE_CLOUD_PROJECT", None)
@staticmethod
def log_level() -> Optional[str]:
return os.environ.get("TBA_LOG_LEVEL")
@staticmethod
def tasks_mode() -> EnvironmentMode:
return EnvironmentMode(os.environ.get("TASKS_MODE", "local"))
@staticmethod
def tasks_remote_config() -> Optional[TasksRemoteConfig]:
remote_config_ngrok_url = os.environ.get("TASKS_REMOTE_CONFIG_NGROK_URL", None)
if not remote_config_ngrok_url:
return None
return TasksRemoteConfig(ngrok_url=remote_config_ngrok_url)
@staticmethod
def ndb_log_level() -> Optional[str]:
return os.environ.get("NDB_LOG_LEVEL")
@staticmethod
def redis_url() -> Optional[str]:
return os.environ.get("REDIS_CACHE_URL")
| avg_line_length: 28.967742 | max_line_length: 92 | alphanum_fraction: 0.686526 |

79443f27835b478e98eca57225d2de723651a19e | 778 | py | Python | tests/unit/usecases/test_get_token.py | HathorNetwork/hathor-explorer-service | head 81236deceac12ddfd813b33723481421a9064c82 | licenses ["MIT"] | stars: null | issues: 51 (2021-05-21T18:58:15.000Z to 2022-03-29T17:45:00.000Z) | forks: 1 (2022-02-08T21:15:26.000Z to 2022-02-08T21:15:26.000Z)
from unittest.mock import MagicMock
from pytest import fixture
from tests.fixtures.token_factory import TokenFactory
from usecases.get_token import GetToken
class TestGetToken:
@fixture
def token_gateway(self):
return MagicMock()
def test_get(self, token_gateway):
token = TokenFactory()
token_gateway.get_token = MagicMock(return_value=token)
get_token = GetToken(token_gateway)
result = get_token.get(token.id)
assert result
assert result['id'] == token.id
def test_get_not_found(self, token_gateway):
token_gateway.get_token = MagicMock(return_value=None)
get_token = GetToken(token_gateway)
result = get_token.get('abcdef1234567890')
assert result is None
| 22.228571 | 63 | 0.699229 |
79443fcc5f82726d3cf1bdc8314b36697d46bfd0 | 17,550 | py | Python | hw3/dqn.py | Mckiev/homework | f2e05dce1571d3398c148376f1f31a28ef8f2c2f | [
"MIT"
] | null | null | null | hw3/dqn.py | Mckiev/homework | f2e05dce1571d3398c148376f1f31a28ef8f2c2f | [
"MIT"
] | null | null | null | hw3/dqn.py | Mckiev/homework | f2e05dce1571d3398c148376f1f31a28ef8f2c2f | [
"MIT"
] | null | null | null | import uuid
import os, inspect
import time
import pickle
import sys
import gym.spaces
import itertools
import numpy as np
import random
import tensorflow as tf
import tensorflow.contrib.layers as layers
from collections import namedtuple
from dqn_utils import *
import logz
OptimizerSpec = namedtuple("OptimizerSpec", ["constructor", "kwargs", "lr_schedule"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(QLearner)[0]
params = {k: str(locals_[k]) if k in locals_ else None for k in args}
params['exp_name'] = locals_['q_func'].__name__ + locals_['double_q'] * '_doubleQ'
logz.save_params(params)
def get_num_params():
total_parameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
print('%d parameters in %s' %(variable_parameters ,variable.name))
total_parameters += variable_parameters
print('Total : %d' %total_parameters)
sys.exit()
class QLearner(object):
def __init__(
self,
env,
q_func,
optimizer_spec,
session,
exploration=LinearSchedule(1000000, 0.1),
stopping_criterion=None,
replay_buffer_size=1000000,
batch_size=32,
gamma=0.99,
learning_starts=50000,
learning_freq=4,
frame_history_len=4,
target_update_freq=10000,
grad_norm_clipping=10,
rew_file=None,
logdir = 'res',
double_q=True,
lander=False):
"""Run Deep Q-learning algorithm.
You can specify your own convnet using q_func.
All schedules are w.r.t. total number of steps taken in the environment.
Parameters
----------
env: gym.Env
gym environment to train on.
q_func: function
Model to use for computing the q function. It should accept the
following named arguments:
img_in: tf.Tensor
tensorflow tensor representing the input image
num_actions: int
number of actions
scope: str
scope in which all the model related variables
should be created
reuse: bool
whether previously created variables should be reused.
optimizer_spec: OptimizerSpec
Specifying the constructor and kwargs, as well as learning rate schedule
for the optimizer
session: tf.Session
tensorflow session to use.
exploration: rl_algs.deepq.utils.schedules.Schedule
schedule for probability of chosing random action.
stopping_criterion: (env, t) -> bool
should return true when it's ok for the RL algorithm to stop.
takes in env and the number of steps executed so far.
replay_buffer_size: int
How many memories to store in the replay buffer.
batch_size: int
How many transitions to sample each time experience is replayed.
gamma: float
Discount Factor
learning_starts: int
After how many environment steps to start replaying experiences
learning_freq: int
How many steps of environment to take between every experience replay
frame_history_len: int
How many past frames to include as input to the model.
target_update_freq: int
How many experience replay rounds (not steps!) to perform between
each update to the target Q network
grad_norm_clipping: float or None
If not None gradients' norms are clipped to this value.
double_q: bool
If True, then use double Q-learning to compute target values. Otherwise, use vanilla DQN.
https://papers.nips.cc/paper/3964-double-q-learning.pdf
"""
assert type(env.observation_space) == gym.spaces.Box
assert type(env.action_space) == gym.spaces.Discrete
self.target_update_freq = target_update_freq
self.optimizer_spec = optimizer_spec
self.batch_size = batch_size
self.learning_freq = learning_freq
self.learning_starts = learning_starts
self.stopping_criterion = stopping_criterion
self.env = env
self.session = session
self.exploration = exploration
self.rew_file = os.path.join(logdir,'data_dump.pkl') if rew_file is None else rew_file
setup_logger(logdir, locals())
###############
# BUILD MODEL #
###############
if len(self.env.observation_space.shape) == 1:
# This means we are running on low-dimensional observations (e.g. RAM)
input_shape = self.env.observation_space.shape
else:
img_h, img_w, img_c = self.env.observation_space.shape
input_shape = (img_h, img_w, frame_history_len * img_c)
self.num_actions = self.env.action_space.n
# set up placeholders
# placeholder for current observation (or state)
self.obs_t_ph = tf.placeholder(
tf.float32 if lander else tf.uint8, [None] + list(input_shape))
# placeholder for current action
self.act_t_ph = tf.placeholder(tf.int32, [None])
# placeholder for current reward
self.rew_t_ph = tf.placeholder(tf.float32, [None])
# placeholder for next observation (or state)
self.obs_tp1_ph = tf.placeholder(
tf.float32 if lander else tf.uint8, [None] + list(input_shape))
# placeholder for end of episode mask
# this value is 1 if the next state corresponds to the end of an episode,
# in which case there is no Q-value at the next state; at the end of an
# episode, only the current state reward contributes to the target, not the
# next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)
self.done_mask_ph = tf.placeholder(tf.float32, [None])
# casting to float on GPU ensures lower data transfer times.
if lander:
obs_t_float = self.obs_t_ph
obs_tp1_float = self.obs_tp1_ph
else:
obs_t_float = tf.cast(self.obs_t_ph, tf.float32) / 255.0
obs_tp1_float = tf.cast(self.obs_tp1_ph, tf.float32) / 255.0
# Here, you should fill in your own code to compute the Bellman error. This requires
# evaluating the current and next Q-values and constructing the corresponding error.
# TensorFlow will differentiate this error for you, you just need to pass it to the
# optimizer. See assignment text for details.
# Your code should produce one scalar-valued tensor: total_error
# This will be passed to the optimizer in the provided code below.
# Your code should also produce two collections of variables:
# q_func_vars
# target_q_func_vars
# These should hold all of the variables of the Q-function network and target network,
# respectively. A convenient way to get these is to make use of TF's "scope" feature.
# For example, you can create your Q-function network with the scope "q_func" like this:
# <something> = q_func(obs_t_float, num_actions, scope="q_func", reuse=False)
# And then you can obtain the variables like this:
# q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
# Older versions of TensorFlow may require using "VARIABLES" instead of "GLOBAL_VARIABLES"
# Tip: use huber_loss (from dqn_utils) instead of squared error when defining self.total_error
self.Q_vals = q_func(obs_t_float, self.num_actions, 'q_func', reuse = tf.AUTO_REUSE)
q_func_ph = tf.gather_nd(self.Q_vals, tf.stack([tf.range(tf.shape(self.Q_vals)[0]), self.act_t_ph], axis=1))
target_q_ph = q_func(obs_tp1_float, self.num_actions, 'target_q_func', reuse = tf.AUTO_REUSE)
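# Double DQN (below): the online network picks the argmax action for the next state,
# while the target network evaluates it; vanilla DQN instead takes the max over the
# target network's own Q-values.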
if double_q:
target_index = tf.math.argmax(q_func(obs_tp1_float, self.num_actions, 'q_func', reuse = tf.AUTO_REUSE), axis = 1, output_type = tf.int32)
target_v_ph = tf.gather_nd(target_q_ph, tf.stack([tf.range(tf.shape(target_q_ph)[0]), target_index], axis=1))
else:
target_v_ph = tf.math.reduce_max(target_q_ph, axis = 1)
backup_ph = self.rew_t_ph + (1 - self.done_mask_ph) * (gamma * target_v_ph)
self.total_error = tf.math.reduce_mean(huber_loss(q_func_ph - backup_ph))
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')
# construct optimization op (with gradient clipping)
self.learning_rate = tf.placeholder(tf.float32, (), name="learning_rate")
optimizer = self.optimizer_spec.constructor(learning_rate=self.learning_rate, **self.optimizer_spec.kwargs)
self.train_fn = minimize_and_clip(optimizer, self.total_error,
var_list=q_func_vars, clip_val=grad_norm_clipping)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_fn = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_fn.append(var_target.assign(var))
self.update_target_fn = tf.group(*update_target_fn)
# construct the replay buffer
self.replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len, lander=lander)
self.replay_buffer_idx = None
###############
# RUN ENV #
###############
self.model_initialized = False
self.num_param_updates = 0
self.mean_episode_reward = -float('nan')
self.best_mean_episode_reward = -float('inf')
self.last_obs = self.env.reset()
self.log_every_n_steps = 10000
self.start_time = time.time()
self.t = 0
def stopping_criterion_met(self):
return self.stopping_criterion is not None and self.stopping_criterion(self.env, self.t)
def step_env(self):
### 2. Step the env and store the transition
# At this point, "self.last_obs" contains the latest observation that was
# recorded from the simulator. Here, your code needs to store this
# observation and its outcome (reward, next observation, etc.) into
# the replay buffer while stepping the simulator forward one step.
# At the end of this block of code, the simulator should have been
# advanced one step, and the replay buffer should contain one more
# transition.
# Specifically, self.last_obs must point to the new latest observation.
# Useful functions you'll need to call:
# obs, reward, done, info = env.step(action)
# this steps the environment forward one step
# obs = env.reset()
# this resets the environment if you reached an episode boundary.
# Don't forget to call env.reset() to get a new observation if done
# is true!!
# Note that you cannot use "self.last_obs" directly as input
# into your network, since it needs to be processed to include context
# from previous frames. You should check out the replay buffer
# implementation in dqn_utils.py to see what functionality the replay
# buffer exposes. The replay buffer has a function called
# encode_recent_observation that will take the latest observation
# that you pushed into the buffer and compute the corresponding
# input that should be given to a Q network by appending some
# previous frames.
# Don't forget to include epsilon greedy exploration!
# And remember that the first time you enter this loop, the model
# may not yet have been initialized (but of course, the first step
# might as well be random, since you haven't trained your net...)
idx = self.replay_buffer.store_frame(self.last_obs)
obs = self.replay_buffer.encode_recent_observation()
#checking if q_func was initialized
if not self.model_initialized:
ac = np.random.randint(self.num_actions)
else:
#Choosing eps-greedy action
eps = self.exploration.value(self.t)
if np.random.uniform() < eps:
ac = np.random.randint(self.num_actions)
else:
Q_vals = self.session.run(self.Q_vals, {self.obs_t_ph : obs[None]})
ac = np.argmax(Q_vals)
obs_tp1, rew, done, _ = self.env.step(ac)
self.replay_buffer.store_effect(idx, ac, rew, done)
if done:
obs_tp1 = self.env.reset()
self.last_obs = obs_tp1
def update_model(self):
### 3. Perform experience replay and train the network.
# note that this is only done if the replay buffer contains enough samples
# for us to learn something useful -- until then, the model will not be
# initialized and random actions should be taken
if (self.t > self.learning_starts and \
self.t % self.learning_freq == 0 and \
self.replay_buffer.can_sample(self.batch_size)):
# Here, you should perform training. Training consists of four steps:
# 3.a: use the replay buffer to sample a batch of transitions (see the
# replay buffer code for function definition, each batch that you sample
# should consist of current observations, current actions, rewards,
# next observations, and done indicator).
# 3.b: initialize the model if it has not been initialized yet; to do
# that, call
# initialize_interdependent_variables(self.session, tf.global_variables(), {
# self.obs_t_ph: obs_t_batch,
# self.obs_tp1_ph: obs_tp1_batch,
# })
# where obs_t_batch and obs_tp1_batch are the batches of observations at
# the current and next time step. The boolean variable model_initialized
# indicates whether or not the model has been initialized.
# Remember that you have to update the target network too (see 3.d)!
# 3.c: train the model. To do this, you'll need to use the self.train_fn and
# self.total_error ops that were created earlier: self.total_error is what you
# created to compute the total Bellman error in a batch, and self.train_fn
# will actually perform a gradient step and update the network parameters
# to reduce total_error. When calling self.session.run on these you'll need to
# populate the following placeholders:
# self.obs_t_ph
# self.act_t_ph
# self.rew_t_ph
# self.obs_tp1_ph
# self.done_mask_ph
# (this is needed for computing self.total_error)
# self.learning_rate -- you can get this from self.optimizer_spec.lr_schedule.value(t)
# (this is needed by the optimizer to choose the learning rate)
# 3.d: periodically update the target network by calling
# self.session.run(self.update_target_fn)
# you should update every target_update_freq steps, and you may find the
# variable self.num_param_updates useful for this (it was initialized to 0)
obs_batch, act_batch, rew_batch, obs_tp1_batch, done_mask = self.replay_buffer.sample(self.batch_size)
if not self.model_initialized:
initialize_interdependent_variables(self.session, tf.global_variables(),
{self.obs_t_ph : obs_batch, self.obs_tp1_ph : obs_tp1_batch})
self.model_initialized = True
self.session.run(self.train_fn, {self.obs_t_ph: obs_batch, self.act_t_ph: act_batch, self.rew_t_ph: rew_batch,
self.obs_tp1_ph: obs_tp1_batch, self.done_mask_ph: done_mask,
self.learning_rate : self.optimizer_spec.lr_schedule.value(self.t)},
options=tf.RunOptions(report_tensor_allocations_upon_oom=True))
if self.num_param_updates % self.target_update_freq == 0:
self.session.run(self.update_target_fn)
self.num_param_updates += 1
self.t += 1
def log_progress(self):
episode_rewards = get_wrapper_by_name(self.env, "Monitor").get_episode_rewards()
episode_lengths = get_wrapper_by_name(self.env, "Monitor").get_episode_lengths()
if len(episode_rewards) > 0:
self.mean_episode_reward = np.mean(episode_rewards[-100:])
if len(episode_rewards) > 100:
self.best_mean_episode_reward = max(self.best_mean_episode_reward, self.mean_episode_reward)
if self.t % self.log_every_n_steps == 0 and self.model_initialized:
print("Timestep %d, total length %d" % (self.t, np.sum(episode_lengths)))
print("mean reward (100 episodes) %f" % self.mean_episode_reward)
print("best mean reward %f" % self.best_mean_episode_reward)
print("episodes %d" % len(episode_rewards))
print("exploration %f" % self.exploration.value(self.t))
print("learning_rate %f" % self.optimizer_spec.lr_schedule.value(self.t))
if self.start_time is not None:
print("toral running time %f" % ((time.time() - self.start_time) / 60.))
sys.stdout.flush()
with open(self.rew_file, 'wb') as f:
pickle.dump((episode_rewards, episode_lengths), f, pickle.HIGHEST_PROTOCOL)
logz.log_tabular("TotalTime", time.time() - self.start_time)
logz.log_tabular("Timestep", self.t)
logz.log_tabular("MeanEpisodeReward", self.mean_episode_reward)
logz.log_tabular("MaxMeanReturn", self.best_mean_episode_reward)
logz.dump_tabular()
def learn(*args, **kwargs):
alg = QLearner(*args, **kwargs)
while not alg.stopping_criterion_met():
alg.step_env()
# at this point, the environment should have been advanced one step (and
# reset if done was true), and self.last_obs should point to the new latest
# observation
alg.update_model()
alg.log_progress()
| avg_line_length: 42.909535 | max_line_length: 143 | alphanum_fraction: 0.691054 |

79443fe1f64e835f3b62210524a30df1e01c9f3e | 2,478 | py | Python | main.py | bph-tuwien/bph_co2 | head 042e22a91fc3dd56af713837b725e5367dbe579f | licenses ["MIT"] | stars: null | issues: null | forks: null
from bph_co2.solver import CO2_Simulation, ppm_to_mg_m3, mg_m3_to_ppm
from bph_co2.timeseries import Timeseries
from bph_co2.window import Window
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from bph_co2.resources import Input_Data as case_data
if __name__ == '__main__':
# load .csv files
with pkg_resources.path(case_data, 'persons.csv') as path:
persons_filename = path.__str__()
with pkg_resources.path(case_data, 'internal_co2_source.csv') as path:
internal_co2_source_filename = path.__str__()
with pkg_resources.path(case_data, 'air_change_rate.csv') as path:
air_change_rate_filename = path.__str__()
with pkg_resources.path(case_data, 'window_state.csv') as path:
window_state_filename = path.__str__()
with pkg_resources.path(case_data, 'indoor_temperature.csv') as path:
indoor_temperature_filename = path.__str__()
with pkg_resources.path(case_data, 'outdoor_temperature.csv') as path:
outdoor_temperature_filename = path.__str__()
n_persons = Timeseries.from_csv(persons_filename, interpolation_scheme='previous')
internal_co2_source = Timeseries.from_csv(internal_co2_source_filename, interpolation_scheme='linear')
air_change_rate = Timeseries.from_csv(air_change_rate_filename, interpolation_scheme='linear')
window_state = Timeseries.from_csv(window_state_filename, interpolation_scheme='previous')
indoor_temperature = Timeseries.from_csv(indoor_temperature_filename, interpolation_scheme='linear')
outdoor_temperature = Timeseries.from_csv(outdoor_temperature_filename, interpolation_scheme='linear')
# create a window:
window = Window(hight=1,
area=1,
state=window_state)
sim = CO2_Simulation(name='test_simulation',
volume=51.48,
n_persons=n_persons,
emission_rate=27000,
internal_co2_source=internal_co2_source,
indoor_temperature=indoor_temperature,
outdoor_temperature=outdoor_temperature,
windows=[window],
air_change_rate=air_change_rate,
timestep=60,
t_end=26640)
res = sim.calculate()
res.plot()
| avg_line_length: 39.967742 | max_line_length: 106 | alphanum_fraction: 0.690476 |

794440aa4ffba94e194f66673d4484a422fe9fd8 | 849 | py | Python | fixture/mail.py | dondemonz/python_training_mantis | head b327ba1c057685aa6381c3afd882d9dec58b0cfc | licenses ["Apache-2.0"] | stars: null | issues: null | forks: null
import poplib
import email
import time
class MailHelper:
def __init__(self, app):
self.app = app
def get_mail(self, username, password, subject):
for i in range(5):
pop = poplib.POP3(self.app.config["james"]["host"])
pop.user(username)
pop.pass_(password)
num = pop.stat()[0]
if num > 0:
for n in range(num):
msglines = pop.retr(n+1)[1]
msgtext = "\n".join(map(lambda x: x.decode("utf-8"), msglines))
msg = email.message_from_string(msgtext)
if msg.get("Subject") == subject:
pop.dele(n+1)
pop.quit()
return msg.get_payload()
pop.quit()
time.sleep(7)
return None
| avg_line_length: 31.444444 | max_line_length: 83 | alphanum_fraction: 0.47232 |

7944410d9f441fa9c6cee1521402196999daee8f | 2,010 | py | Python | main.py | jpr2/01-Interactive-Fiction | head f00aca64bdccb8d2ce6e6d02d5f9043340060eab | licenses ["MIT"] | stars: null | issues: null | forks: null
#!/usr/bin/env python3
import sys,os,json,re
assert sys.version_info >= (3,9), "This script requires at least Python 3.9"
def load(l):
f = open(os.path.join(sys.path[0], l))
data = f.read()
j = json.loads(data)
return j
def find_passage(game_desc, pid):
for p in game_desc["passages"]:
if p["pid"] == pid:
return p
return {}
# Removes Harlowe formatting from Twison description
def format_passage(description):
description = re.sub(r'//([^/]*)//',r'\1',description)
description = re.sub(r"''([^']*)''",r'\1',description)
description = re.sub(r'~~([^~]*)~~',r'\1',description)
description = re.sub(r'\*\*([^\*]*)\*\*',r'\1',description)
description = re.sub(r'\*([^\*]*)\*',r'\1',description)
description = re.sub(r'\^\^([^\^]*)\^\^',r'\1',description)
description = re.sub(r'(\[\[[^\|]*?)\|([^\]]*?\]\])',r'\1->\2',description)
description = re.sub(r'\[\[([^(->\])]*?)->[^\]]*?\]\]',r'[ \1 ]',description)
description = re.sub(r'\[\[(.+?)\]\]',r'[ \1 ]',description)
return description
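# For illustration (hypothetical passage text, not taken from game.json), the
# substitutions above strip Harlowe emphasis and collapse [[link->target]] markup:
if __name__ == "__main__":
    sample = "You see a //dim// light. [[Open the door->Door]] or [[Wait]]."
    assert format_passage(sample) == "You see a dim light. [ Open the door ] or [ Wait ]."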
def update(current,choice,game_desc):
if choice == "":
return current
for l in current["links"]:
if l["name"].lower() == choice:
current = find_passage(game_desc, l["pid"])
return current
print("I don't understand. Please try again.")
return current
def render(current):
print("\n\n")
print(current["name"])
print(format_passage(current["text"]))
print("\n\n")
def get_input():
choice = input("What would you like to do? (type quit to exit) ")
choice = choice.lower().strip()
return choice
def main():
game_desc = load("game.json")
current = find_passage(game_desc, game_desc["startnode"])
choice = ""
while choice != "quit" and current != {}:
current = update(current,choice,game_desc)
render(current)
choice = get_input()
print("Thanks for playing!")
if __name__ == "__main__":
main()
| avg_line_length: 27.162162 | max_line_length: 81 | alphanum_fraction: 0.568159 |

794441a423a5422e6375cb4957cfff4282f486de | 1,738 | py | Python | mbs_messaging_umb/conf.py | qixiang/mbs-messaging-umb | head 54ef47bd8ad7ed3f119a68fdcf3589f383a0ac4e | licenses ["MIT"] | stars: 1 (2017-09-20T15:59:37.000Z to 2017-09-20T15:59:37.000Z) | issues: 10 (2017-09-26T22:43:41.000Z to 2020-01-07T02:41:14.000Z) | forks: 5 (2017-09-27T21:52:11.000Z to 2019-12-11T04:43:50.000Z) from mikebonnet/mbs-messaging-umb (head 60a8a50322057fdbf310fb9750080ea3301359c8)
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Written by Mike Bonnet <[email protected]>
import os
import imp
import logging
CONFFILE = '/etc/mbs-messaging-umb/config.py'
_CONF_MODULE = None
log = logging.getLogger(__name__)
def load_config():
"""
Load the configuration from the default config file.
"""
global _CONF_MODULE
if not _CONF_MODULE:
conffile = os.environ.get('MBS_MESSAGING_UMB_CONFIG', CONFFILE)
try:
_CONF_MODULE = imp.load_source('mbs_messaging_umb_config', conffile)
except Exception:
log.exception('Could not load config file: {0}'.format(conffile))
return _CONF_MODULE
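# A minimal usage sketch (illustrative only; whatever module-level names config.py
# defines become attributes of the returned module, and the attribute below is hypothetical):
if __name__ == "__main__":
    conf = load_config()
    print(getattr(conf, "STOMP_URI", None) if conf else "config could not be loaded")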
| avg_line_length: 38.622222 | max_line_length: 80 | alphanum_fraction: 0.740506 |

794442b19ff00f6e2c6f53e2459fa8fd49f0d118 | 9,106 | py | Python | quiz.py | MatheusDMD/TechWeb_QuizPlatform | head ffc76ec2efe14fed8daf37d9fa4a096259f1770a | licenses ["MIT"] | stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
from flask import Flask, request, render_template, make_response, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
import os
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
password = db.Column(db.String(50), unique=False)
def __init__(self, username, email, password):
self.username = username
self.email = email
self.password = password
class Quiz(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), unique=False)
theme = db.Column(db.String(100), unique=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __init__(self,title, theme, user_id):
self.title = title
self.theme = theme
self.user_id = user_id
class dquestion(db.Model):
id = db.Column(db.Integer, primary_key=True)
question = db.Column(db.String(100), unique=False)
quiz_id = db.Column(db.Integer, db.ForeignKey('quiz.id'))
def __init__(self, question, quiz_id):
self.question = question
self.quiz_id = quiz_id
class mcquestion(db.Model):
id = db.Column(db.Integer, primary_key=True)
question = db.Column(db.String(100), unique=True)
alt1 = db.Column(db.String(100), unique=False)
alt2 = db.Column(db.String(100), unique=False)
alt3 = db.Column(db.String(100), unique=False)
alt4 = db.Column(db.String(100), unique=False)
correct_alt = db.Column(db.Integer, unique=False)
quiz_id = db.Column(db.Integer, db.ForeignKey('quiz.id'))
def __init__(self, question,alt1,alt2,alt3,alt4,correct_alt,quiz_id):
self.question = question
self.alt1 = alt1
self.alt2 = alt2
self.alt3 = alt3
self.alt4 = alt4
self.correct_alt = correct_alt
self.quiz_id = quiz_id
class mcanswer(db.Model):
id = db.Column(db.Integer, primary_key=True)
answer = db.Column(db.Integer, unique=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
quiz_id = db.Column(db.Integer, db.ForeignKey('quiz.id'))
question_id = db.Column(db.Integer, db.ForeignKey('mcquestion.id'))
def __init__(self,answer,user_id,quiz_id,question_id):
self.answer = answer
self.user_id = user_id
self.quiz_id = quiz_id
self.question_id = question_id
@app.route('/', methods=['GET', 'POST'])
def register():
if request.method == 'POST':
mode = request.args.get('mode')
if mode=='login':
email = request.form["login_email"]
password = request.form["login_password"]
            # come back here and read how to ...
            login_user = User.query.filter_by(email=email).first()
            # Guard against unknown e-mail addresses before checking the password.
            if login_user is not None and login_user.password == password:
                next_page = make_response(render_template('index.html'))
                next_page.set_cookie('user_id', str(login_user.id))
                return next_page
            else:
                return 'wrong password'
if mode=='register':
first_name = request.form["first_name"]
last_name = request.form["last_name"]
email = request.form["email"]
password = request.form["password"]
user = User(username=first_name+' '+last_name, email=email, password=password)
db.session.add(user)
            db.session.commit()
            next_page2 = make_response(render_template('index.html'))
            next_page2.set_cookie('user_id', str(user.id))
return next_page2
return render_template('login.html')
@app.route('/main')
def main():
return render_template('index.html')
@app.route('/create', methods=['GET', 'POST'])
def create_quiz():
if request.method == 'POST':
title = request.form["title"]
        theme = request.form["theme"]
user_id = request.cookies.get('user_id')
quiz = Quiz(title=title, theme=theme, user_id=user_id)
db.session.add(quiz)
        db.session.commit()  # return User.query.get(quiz.user_id).email
resp = make_response(render_template('question.html',quiz_id=quiz.id,visibility="visible"))
resp.set_cookie('quiz_id', str(quiz.id))
return resp
return render_template('create.html')
@app.route('/question', methods=['GET','POST'])
def create_question():
if request.method == 'POST':
print(request.values)
question = request.form["question"]
alt1 = request.form["alt1"]
alt2 = request.form["alt2"]
alt3 = request.form["alt3"]
alt4 = request.form["alt4"]
correct_alt = request.form["check"]
quiz_id = request.cookies.get('quiz_id')
m_question = mcquestion(question=question,alt1=alt1,alt2=alt2,alt3=alt3,alt4=alt4,correct_alt=correct_alt,quiz_id=quiz_id)
db.session.add(m_question)
db.session.commit()
quiz_questions = mcquestion.query.filter_by(quiz_id=quiz_id).all()
return render_template('questionlist.html',questions=quiz_questions,visibility="visible")
return render_template('question.html',visibility="hidden")
@app.route('/quizlist', methods=['GET','POST'])
def list_quiz():
if request.method == 'POST':
quiz_id = request.form["select"]
quiz_questions = mcquestion.query.filter_by(quiz_id=quiz_id).all()
return render_template('questionlist.html',questions=quiz_questions,quiz_id=quiz_id,visibility="visible")
else:
user_id = request.cookies.get('user_id')
quizs = Quiz.query.filter_by(user_id=user_id).all()
return render_template('quizlist.html',quizs=quizs)
@app.route('/stats', methods=['GET','POST'])
def stats_view():
if request.method == 'POST':
user_id = request.cookies.get('user_id')
question_id = request.form["select"]
question = mcquestion.query.get(question_id)
answer_list = mcanswer.query.filter_by(question_id=question_id).all()
number_answers = [0,0,0,0]
for answer in answer_list:
number_answers[answer.answer-1] += 1
if len(answer_list) == 0:
return redirect(url_for('list_quiz'))
percentage_answers = [(100*float(x)) / len(answer_list) for x in number_answers]
for i in percentage_answers:
print(i)
active_list = ['','','','']
active_list[question.correct_alt-1]='active'
myanswer = None
for answer in answer_list:
            # user_id comes from the cookie as a string, so compare like types.
            if str(answer.user_id) == user_id:
myanswer = answer.answer
if myanswer == None:
visibility = 'hidden'
else:
visibility = 'visible'
return render_template('stats.html', percentages=percentage_answers,title=question.question,alt1=question.alt1,alt2=question.alt2,alt3=question.alt3,alt4=question.alt4,active=active_list,answer=myanswer,visibility=visibility)
@app.route('/userlist')
def list_user():
users = User.query.all()
return render_template('quizlist.html',quizs=users)
@app.route('/quiz/<quiz_id>', methods=['GET','POST'])
def answer_quiz(quiz_id):
user_id = request.cookies.get('user_id')
    if user_id is None:
return "Login first"
else:
if request.method == 'POST':
questions = mcquestion.query.filter_by(quiz_id=quiz_id).all()
quest = request.form['question']
question = mcquestion.query.filter_by(question=quest).first()
quiz = Quiz.query.get(quiz_id)
answer = request.form['check']
user_answer = mcanswer(answer=answer,user_id=user_id,quiz_id=quiz_id,question_id=question.id)
db.session.add(user_answer)
db.session.commit()
if len(questions)-1 != questions.index(question):
question = questions[questions.index(question)+1]
return render_template('answerquiz.html',title=quiz.title,theme=quiz.theme,question=question.question,alt1=question.alt1,alt2=question.alt2,alt3=question.alt3,alt4=question.alt4)
else:
return redirect(url_for('quiz_done'))
questions = mcquestion.query.filter_by(quiz_id=quiz_id).all()
question = questions[0]
quiz = Quiz.query.get(quiz_id)
return render_template('answerquiz.html',title=quiz.title,theme=quiz.theme,question=question.question,alt1=question.alt1,alt2=question.alt2,alt3=question.alt3,alt4=question.alt4)
@app.route('/quiz_done')
def quiz_done():
if request.method == "POST":
return redirect(url_for('main'))
return render_template("quizanswered.html")
@app.route('/myansweredquiz', methods=['GET','POST'])
def answerquiz_view():
if request.method == 'POST':
quiz_id = request.form["select"]
quiz_questions = mcquestion.query.filter_by(quiz_id=quiz_id).all()
return render_template('questionlist.html',questions=quiz_questions,quiz_id=quiz_id,visibility="hidden")
else:
user_id = request.cookies.get('user_id')
myansweredquestions = mcanswer.query.filter_by(user_id=user_id).all()
list_of_quiz = []
for question in myansweredquestions:
if Quiz.query.get(question.quiz_id) not in list_of_quiz:
list_of_quiz.append(Quiz.query.get(question.quiz_id))
print(question.quiz_id)
print(list_of_quiz)
return render_template('donequizlist.html',quizs=list_of_quiz)
db.create_all()
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port))
| 39.081545 | 227 | 0.694926 |
794443d127b042461f725f066044500437db629f | 771 | py | Python | passenger_wsgi.py | chriswilson1982/bottle-blog-uploader | b17d3ab51fdc32fb81ecfcf06c7722aaa9bf6d1e | ["MIT"] | null | null | null | passenger_wsgi.py | chriswilson1982/bottle-blog-uploader | b17d3ab51fdc32fb81ecfcf06c7722aaa9bf6d1e | ["MIT"] | 2 | 2021-02-07T13:22:32.000Z | 2021-02-07T13:23:21.000Z | passenger_wsgi.py | chriswilson1982/bottle-blog-uploader | b17d3ab51fdc32fb81ecfcf06c7722aaa9bf6d1e | ["MIT"] | null | null | null |
import os, sys
# Use Python 3.7.1 interpreter
INTERP = "/home/chriswilson1982/opt/python-3.7.1/bin/python3"
# INTERP is present twice so that the new Python interpreter knows the actual executable path
if sys.executable != INTERP:
os.execl(INTERP, INTERP, *sys.argv)
# Add current directory to path, if isn't already
cmd_folder = os.path.dirname(os.path.abspath(__file__))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import bottle
from bottle import route, run, template
import blog_uploader # Custom routes
# Setup Dreamhost passenger hook
def application(environ, start_response):
return bottle.default_app().wsgi(environ,start_response)
# Main method for local development
if __name__ == "__main__":
bottle.debug(True)
run()
| 29.653846 | 93 | 0.758755 |
794443e4011377b8941e862e4b83cb001a5a17e6 | 686 | py | Python | migrations/versions/dff188188029_.py | stevenabadie/volunteercore | b797354aa52fbcfdca39925e6729263762a679ce | ["MIT"] | 2 | 2019-11-08T04:35:34.000Z | 2020-08-01T19:39:53.000Z | migrations/versions/dff188188029_.py | stevenabadie/volunteercore | b797354aa52fbcfdca39925e6729263762a679ce | ["MIT"] | 95 | 2018-09-29T22:50:51.000Z | 2019-05-27T16:37:46.000Z | migrations/versions/dff188188029_.py | stevenabadie/volunteercore | b797354aa52fbcfdca39925e6729263762a679ce | ["MIT"] | 4 | 2018-09-29T15:50:15.000Z | 2019-05-11T20:29:19.000Z |
"""empty message
Revision ID: dff188188029
Revises: a260230e29cb
Create Date: 2019-04-27 22:29:41.229608
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dff188188029'
down_revision = 'a260230e29cb'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('opportunity', sa.Column('location_state', sa.String(length=20), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('opportunity', 'location_state')
# ### end Alembic commands ###
| 23.655172 | 98 | 0.704082 |
794443e6b3ccbe28e9eab30c44afd565a0375cf9 | 5,146 | py | Python | app/main/service/location_service.py | OdysseyMomentum/Cryptomice-Eonpeers | bc7f1cb0ef640662a04f0040603fde092cabe336 | ["MIT"] | null | null | null | app/main/service/location_service.py | OdysseyMomentum/Cryptomice-Eonpeers | bc7f1cb0ef640662a04f0040603fde092cabe336 | ["MIT"] | null | null | null | app/main/service/location_service.py | OdysseyMomentum/Cryptomice-Eonpeers | bc7f1cb0ef640662a04f0040603fde092cabe336 | ["MIT"] | null | null | null |
import datetime
import uuid
from app.main.model.location import Location
from app.main.model.company import Company
from app.main.services import db
from app.main.util.tasks import make_gossip_call
from app.main.util.eonerror import EonError
from app.main.util.keymanagementutils import KeyManagementClient
from app.main.util.hashutils import HashUtils
def save_new_location(data):
"""
Save a new location for your owned company, i.e. sign the location_key with your key
"""
location = Location.query.filter_by(company_id=data['company_id'], name=data['name']).first()
if not location:
#get the key of the connected company, if "is_own", call the keymanagement system:
connected_company = Company.query.filter_by(public_id=data['company_id']).first()
if not connected_company:
            raise EonError('Unknown company, create the company first.', 400)
key = ''
kmc = KeyManagementClient()
hu = HashUtils()
if connected_company.is_own and connected_company.public_key==None:
#use the KMC to retrieve the public key and then sign the hash of the location_data
hashed_location_data = hu.digest(data['location_data'])
key = kmc.sign_message(hashed_location_data) #hexdigest this?
else:
raise EonError('The company does not have a known public key.', 400)
new_location = Location(
public_id=str(uuid.uuid4()),
name=data['name'],
location_data=data['location_data'],
created_on=datetime.datetime.utcnow(),
company_id = data['company_id'],
location_key=key['signed'].hex()
)
save_changes(new_location)
return generate_creation_ok_message(new_location)
else:
raise EonError('Another location already exists with the given data.', 409)
def save_new_external_location(data):
"""
    Save a new location from another company and check the signature. The payload carries the signed key because it cannot be computed locally.
    This function is called from the POST new-validation endpoint, so the request body is adapted to provide the proper
    parameters, e.g. the validation request has the company public key, not the company_id that we need to insert into the DB.
Parameters
----------
data: dict
        the body of the request with the following keys:
'name', name of the location
'location_key', the signed key for the location
'location_data', the payload describing the location
'company_id', the id of the connected company
Returns
-------
dict
        the body of the response, a dict with the following keys:
'status',
'message',
'public_id', the local public_id for this location
Raises
------
EonError
400, something is wrong witht the input data
409, the given company already has a location with the same key
"""
location = Location.query.filter_by(company_id=data['company_id'], location_key=data['location_key']).first()
if not location:
connected_company = Company.query.filter_by(public_id=data['company_id']).first()
if not connected_company:
            raise EonError('Unknown company, create the company first.', 400)
if connected_company.public_key and not connected_company.is_own:
public_key = connected_company.public_key
hu = HashUtils()
kmc = KeyManagementClient()
hashed_location_data = hu.digest(data['location_data'])
bytes_key = bytes.fromhex(data['location_key'])
signature_correct = kmc.verify_signed_message(bytes_key, hashed_location_data, public_key)
if not signature_correct:
raise EonError('Invalid signature for the given location.', 400)
else:
raise EonError('The company does not have a known public key.', 400)
#TODO:
#check the company public key by retrieving it
new_location = Location(
public_id=str(uuid.uuid4()),
name=data['name'],
location_data=data['location_data'],
created_on=datetime.datetime.utcnow(),
company_id = data['company_id'],
location_key=data['location_key']
)
save_changes(new_location)
return generate_creation_ok_message(new_location)
else:
raise EonError('Another location already exists with the given data.', 409)
def get_a_location(public_id):
return Location.query.filter_by(public_id=public_id).first()
def save_changes(data):
db.session.add(data)
db.session.commit()
def generate_creation_ok_message(location):
try:
response_object = {
'status': 'success',
'message': 'New location created and currently being checked.',
'public_id': location.public_id
}
return response_object, 201
except Exception:
response_object = {
'status': 'fail',
'message': 'Some error occurred. Please try again.'
}
return response_object, 401
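# Illustrative sketch (not part of the original service): the request body
# expected by save_new_location, based on the keys read above. The concrete
# values are made up, and the commented call would additionally require an
# application/DB context plus a reachable key-management service.
def _example_save_new_location_payload():
    payload = {
        'name': 'Main warehouse',                # human-readable location name
        'location_data': 'lat:52.37,lon:4.89',   # free-form payload that gets hashed and signed
        'company_id': 'an-existing-company-public-id',
    }
    # save_new_location(payload)  # signs location_data and persists the Location
    return payload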
| 39.282443 | 119 | 0.661096 |
79444465c7fddebb6183a141985edf3aeb883a4a | 1,577 | py | Python | Methods/Slot/HoleM50/comp_radius.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | ["Apache-2.0"] | 2 | 2020-06-29T13:48:37.000Z | 2021-06-15T07:34:05.000Z | Methods/Slot/HoleM50/comp_radius.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | ["Apache-2.0"] | null | null | null | Methods/Slot/HoleM50/comp_radius.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""@package Methods.Slot.HoleM50.comp_radius
Compute the radius of the Slot method
@date Created on Fri Mar 18 09:51:15 2016
@copyright (C) 2015-2016 EOMYS ENGINEERING.
@author pierre_b
@todo unittest it
"""
from numpy import angle, arcsin, arctan, array, cos, exp
def comp_radius(self):
"""Compute the radius of the min and max circle that contains the slot
Parameters
----------
self : HoleM50
A HoleM50 object
Returns
-------
(Rmin,Rmax): tuple
Radius of the circle that contains the slot [m]
"""
Rbo = self.get_Rbo()
Rmax = Rbo - self.H1
# magnet pole pitch angle, must be <2*pi/2*p
alpham = 2 * arcsin(self.W0 / (2 * (Rbo - self.H1)))
Harc = (Rbo - self.H1) * (1 - cos(alpham / 2))
gammam = arctan((self.H0 - self.H1 - Harc) / (self.W0 / 2.0 - self.W1 / 2.0))
x78 = (self.H3 - self.H2) / cos(gammam) # distance from 7 to 8
Z9 = Rbo - Harc - self.H1 - 1j * self.W0 / 2.0
Z8 = Rbo - self.H0 - 1j * self.W1 / 2.0
Z7 = Rbo - self.H0 - x78 - 1j * self.W1 / 2.0
# Magnet coordinate with Z8 as center and x as the top edge of the magnet
Z8b = self.W2
Z8c = Z8b + self.W4
Z5 = Z8b - 1j * self.H3
Z4 = Z8c - 1j * self.H3
Z6 = Z5 + 1j * self.H2
Z3 = Z4 + 1j * self.H2
Zmag = array([Z8b, Z6, Z5, Z4, Z3, Z8c])
Zmag = Zmag * exp(1j * angle(Z9 - Z8))
Zmag = Zmag + Z8
# final complex numbers Zmag=[Z8b Z6 Z5 Z4 Z3 Z8c]
(Z8b, Z6, Z5, Z4, Z3, Z8c) = Zmag
Rmin = min(abs(Z5), abs(Z7))
return (Rmin, Rmax)
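# Illustrative sketch (not part of the original method file): comp_radius is
# bound to the HoleM50 class elsewhere in pyleecan, so it is normally called on
# an already built hole object; the parameter name below is an assumption.
def _example_usage(hole_m50):
    """Print the bounding radii of an existing HoleM50 object."""
    Rmin, Rmax = hole_m50.comp_radius()
    print("hole fits between radii", Rmin, "and", Rmax)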
| 26.728814 | 81 | 0.578313 |
7944453980547c246fa8b1ed5fbddc97cd5067c6 | 6,705 | py | Python | bingads/v13/bulk/entities/ad_extensions/bulk_filterlink_ad_extensions.py | pawelulita/BingAds-Python-SDK | e7b5a618e87a43d0a5e2c79d9aa4626e208797bd | ["MIT"] | 86 | 2016-02-29T03:24:28.000Z | 2022-03-29T09:30:21.000Z | bingads/v13/bulk/entities/ad_extensions/bulk_filterlink_ad_extensions.py | pawelulita/BingAds-Python-SDK | e7b5a618e87a43d0a5e2c79d9aa4626e208797bd | ["MIT"] | 135 | 2016-04-12T13:31:28.000Z | 2022-03-29T02:18:51.000Z | bingads/v13/bulk/entities/ad_extensions/bulk_filterlink_ad_extensions.py | pawelulita/BingAds-Python-SDK | e7b5a618e87a43d0a5e2c79d9aa4626e208797bd | ["MIT"] | 154 | 2016-04-08T04:11:27.000Z | 2022-03-29T21:21:07.000Z |
from bingads.v13.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v13.internal.bulk.string_table import _StringTable
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V13
from bingads.v13.internal.extensions import *
from .common import _BulkAdExtensionBase
from .common import _BulkCampaignAdExtensionAssociation
from .common import _BulkAdGroupAdExtensionAssociation
from .common import _BulkAccountAdExtensionAssociation
_FilterLinkAdExtension = type(_CAMPAIGN_OBJECT_FACTORY_V13.create('FilterLinkAdExtension'))
class BulkFilterLinkAdExtension(_BulkAdExtensionBase):
""" Represents a filter link ad extension.
This class exposes the :attr:`filter_link_ad_extension` property that can be read and written
as fields of the Filter Link Ad Extension record in a bulk file.
For more information, see Filter Link Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self, account_id=None, ad_extension=None):
if ad_extension and not isinstance(ad_extension, _FilterLinkAdExtension):
raise ValueError('The type of ad_extension is: {0}, should be: {1}'.format(
type(ad_extension),
'FilterLinkAdExtension'
))
super(BulkFilterLinkAdExtension, self).__init__(
account_id=account_id,
ad_extension=ad_extension
)
@property
def filter_link_ad_extension(self):
""" The filter link ad extension.
see Filter Link Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.
"""
return self._ad_extension
@filter_link_ad_extension.setter
def filter_link_ad_extension(self, value):
self._ad_extension = value
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.AdExtensionHeaderType,
field_to_csv=lambda c: bulk_str(c.filter_link_ad_extension.AdExtensionHeaderType),
csv_to_field=lambda c, v: setattr(c.filter_link_ad_extension, 'AdExtensionHeaderType', v)
),
_SimpleBulkMapping(
header=_StringTable.Language,
field_to_csv=lambda c: c.filter_link_ad_extension.Language,
csv_to_field=lambda c, v: setattr(c.filter_link_ad_extension, 'Language', v)
),
_SimpleBulkMapping(
header=_StringTable.Texts,
field_to_csv=lambda c: field_to_csv_delimited_strings(c.filter_link_ad_extension.Texts),
csv_to_field=lambda c, v: csv_to_field_delimited_strings(c.filter_link_ad_extension.Texts, v)
),
_SimpleBulkMapping(
header=_StringTable.TrackingTemplate,
field_to_csv=lambda c: bulk_str(c.filter_link_ad_extension.TrackingUrlTemplate),
csv_to_field=lambda c, v: setattr(c.filter_link_ad_extension, 'TrackingUrlTemplate', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.CustomParameter,
field_to_csv=lambda c: field_to_csv_UrlCustomParameters(c.filter_link_ad_extension),
csv_to_field=lambda c, v: csv_to_field_UrlCustomParameters(c.filter_link_ad_extension, v)
),
_SimpleBulkMapping(
header=_StringTable.FinalUrl,
field_to_csv=lambda c: field_to_csv_Urls(c.filter_link_ad_extension.FinalUrls, c.filter_link_ad_extension.Id),
csv_to_field=lambda c, v: csv_to_field_Urls(c.filter_link_ad_extension.FinalUrls, v)
),
_SimpleBulkMapping(
header=_StringTable.FinalMobileUrl,
field_to_csv=lambda c: field_to_csv_Urls(c.filter_link_ad_extension.FinalMobileUrls, c.filter_link_ad_extension.Id),
csv_to_field=lambda c, v: csv_to_field_Urls(c.filter_link_ad_extension.FinalMobileUrls, v)
),
_SimpleBulkMapping(
header=_StringTable.FinalUrlSuffix,
field_to_csv=lambda c: bulk_optional_str(c.filter_link_ad_extension.FinalUrlSuffix, c.filter_link_ad_extension.Id),
csv_to_field=lambda c, v: setattr(c.filter_link_ad_extension, 'FinalUrlSuffix', v)
)
]
def process_mappings_from_row_values(self, row_values):
self.filter_link_ad_extension = _CAMPAIGN_OBJECT_FACTORY_V13.create('FilterLinkAdExtension')
self.filter_link_ad_extension.Type = 'FilterLinkAdExtension'
super(BulkFilterLinkAdExtension, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, BulkFilterLinkAdExtension._MAPPINGS)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.filter_link_ad_extension, 'filter_link_ad_extension')
super(BulkFilterLinkAdExtension, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, BulkFilterLinkAdExtension._MAPPINGS)
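# Illustrative sketch (not part of the generated SDK code): building a bulk
# entity from a FilterLinkAdExtension created through the campaign object
# factory imported above. The field values are assumptions, not required ones.
def _example_build_bulk_filter_link(account_id):
    extension = _CAMPAIGN_OBJECT_FACTORY_V13.create('FilterLinkAdExtension')
    extension.Type = 'FilterLinkAdExtension'
    extension.Language = 'English'
    return BulkFilterLinkAdExtension(account_id=account_id, ad_extension=extension)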
class BulkAccountFilterLinkAdExtension(_BulkAccountAdExtensionAssociation):
""" Represents an account level filter link ad extension.
This class exposes properties that can be read and written
as fields of the Account Filter Link Ad Extension record in a bulk file.
For more information, see Account Filter Link Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
pass
class BulkCampaignFilterLinkAdExtension(_BulkCampaignAdExtensionAssociation):
""" Represents a campaign level filter link ad extension.
This class exposes properties that can be read and written
as fields of the Campaign Filter Link Ad Extension record in a bulk file.
For more information, see Campaign Filter Link Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
pass
class BulkAdGroupFilterLinkAdExtension(_BulkAdGroupAdExtensionAssociation):
""" Represents an ad group level filter link ad extension.
This class exposes properties that can be read and written
as fields of the Ad Group Filter Link Ad Extension record in a bulk file.
For more information, see Ad Group Filter Link Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
pass
| 41.134969 | 128 | 0.726174 |
794447603e92de8cb82ee49b4bb228494e1cb9d7 | 4,164 | py | Python | tests/integration/cli/restart_test.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | [
"MIT"
] | 421 | 2015-06-02T16:29:59.000Z | 2021-06-03T18:44:42.000Z | tests/integration/cli/restart_test.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | [
"MIT"
] | 404 | 2015-06-02T20:23:42.000Z | 2019-08-21T16:59:41.000Z | tests/integration/cli/restart_test.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | [
"MIT"
] | 16 | 2015-06-16T17:21:02.000Z | 2020-03-27T02:27:09.000Z | import datetime
import os
from subprocess import check_call
from tempfile import mkdtemp
import time
import dateutil.parser
from nose.tools import nottest
from dusty.compiler.spec_assembler import get_all_repos
from dusty import constants
from dusty.source import Repo
from dusty.subprocess import check_output_demoted
from ...testcases import DustyIntegrationTestCase
from ...fixtures import specs_fixture_with_depends
class TestRestartCLI(DustyIntegrationTestCase):
def setUp(self):
super(TestRestartCLI, self).setUp()
specs_fixture_with_depends()
for repo in get_all_repos(include_specs_repo=False):
self._set_up_fake_local_repo(path=repo.remote_path)
self.run_command('bundles activate bundle-a bundle-b')
self.run_command('up')
self.up_complete_time = max([self.container_start_time(app) for app in ['appa', 'appb', 'appc']])
def tearDown(self):
try:
self.run_command('stop')
except Exception:
pass
super(TestRestartCLI, self).tearDown()
def assertContainerHasRestarted(self, app_name):
self.assertTrue(self.container_has_restarted(app_name))
def assertContainerNotRestarted(self, app_name):
self.assertFalse(self.container_has_restarted(app_name))
def vm_current_time(self):
result = check_output_demoted(['docker-machine', 'ssh', constants.VM_MACHINE_NAME, 'date "+%Y-%m-%dT%X.%s"'])
return dateutil.parser.parse(result)
@nottest
def container_start_time(self, app_name):
inspected = self.inspect_container(app_name)
return dateutil.parser.parse(inspected['State']['StartedAt']).replace(tzinfo=None)
@nottest
def container_has_restarted(self, app_name):
start_time = self.container_start_time(app_name)
print 'Container for {} started: {}, Up complete: {}'.format(app_name, start_time, self.up_complete_time)
return start_time > self.up_complete_time
def test_restart_one(self):
self.run_command('restart appa')
self.assertContainerHasRestarted('appa')
self.assertContainerNotRestarted('appb')
self.assertContainerNotRestarted('appc')
def test_restart_all(self):
self.run_command('restart')
self.assertContainerHasRestarted('appa')
self.assertContainerHasRestarted('appb')
self.assertContainerHasRestarted('appc')
def test_restart_by_app_repo(self):
self.run_command('restart --repos repo-app-a')
self.assertContainerHasRestarted('appa')
self.assertContainerNotRestarted('appb')
self.assertContainerNotRestarted('appc')
def test_restart_by_lib_repo(self):
self.run_command('restart --repos repo-lib-a')
self.assertContainerHasRestarted('appa')
self.assertContainerHasRestarted('appb')
self.assertContainerNotRestarted('appc')
def test_restart_sync(self):
new_file_name = 'sync_file'
repo = Repo.resolve(get_all_repos(include_specs_repo=False), 'repo-app-a')
with open(os.path.join(repo.local_path, new_file_name), 'w+') as f:
f.write('new file!')
self.run_command('restart appa')
self.assertFileInContainer('appa', os.path.join('/app/a/', new_file_name))
def test_restart_dead_container(self):
self.run_command('stop appa')
self.run_command('restart appa')
self.assertContainerHasRestarted('appa')
def test_restart_fails_with_dead_link(self):
self.run_command('stop appa')
output = self.run_command('restart appb')
self.assertInSameLine(output, 'Cannot restart appb', 'appa')
def test_restart_with_repo_swap(self):
repo_override_dir = mkdtemp()
extra_file = 'train'
extra_file_path = os.path.join(repo_override_dir, extra_file)
check_call(['touch', extra_file_path])
self.run_command('repos override repo-app-a {}'.format(repo_override_dir))
self.run_command('restart appa')
extra_file_container_path = os.path.join('/app/a', extra_file)
self.assertFileInContainer('appa', extra_file_container_path)
| 39.283019 | 117 | 0.705572 |
7944481f280df7add91ac92355de83332396b15e | 935 | py | Python | modules/data/dto/telegram_query_response.py | amirgraily7/tulo-chatbot | 493a20525eeca98003fb0fc63da18f5e4c3d4a6f | [
"MIT"
] | 3 | 2019-10-07T04:11:55.000Z | 2021-11-26T08:00:47.000Z | modules/data/dto/telegram_query_response.py | usriva2405/ml-bank-chatbot-telegram | 493a20525eeca98003fb0fc63da18f5e4c3d4a6f | [
"MIT"
] | 39 | 2019-09-25T03:14:57.000Z | 2022-03-11T23:59:56.000Z | modules/data/dto/telegram_query_response.py | usriva2405/ml-bank-chatbot-telegram | 493a20525eeca98003fb0fc63da18f5e4c3d4a6f | [
"MIT"
] | 2 | 2020-05-12T15:19:15.000Z | 2021-01-31T11:54:01.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 18 00:33:46 2019
@author: usrivastava
"""
import logging
import json
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
class TelegramQueryResponse:
@classmethod
def extract_response(cls, answer):
print("extracting response... :")
# get response element, remove the opening and closing [], replace ' with ", so that it is convertible to json
if answer is not None:
try:
response_str = str(answer.get("response"))[1:-1].replace("'", '"')
response = json.loads(response_str).get("text")
except Exception as e:
logger.error(e)
response = "I think your query broke me. Try after some time!!"
return response
| 28.333333 | 118 | 0.603209 |
79444859e3a1e824d5e6bf6f41ea05c817f1f120 | 11,094 | py | Python | examples/ui/ULAO04.py | Picarro-kskog/mcculw | 5a00dfbef2426772f0ec381f7795a2d5fd696a76 | [
"MIT"
] | null | null | null | examples/ui/ULAO04.py | Picarro-kskog/mcculw | 5a00dfbef2426772f0ec381f7795a2d5fd696a76 | [
"MIT"
] | null | null | null | examples/ui/ULAO04.py | Picarro-kskog/mcculw | 5a00dfbef2426772f0ec381f7795a2d5fd696a76 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import math
from builtins import * # @UnusedWildImport
from tkinter import messagebox
from mcculw import ul
from mcculw.enums import Status, FunctionType, ScanOptions
from examples.ui.uiexample import UIExample
from examples.props.ao import AnalogOutputProps
from mcculw.ul import ULError
import tkinter as tk
class ULAO04(UIExample):
def __init__(self, master=None):
super(ULAO04, self).__init__(master)
self.board_num = 0
self.ao_props = AnalogOutputProps(self.board_num)
self.create_widgets()
def start_scan(self):
# Build the data array
self.low_chan = self.get_low_channel_num()
self.high_chan = self.get_high_channel_num()
self.num_chans = self.high_chan - self.low_chan + 1
if self.low_chan > self.high_chan:
messagebox.showerror(
"Error",
"Low Channel Number must be greater than or equal to High "
"Channel Number")
self.set_ui_idle_state()
return
points_per_channel = 1000
rate = 1000
num_points = self.num_chans * points_per_channel
scan_options = (ScanOptions.BACKGROUND |
ScanOptions.CONTINUOUS | ScanOptions.SCALEDATA)
ao_range = self.ao_props.available_ranges[0]
self.memhandle = ul.scaled_win_buf_alloc(num_points)
# Check if the buffer was successfully allocated
if not self.memhandle:
messagebox.showerror("Error", "Failed to allocate memory")
self.start_button["state"] = tk.NORMAL
return
try:
data_array = self.memhandle_as_ctypes_array_scaled(
self.memhandle)
frequencies = self.add_example_data(
data_array, ao_range, self.num_chans, rate,
points_per_channel)
self.recreate_freq_frame()
self.display_signal_info(frequencies)
ul.a_out_scan(
self.board_num, self.low_chan, self.high_chan, num_points,
rate, ao_range, self.memhandle, scan_options)
# Start updating the displayed values
self.update_displayed_values()
except ULError as e:
self.show_ul_error(e)
self.set_ui_idle_state()
return
def display_signal_info(self, frequencies):
for channel_num in range(self.low_chan, self.high_chan + 1):
curr_row = channel_num - self.low_chan
self.freq_labels[curr_row]["text"] = str(
frequencies[curr_row]) + " Hz"
def add_example_data(self, data_array, ao_range, num_chans,
rate, points_per_channel):
# Calculate frequencies that will work well with the size of the array
frequencies = []
for channel_num in range(0, num_chans):
frequencies.append(
(channel_num + 1) / (points_per_channel / rate))
# Calculate an amplitude and y-offset for the signal
# to fill the analog output range
amplitude = (ao_range.range_max - ao_range.range_min) / 2
y_offset = (amplitude + ao_range.range_min) / 2
# Fill the array with sine wave data at the calculated frequencies.
# Note that since we are using the SCALEDATA option, the values
# added to data_array are the actual voltage values that the device
# will output
data_index = 0
for point_num in range(0, points_per_channel):
for channel_num in range(0, num_chans):
freq = frequencies[channel_num]
value = amplitude * math.sin(
2 * math.pi * freq * point_num / rate) + y_offset
data_array[data_index] = value
data_index += 1
return frequencies
def update_displayed_values(self):
# Get the status from the device
status, curr_count, curr_index = ul.get_status(
self.board_num, FunctionType.AOFUNCTION)
# Display the status info
self.update_status_labels(status, curr_count, curr_index)
# Call this method again until the stop button is pressed
if status == Status.RUNNING:
self.after(100, self.update_displayed_values)
else:
# Free the allocated memory
ul.win_buf_free(self.memhandle)
self.set_ui_idle_state()
def update_status_labels(self, status, curr_count, curr_index):
if status == Status.IDLE:
self.status_label["text"] = "Idle"
else:
self.status_label["text"] = "Running"
self.index_label["text"] = str(curr_index)
self.count_label["text"] = str(curr_count)
def recreate_freq_frame(self):
low_chan = self.low_chan
high_chan = self.high_chan
new_freq_frame = tk.Frame(self.freq_inner_frame)
curr_row = 0
self.freq_labels = []
for chan_num in range(low_chan, high_chan + 1):
curr_row += 1
channel_label = tk.Label(new_freq_frame)
channel_label["text"] = (
"Channel " + str(chan_num) + " Frequency:")
channel_label.grid(row=curr_row, column=0, sticky=tk.W)
freq_label = tk.Label(new_freq_frame)
freq_label.grid(row=curr_row, column=1, sticky=tk.W)
self.freq_labels.append(freq_label)
self.freq_frame.destroy()
self.freq_frame = new_freq_frame
self.freq_frame.grid()
def stop(self):
ul.stop_background(self.board_num, FunctionType.AOFUNCTION)
def exit(self):
self.stop()
self.master.destroy()
def set_ui_idle_state(self):
self.high_channel_entry["state"] = tk.NORMAL
self.low_channel_entry["state"] = tk.NORMAL
self.start_button["command"] = self.start
self.start_button["text"] = "Start"
def start(self):
self.high_channel_entry["state"] = tk.DISABLED
self.low_channel_entry["state"] = tk.DISABLED
self.start_button["command"] = self.stop
self.start_button["text"] = "Stop"
self.start_scan()
def get_low_channel_num(self):
if self.ao_props.num_chans == 1:
return 0
try:
return int(self.low_channel_entry.get())
except ValueError:
return 0
def get_high_channel_num(self):
if self.ao_props.num_chans == 1:
return 0
try:
return int(self.high_channel_entry.get())
except ValueError:
return 0
def validate_channel_entry(self, p):
if p == '':
return True
try:
value = int(p)
if(value < 0 or value > self.ao_props.num_chans - 1):
return False
except ValueError:
return False
return True
def create_widgets(self):
'''Create the tkinter UI'''
example_supported = (
self.ao_props.num_chans > 0
and self.ao_props.supports_scan)
if example_supported:
main_frame = tk.Frame(self)
main_frame.pack(fill=tk.X, anchor=tk.NW)
if self.ao_props.num_chans > 1:
channel_vcmd = self.register(self.validate_channel_entry)
curr_row = 0
low_channel_entry_label = tk.Label(main_frame)
low_channel_entry_label["text"] = "Low Channel Number:"
low_channel_entry_label.grid(
row=curr_row, column=0, sticky=tk.W)
self.low_channel_entry = tk.Spinbox(
main_frame, from_=0,
to=max(self.ao_props.num_chans - 1, 0),
validate='key', validatecommand=(channel_vcmd, '%P'))
self.low_channel_entry.grid(
row=curr_row, column=1, sticky=tk.W)
curr_row += 1
high_channel_entry_label = tk.Label(main_frame)
high_channel_entry_label["text"] = "High Channel Number:"
high_channel_entry_label.grid(
row=curr_row, column=0, sticky=tk.W)
self.high_channel_entry = tk.Spinbox(
main_frame, from_=0,
to=max(self.ao_props.num_chans - 1, 0),
validate='key', validatecommand=(channel_vcmd, '%P'))
self.high_channel_entry.grid(
row=curr_row, column=1, sticky=tk.W)
initial_value = min(self.ao_props.num_chans - 1, 3)
self.high_channel_entry.delete(0, tk.END)
self.high_channel_entry.insert(0, str(initial_value))
scan_info_group = tk.LabelFrame(
self, text="Scan Information", padx=3, pady=3)
scan_info_group.pack(fill=tk.X, anchor=tk.NW, padx=3, pady=3)
scan_info_group.grid_columnconfigure(1, weight=1)
curr_row += 1
status_left_label = tk.Label(scan_info_group)
status_left_label["text"] = "Status:"
status_left_label.grid(row=curr_row, column=0, sticky=tk.W)
self.status_label = tk.Label(scan_info_group)
self.status_label["text"] = "Idle"
self.status_label.grid(row=curr_row, column=1, sticky=tk.W)
curr_row += 1
index_left_label = tk.Label(scan_info_group)
index_left_label["text"] = "Index:"
index_left_label.grid(row=curr_row, column=0, sticky=tk.W)
self.index_label = tk.Label(scan_info_group)
self.index_label["text"] = "-1"
self.index_label.grid(row=curr_row, column=1, sticky=tk.W)
curr_row += 1
count_left_label = tk.Label(scan_info_group)
count_left_label["text"] = "Count:"
count_left_label.grid(row=curr_row, column=0, sticky=tk.W)
self.count_label = tk.Label(scan_info_group)
self.count_label["text"] = "0"
self.count_label.grid(row=curr_row, column=1, sticky=tk.W)
curr_row += 1
self.freq_inner_frame = tk.Frame(scan_info_group)
self.freq_inner_frame.grid(
row=curr_row, column=0, columnspan=2, sticky=tk.W)
self.freq_frame = tk.Frame(self.freq_inner_frame)
self.freq_frame.grid()
button_frame = tk.Frame(self)
button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE)
self.start_button = tk.Button(button_frame)
self.start_button["text"] = "Start"
self.start_button["command"] = self.start
self.start_button.grid(row=0, column=0, padx=3, pady=3)
quit_button = tk.Button(button_frame)
quit_button["text"] = "Quit"
quit_button["command"] = self.exit
quit_button.grid(row=0, column=1, padx=3, pady=3)
if __name__ == "__main__":
# Start the example
ULAO04(master=tk.Tk()).mainloop()
| 36.37377 | 78 | 0.59744 |
79444902828c472c8b639eaa69c2993a688e2b13 | 3,373 | py | Python | tests/test_parser.py | cocoaaa/ReprLearn | 58dc682aa62dbd59201ccc55b9b26480ff3d6773 | ["MIT"] | null | null | null | tests/test_parser.py | cocoaaa/ReprLearn | 58dc682aa62dbd59201ccc55b9b26480ff3d6773 | ["MIT"] | null | null | null | tests/test_parser.py | cocoaaa/ReprLearn | 58dc682aa62dbd59201ccc55b9b26480ff3d6773 | ["MIT"] | null | null | null |
"See: https://mkaz.blog/code/python-argparse-cookbook/"
import argparse
from reprlearn.models.plmodules.conv_fc_gan import ConvGenerator
from reprlearn.models.plmodules.conv_fc_gan import ConvFCGAN
def test_parser_basic():
my_parser = argparse.ArgumentParser()
my_parser.add_argument('--gpu_ids', action='store', type=str, nargs='*')
my_parser.add_argument('--list_gpu_id', action='store', type=str, nargs=1)
my_parser.add_argument('--int_gpu_id', action='store', type=str)
args = my_parser.parse_args()
print("---args---")
print(args)
print("gpu_ids: ", args.gpu_ids)
print(','.join(args.gpu_ids))
print("list_gpu_id: ", args.list_gpu_id)
print(','.join(args.list_gpu_id))
print("args.int_gpu_id: ", args.int_gpu_id)
def test_parser_boolean():
# See: stackoverflow.com/a/31347222
parser = argparse.ArgumentParser()
parser.add_argument('--early_stop', dest='early_stop', action='store_true')
parser.add_argument('--no_early_stop', dest='early_stop', action='store_false')
parser.set_defaults(early_stop=True)
for cmd in ['--early_stop', '--no_early_stop']:
args = parser.parse_args(cmd.split())
print('cli: ', cmd)
print(args)
def test_parser_conflict_handler_1():
parent_p = argparse.ArgumentParser()
parent_p.add_argument('--dimz', type=int, default=10)
parser = argparse.ArgumentParser(parents=[parent_p],
conflict_handler='resolve')
parser.add_argument('--dimz', type=int, default=20)
args = parser.parse_args()
print(args) # Namespace(dimz=20)
def test_parser_conflict_handler_2():
parent_p = argparse.ArgumentParser()
parent_p.add_argument('--dimz', type=int, default=10)
parser = argparse.ArgumentParser(parents=[parent_p],
conflict_handler='resolve')
parser.add_argument('--dimz', type=int, default=20)
args = parser.parse_args(['--dimz', '30'])
print('type: ', type(parser))
print(args) # Namespace(dimz=30)
def test_conv_gen_add_model_specific_args():
parser = ConvGenerator.add_model_specific_args()
args = parser.parse_args(['--latent_dim', '100'])
print(args)
def test_conv_fc_gan_add_model_specific_args():
parser = ConvFCGAN.add_model_specific_args()
arg_str = '--latent_dim 100 --latent_emb_dim 32 --lr_g 1e-2 --lr_d 1e-1 -k 5'
    print(arg_str)
args = parser.parse_args(arg_str.split())
# or
# args, _ = parser.parse_known_args(arg_str.split())
print(args)
def test_conv_fc_gan_add_model_specific_args_with_parent():
parent = argparse.ArgumentParser(add_help=False) # add_help=False is important!
parent.add_argument('--latent_dim', type=int, default=1)
parser = ConvFCGAN.add_model_specific_args(parent)
arg_str = '--latent_dim 100 --latent_emb_dim 32 --lr_g 1e-2 --lr_d 1e-1 -k 5'
print('arg_str')
args = parser.parse_args(arg_str.split())
# or
# args, _ = parser.parse_known_args(arg_str.split())
print(args)
if __name__ == '__main__':
# test_parser_basic()
test_parser_boolean()
# test_parser_conflict_handler_1()
# test_parser_conflict_handler_2()
# test_conv_gen_add_model_specific_args()
# test_conv_fc_gan_add_model_specific_args()
    # test_conv_fc_gan_add_model_specific_args_with_parent()
| 31.820755 | 84 | 0.69078 |
7944497bc29382263c0a5459b7f81f8c2e732d9a | 12,156 | py | Python | pollination_sdk/models/step_array_input.py | pollination/python-sdk | 599e8dbfc6e547c5e18aa903b27c70d7ffef84e5 | [
"RSA-MD"
] | 2 | 2020-01-30T23:28:59.000Z | 2020-05-06T16:43:47.000Z | pollination_sdk/models/step_array_input.py | pollination/python-sdk | 599e8dbfc6e547c5e18aa903b27c70d7ffef84e5 | [
"RSA-MD"
] | 1 | 2020-10-02T18:00:25.000Z | 2020-10-02T18:00:25.000Z | pollination_sdk/models/step_array_input.py | pollination/python-sdk | 599e8dbfc6e547c5e18aa903b27c70d7ffef84e5 | [
"RSA-MD"
] | null | null | null | # coding: utf-8
"""
pollination-server
Pollination Server OpenAPI Definition # noqa: E501
The version of the OpenAPI document: 0.16.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pollination_sdk.configuration import Configuration
class StepArrayInput(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'alias': 'list[AnyOfDAGGenericInputAliasDAGStringInputAliasDAGIntegerInputAliasDAGNumberInputAliasDAGBooleanInputAliasDAGFolderInputAliasDAGFileInputAliasDAGPathInputAliasDAGArrayInputAliasDAGJSONObjectInputAliasDAGLinkedInputAlias]',
'annotations': 'dict(str, str)',
'default': 'list[object]',
'description': 'str',
'items_type': 'ItemType',
'name': 'str',
'required': 'bool',
'spec': 'object',
'type': 'str',
'value': 'list[object]'
}
attribute_map = {
'alias': 'alias',
'annotations': 'annotations',
'default': 'default',
'description': 'description',
'items_type': 'items_type',
'name': 'name',
'required': 'required',
'spec': 'spec',
'type': 'type',
'value': 'value'
}
def __init__(self, alias=None, annotations=None, default=None, description=None, items_type=None, name=None, required=False, spec=None, type='StepArrayInput', value=None, local_vars_configuration=None): # noqa: E501
"""StepArrayInput - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._alias = None
self._annotations = None
self._default = None
self._description = None
self._items_type = None
self._name = None
self._required = None
self._spec = None
self._type = None
self._value = None
self.discriminator = None
if alias is not None:
self.alias = alias
if annotations is not None:
self.annotations = annotations
if default is not None:
self.default = default
if description is not None:
self.description = description
if items_type is not None:
self.items_type = items_type
self.name = name
if required is not None:
self.required = required
if spec is not None:
self.spec = spec
if type is not None:
self.type = type
self.value = value
@property
def alias(self):
"""Gets the alias of this StepArrayInput. # noqa: E501
A list of aliases for this input in different platforms. # noqa: E501
:return: The alias of this StepArrayInput. # noqa: E501
:rtype: list[AnyOfDAGGenericInputAliasDAGStringInputAliasDAGIntegerInputAliasDAGNumberInputAliasDAGBooleanInputAliasDAGFolderInputAliasDAGFileInputAliasDAGPathInputAliasDAGArrayInputAliasDAGJSONObjectInputAliasDAGLinkedInputAlias]
"""
return self._alias
@alias.setter
def alias(self, alias):
"""Sets the alias of this StepArrayInput.
A list of aliases for this input in different platforms. # noqa: E501
:param alias: The alias of this StepArrayInput. # noqa: E501
:type alias: list[AnyOfDAGGenericInputAliasDAGStringInputAliasDAGIntegerInputAliasDAGNumberInputAliasDAGBooleanInputAliasDAGFolderInputAliasDAGFileInputAliasDAGPathInputAliasDAGArrayInputAliasDAGJSONObjectInputAliasDAGLinkedInputAlias]
"""
self._alias = alias
@property
def annotations(self):
"""Gets the annotations of this StepArrayInput. # noqa: E501
An optional dictionary to add annotations to inputs. These annotations will be used by the client side libraries. # noqa: E501
:return: The annotations of this StepArrayInput. # noqa: E501
:rtype: dict(str, str)
"""
return self._annotations
@annotations.setter
def annotations(self, annotations):
"""Sets the annotations of this StepArrayInput.
An optional dictionary to add annotations to inputs. These annotations will be used by the client side libraries. # noqa: E501
:param annotations: The annotations of this StepArrayInput. # noqa: E501
:type annotations: dict(str, str)
"""
self._annotations = annotations
@property
def default(self):
"""Gets the default of this StepArrayInput. # noqa: E501
Default value to use for an input if a value was not supplied. # noqa: E501
:return: The default of this StepArrayInput. # noqa: E501
:rtype: list[object]
"""
return self._default
@default.setter
def default(self, default):
"""Sets the default of this StepArrayInput.
Default value to use for an input if a value was not supplied. # noqa: E501
:param default: The default of this StepArrayInput. # noqa: E501
:type default: list[object]
"""
self._default = default
@property
def description(self):
"""Gets the description of this StepArrayInput. # noqa: E501
Optional description for input. # noqa: E501
:return: The description of this StepArrayInput. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this StepArrayInput.
Optional description for input. # noqa: E501
:param description: The description of this StepArrayInput. # noqa: E501
:type description: str
"""
self._description = description
@property
def items_type(self):
"""Gets the items_type of this StepArrayInput. # noqa: E501
Type of items in an array. All the items in an array must be from the same type. # noqa: E501
:return: The items_type of this StepArrayInput. # noqa: E501
:rtype: ItemType
"""
return self._items_type
@items_type.setter
def items_type(self, items_type):
"""Sets the items_type of this StepArrayInput.
Type of items in an array. All the items in an array must be from the same type. # noqa: E501
:param items_type: The items_type of this StepArrayInput. # noqa: E501
:type items_type: ItemType
"""
self._items_type = items_type
@property
def name(self):
"""Gets the name of this StepArrayInput. # noqa: E501
Input name. # noqa: E501
:return: The name of this StepArrayInput. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this StepArrayInput.
Input name. # noqa: E501
:param name: The name of this StepArrayInput. # noqa: E501
:type name: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def required(self):
"""Gets the required of this StepArrayInput. # noqa: E501
A field to indicate if this input is required. This input needs to be set explicitly even when a default value is provided. # noqa: E501
:return: The required of this StepArrayInput. # noqa: E501
:rtype: bool
"""
return self._required
@required.setter
def required(self, required):
"""Sets the required of this StepArrayInput.
A field to indicate if this input is required. This input needs to be set explicitly even when a default value is provided. # noqa: E501
:param required: The required of this StepArrayInput. # noqa: E501
:type required: bool
"""
self._required = required
@property
def spec(self):
"""Gets the spec of this StepArrayInput. # noqa: E501
An optional JSON Schema specification to validate the input value. You can use validate_spec method to validate a value against the spec. # noqa: E501
:return: The spec of this StepArrayInput. # noqa: E501
:rtype: object
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this StepArrayInput.
An optional JSON Schema specification to validate the input value. You can use validate_spec method to validate a value against the spec. # noqa: E501
:param spec: The spec of this StepArrayInput. # noqa: E501
:type spec: object
"""
self._spec = spec
@property
def type(self):
"""Gets the type of this StepArrayInput. # noqa: E501
:return: The type of this StepArrayInput. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this StepArrayInput.
:param type: The type of this StepArrayInput. # noqa: E501
:type type: str
"""
if (self.local_vars_configuration.client_side_validation and
type is not None and not re.search(r'^StepArrayInput$', type)): # noqa: E501
raise ValueError(r"Invalid value for `type`, must be a follow pattern or equal to `/^StepArrayInput$/`") # noqa: E501
self._type = type
@property
def value(self):
"""Gets the value of this StepArrayInput. # noqa: E501
:return: The value of this StepArrayInput. # noqa: E501
:rtype: list[object]
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this StepArrayInput.
:param value: The value of this StepArrayInput. # noqa: E501
:type value: list[object]
"""
if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StepArrayInput):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, StepArrayInput):
return True
return self.to_dict() != other.to_dict()
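# Illustrative sketch (not part of the generated model): constructing the model
# directly and serialising it. Only name and value are required by __init__;
# the concrete strings below are assumptions.
def _example_step_array_input():
    step_input = StepArrayInput(name='sensor-grids', value=['grid_1.pts', 'grid_2.pts'])
    return step_input.to_dict()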
| 32.244032 | 243 | 0.620105 |
794449a5ea17a490aafdffa2c65b54736ac0be8d | 12,303 | py | Python | pyrr/geometric_tests.py | asnt/Pyrr | 4bc98bc1abc72f860f97563e2a4459813d0bfd88 | [
"Unlicense"
] | null | null | null | pyrr/geometric_tests.py | asnt/Pyrr | 4bc98bc1abc72f860f97563e2a4459813d0bfd88 | [
"Unlicense"
] | null | null | null | pyrr/geometric_tests.py | asnt/Pyrr | 4bc98bc1abc72f860f97563e2a4459813d0bfd88 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""Defines a number of functions to test interactions between
various forms data types.
"""
from __future__ import absolute_import, division, print_function
import math
import numpy as np
from . import rectangle, vector, vector3
from .utils import all_parameters_as_numpy_arrays, parameters_as_numpy_arrays
"""
TODO: line_intersect_plane
TODO: line_segment_intersect_plane
TODO: ray_intersect_ray
TODO: line_intersect_line
TODO: line_segment_intersect_line_segment
"""
@all_parameters_as_numpy_arrays
def point_intersect_line(point, line):
"""Calculates the intersection point of a point and aline.
Performed by checking if the cross-product
of the point relative to the line is
0.
"""
rl = line[1] - line[0]
rp = point - line[0]
cross = vector3.cross(rl, rp)
# check if the cross product is zero
if np.count_nonzero(cross) > 0:
return None
return point
@all_parameters_as_numpy_arrays
def point_intersect_line_segment(point, line):
"""Calculates the intersection point of a point and a line segment.
Performed by checking if the cross-product
of the point relative to the line is
0 and if the dot product of the point
relative to the line start AND the end
point relative to the line start is
less than the segment's squared length.
"""
rl = line[1] - line[0]
rp = point - line[0]
cross = vector3.cross(rl, rp)
dot = vector.dot(rp, rl)
squared_length = vector.squared_length(rl)
if np.count_nonzero(cross) > 0:
return None
if dot < 0.0 or dot > squared_length:
return None
return point
@all_parameters_as_numpy_arrays
def point_intersect_rectangle(point, rect):
"""Calculates the intersection point of a point and a 2D rectangle.
For 3D points, the Z axis will be ignored.
:return: Returns True if the point is touching
or within the rectangle.
"""
left, right, bottom, top = rectangle.bounds(rect)
if \
point[0] < left or \
point[0] > right or \
point[1] < bottom or \
point[1] > top:
return None
return point
@parameters_as_numpy_arrays('ray', 'plane')
def ray_intersect_plane(ray, plane, front_only=False):
"""Calculates the intersection point of a ray and a plane.
:param numpy.array ray: The ray to test for intersection.
:param numpy.array plane: The ray to test for intersection.
:param boolean front_only: Specifies if the ray should
only hit the front of the plane.
Collisions from the rear of the plane will be
ignored.
:return The intersection point, or None
if the ray is parallel to the plane.
Returns None if the ray intersects the back
of the plane and front_only is True.
"""
"""
Distance to plane is defined as
t = (pd - p0.n) / rd.n
where:
rd is the ray direction
pd is the point on plane . plane normal
p0 is the ray position
n is the plane normal
if rd.n == 0, the ray is parallel to the
plane.
"""
p = plane[:3] * plane[3]
n = plane[:3]
rd_n = vector.dot(ray[1], n)
if rd_n == 0.0:
return None
if front_only == True:
if rd_n >= 0.0:
return None
pd = vector.dot(p, n)
p0_n = vector.dot(ray[0], n)
t = (pd - p0_n) / rd_n
return ray[0] + (ray[1] * t)
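# Illustrative numeric sketch (not part of the original module): a ray starting
# above the XY plane and pointing straight down. The plane is given as
# [nx, ny, nz, d] to match the convention used above.
def _example_ray_intersect_plane():
    ray = np.array([[0., 0., 5.], [0., 0., -1.]])   # origin, direction
    plane = np.array([0., 0., 1., 0.])              # the z = 0 plane
    return ray_intersect_plane(ray, plane)          # expected: [0., 0., 0.]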
@all_parameters_as_numpy_arrays
def point_closest_point_on_ray(point, ray):
"""Calculates the point on a ray that is closest to a point.
:param numpy.array point: The point to check with.
:param numpy.array ray: The ray to check against.
:rtype: numpy.array
:return: The closest point on the ray to the point.
"""
"""
t = (p - rp).n
cp = rp + (n * t)
where
p is the point
rp is the ray origin
n is the ray normal of unit length
t is the distance along the ray to the point
"""
normalized_n = vector.normalize(ray[1])
relative_point = (point - ray[0])
t = vector.dot(relative_point, normalized_n)
return ray[0] + (normalized_n * t)
@all_parameters_as_numpy_arrays
def point_closest_point_on_line(point, line):
"""Calculates the point on the line that is closest to
the specified point.
:param numpy.array point: The point to check with.
:param numpy.array line: The line to check against.
:rtype: numpy.array
:return: The closest point on the line to the point.
"""
"""
rl = va->b (relative line)
rp = va->p (relative point)
u' = u / |u| (normalize)
cp = a + (u' * (u'.v))
where:
a = line start
b = line end
p = point
cp = closest point
"""
rl = line[1] - line[0]
rp = point - line[0]
rl = vector.normalize(rl)
dot = vector.dot(rl, rp)
return line[0] + (rl * dot)
@all_parameters_as_numpy_arrays
def point_closest_point_on_line_segment(point, segment):
"""Calculates the point on the line segment that is closest
to the specified point.
This is similar to point_closest_point_on_line, except this
is against the line segment of finite length. Whereas point_closest_point_on_line
checks against a line of infinite length.
:param numpy.array point: The point to check with.
:param numpy.array line_segment: The finite line segment to check against.
:rtype: numpy.array
:return: The closest point on the line segment to the point.
"""
# check if the line has any length
rl = segment[1] - segment[0]
squared_length = vector.squared_length(rl)
if squared_length == 0.0:
return segment[0]
rp = point - segment[0]
# check that / squared_length is correct
    dot = vector.dot(rp, rl) / squared_length
if dot < 0.0:
return segment[0]
elif dot > 1.0:
return segment[1]
# within segment
# perform the same calculation as closest_point_on_line
return segment[0] + (rl * dot)
@all_parameters_as_numpy_arrays
def vector_parallel_vector(v1, v2):
"""Checks if two vectors are parallel.
:param numpy.array v1, v2: The vectors to check.
:rtype: boolean
:return: Returns True if the two vectors are parallel.
"""
# we cross product the 2 vectors
# if the result is 0, then they are parallel
cross = vector3.cross(v1, v2)
return 0 == np.count_nonzero(cross)
@all_parameters_as_numpy_arrays
def ray_parallel_ray(ray1, ray2):
"""Checks if two rays are parallel.
:param numpy.array ray1, ray2: The rays to check.
:rtype: boolean
:return: Returns True if the two rays are parallel.
"""
# we use a cross product in-case the ray direction
# isn't unit length
return vector_parallel_vector(ray1[ 1 ], ray2[ 1 ])
@all_parameters_as_numpy_arrays
def ray_coincident_ray(ray1, ray2):
"""Check if rays are coincident.
Rays must not only be parallel to each other, but reside
along the same vector.
:param numpy.array ray1, ray2: The rays to check.
:rtype: boolean
:return: Returns True if the two rays are co-incident.
"""
# ensure the ray's directions are the same
if ray_parallel_ray(ray1, ray2):
# get the delta between the two ray's start point
delta = ray2[0] - ray1[0]
# get the cross product of the ray delta and
# the direction of the rays
cross = vector3.cross(delta, ray2[1])
# if the cross product is zero, the start of the
# second ray is in line with the direction of the
# first ray
if np.count_nonzero(cross) > 0:
return False
return True
return False
@all_parameters_as_numpy_arrays
def ray_intersect_aabb(ray, aabb):
"""Calculates the intersection point of a ray and an AABB
:param numpy.array ray1: The ray to check.
:param numpy.array aabb: The Axis-Aligned Bounding Box to check against.
:rtype: numpy.array
:return: Returns a vector if an intersection occurs.
Returns None if no intersection occurs.
"""
"""
http://gamedev.stackexchange.com/questions/18436/most-efficient-aabb-vs-ray-collision-algorithms
"""
# this is basically "numpy.divide( 1.0, ray[ 1 ] )"
# except we're trying to avoid a divide by zero warning
# so where the ray direction value is 0.0, just use infinity
# which is what we want anyway
direction = ray[1]
dir_fraction = np.empty(3, dtype = ray.dtype)
dir_fraction[direction == 0.0] = np.inf
dir_fraction[direction != 0.0] = np.divide(1.0, direction[direction != 0.0])
t1 = (aabb[0,0] - ray[0,0]) * dir_fraction[ 0 ]
t2 = (aabb[1,0] - ray[0,0]) * dir_fraction[ 0 ]
t3 = (aabb[0,1] - ray[0,1]) * dir_fraction[ 1 ]
t4 = (aabb[1,1] - ray[0,1]) * dir_fraction[ 1 ]
t5 = (aabb[0,2] - ray[0,2]) * dir_fraction[ 2 ]
t6 = (aabb[1,2] - ray[0,2]) * dir_fraction[ 2 ]
tmin = max(min(t1, t2), min(t3, t4), min(t5, t6))
tmax = min(max(t1, t2), max(t3, t4), max(t5, t6))
# if tmax < 0, ray (line) is intersecting AABB
# but the whole AABB is behind the ray start
if tmax < 0:
return None
# if tmin > tmax, ray doesn't intersect AABB
if tmin > tmax:
return None
# t is the distance from the ray point
# to intersection
t = min(x for x in [tmin, tmax] if x >= 0)
point = ray[0] + (ray[1] * t)
return point
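# Illustrative usage sketch (editor addition; the numbers are assumptions). The AABB is
# indexed as [[min_x, min_y, min_z], [max_x, max_y, max_z]], matching the code above:
#   aabb = np.array([[-1., -1., -1.], [1., 1., 1.]])
#   ray = np.array([[0., 0., 5.], [0., 0., -1.]])
#   ray_intersect_aabb(ray, aabb)   # -> array([0., 0., 1.]), the near face of the box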
@all_parameters_as_numpy_arrays
def point_height_above_plane(point, plane):
"""Calculates how high a point is above a plane.
:param numpy.array point: The point to check.
:param numpy.array plane: The plane to check.
:rtype: float
:return: The height above the plane as a float. The value will be
negative if the point is behind the plane.
"""
"""
http://www.vitutor.com/geometry/distance/point_plane.html
d(P) = (AX + BY + CZ + D) / sqrt(A^2 + B^2 + C^2)
Normal is unit length, so it's length is 1.0.
Therefore, we can ignore the division all together.
Just perform Pn . [XYZ1]
"""
return np.dot(plane, [point[0], point[1], point[2], 1.0])
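# Illustrative worked example (editor addition; numbers are assumptions). Note that this
# helper expects the plane in the implicit form [A, B, C, D] of Ax + By + Cz + D = 0,
# so for the plane z = 2 with unit normal (0, 0, 1) the fourth component is -2:
#   plane = np.array([0., 0., 1., -2.])
#   point_height_above_plane(np.array([0., 0., 5.]), plane)   # -> 3.0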
@all_parameters_as_numpy_arrays
def point_closest_point_on_plane(point, plane):
"""Calculates the point on a plane that is closest to a point.
:param numpy.array point: The point to check with.
:param numpy.array plane: The infinite plane to check against.
:rtype: numpy.array
:return: The closest point on the plane to the point.
"""
"""
point on plane is defined as:
q' = q + (d - q.n)n
where:
q' is the point on the plane
q is the point we are checking
d is the value of normal dot position
n is the plane normal
"""
n = plane[:3]
p = n * plane[3]
d = np.dot(p, n)
qn = np.dot(point, n)
return point + (n * (d - qn))
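# Illustrative usage sketch (editor addition; numbers are assumptions). Here the plane is
# stored as [normal, distance] as in the code above, so [0, 0, 1, 2] is the z = 2 plane:
#   plane = np.array([0., 0., 1., 2.])
#   point_closest_point_on_plane(np.array([3., 4., 7.]), plane)   # -> [3., 4., 2.]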
@all_parameters_as_numpy_arrays
def sphere_does_intersect_sphere(s1, s2):
"""Checks if two spheres overlap.
Note: This will return True if the two spheres are
touching perfectly, whereas sphere_penetration_sphere
will return 0.0 because they touch but do not penetrate.
This is faster than circle_penetrate_amount_circle
as it avoids a square root calculation.
:param numpy.array s1: The first sphere.
:param numpy.array s2: The second sphere.
:rtype: boolean
:return: Returns True if the spheres overlap.
Otherwise, returns False.
"""
delta = s2[:3] - s1[:3]
distance_squared = vector.squared_length(delta)
radii_squared = math.pow(s1[3] + s2[3], 2.0)
if distance_squared > radii_squared:
return False
return True
@all_parameters_as_numpy_arrays
def sphere_penetration_sphere(s1, s2):
"""Calculates the distance two spheres have penetrated
into one another.
:param numpy.array s1: The first sphere.
:param numpy.array s2: The second sphere.
:rtype: float
:return: The total overlap of the two spheres.
This is essentially:
r1 + r2 - distance
Where r1 and r2 are the radii of sphere 1 and 2
and distance is the length of the vector p2 - p1.
Will return 0.0 if the spheres do not overlap.
"""
delta = s2[:3] - s1[:3]
distance = vector.length(delta)
combined_radii = s1[3] + s2[3]
penetration = combined_radii - distance
if penetration <= 0.0:
return 0.0
return penetration
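# Illustrative worked example (editor addition; numbers are assumptions). Two radius-2
# spheres whose centres are 3 apart overlap by 4 - 3 = 1.0, while centres 4 apart just
# touch and therefore report 0.0:
#   sphere_penetration_sphere(np.array([0., 0., 0., 2.]), np.array([3., 0., 0., 2.]))   # -> 1.0
#   sphere_penetration_sphere(np.array([0., 0., 0., 2.]), np.array([4., 0., 0., 2.]))   # -> 0.0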
| 30.604478 | 100 | 0.656181 |
79444a26f9bdb7949357d68acecf69d76291d727 | 50 | py | Python | comicolorization_sr/utility/__init__.py | DwangoMediaVillage/Comicolorization | 98f323e78baceae0b1086f01ac51b5e8a7515abb | [
"MIT"
] | 122 | 2017-08-21T10:01:07.000Z | 2022-03-21T13:52:19.000Z | comicolorization_sr/utility/__init__.py | DwangoMediaVillage/Comicolorization | 98f323e78baceae0b1086f01ac51b5e8a7515abb | [
"MIT"
] | 7 | 2017-10-20T15:12:13.000Z | 2022-01-30T23:04:37.000Z | comicolorization_sr/utility/__init__.py | DwangoMediaVillage/Comicolorization | 98f323e78baceae0b1086f01ac51b5e8a7515abb | [
"MIT"
] | 26 | 2017-08-22T08:11:20.000Z | 2022-03-09T14:59:18.000Z | from . import chainer_utility
from . import image
| 16.666667 | 29 | 0.8 |
79444a3621cf0bd4a7c2b6f8b9e3c8987946d001 | 960 | py | Python | More/E02_Flat/_RunAll.py | freder/PageBotExamples | eb4ced53a673b9376e8357afa9ea0795b022b13c | [
"Ruby",
"MIT"
] | 5 | 2020-06-20T22:01:23.000Z | 2021-08-06T04:39:50.000Z | More/E02_Flat/_RunAll.py | freder/PageBotExamples | eb4ced53a673b9376e8357afa9ea0795b022b13c | [
"Ruby",
"MIT"
] | 5 | 2020-05-17T09:32:27.000Z | 2021-03-15T19:45:52.000Z | More/E02_Flat/_RunAll.py | freder/PageBotExamples | eb4ced53a673b9376e8357afa9ea0795b022b13c | [
"Ruby",
"MIT"
] | 2 | 2021-02-25T19:07:45.000Z | 2022-01-09T21:14:06.000Z | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T E X A M P L E S
#
# Copyright (c) 2017 Thom Janssen <https://github.com/thomgb>
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# Run all examples in this directory, creating the output in _export
# (Note that the content of the _export directory is not committed to GitHub)
#
import os
EXPORT_PATH = '_export'
if not os.path.exists(EXPORT_PATH):
os.mkdir(EXPORT_PATH)
import E00_HelloWorld
import E02_Strike
import E03_TextSizeSimple
import E11_TextBoxPosition
import E12_TextSize
import E13_TextBoxLines
import E14_TextBoxBaselines
import E15_MixedBaselines
import E16_ContextFunctions
#import E20_TextPositionByContext
| 26.666667 | 80 | 0.623958 |
79444a54e7f1a0ae1faf508d7f319d74214e5c43 | 680 | py | Python | packages/arb-compiler-evm/arbitrum/messagestack.py | JesseAbram/arbitrum | fc3b96bf165a201d930af65d6ad93777d897e951 | [
"Apache-2.0"
] | 1 | 2021-05-29T11:11:01.000Z | 2021-05-29T11:11:01.000Z | packages/arb-compiler-evm/arbitrum/messagestack.py | JesseAbram/arbitrum | fc3b96bf165a201d930af65d6ad93777d897e951 | [
"Apache-2.0"
] | 120 | 2021-02-18T07:19:59.000Z | 2022-03-30T12:08:45.000Z | packages/arb-compiler-evm/arbitrum/messagestack.py | JesseAbram/arbitrum | fc3b96bf165a201d930af65d6ad93777d897e951 | [
"Apache-2.0"
] | 1 | 2020-09-20T19:25:23.000Z | 2020-09-20T19:25:23.000Z | # Copyright 2020, Offchain Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import value
def addMessage(stack, message):
return value.Tuple([stack, message])
| 34 | 74 | 0.752941 |
79444aab197957c6343f2bb406d79112a6df472d | 9,612 | py | Python | uf2conv.py | jeremyherbert/musical-turtle-xiao | 7cbcf097c1be521ff970b2be4a780aec9de50f9e | [
"MIT"
] | 7 | 2020-04-24T09:06:17.000Z | 2021-11-12T05:08:14.000Z | uf2conv.py | jeremyherbert/musical-turtle-xiao | 7cbcf097c1be521ff970b2be4a780aec9de50f9e | [
"MIT"
] | 1 | 2020-08-04T21:47:14.000Z | 2020-08-09T13:05:28.000Z | uf2conv.py | jeremyherbert/musical-turtle-xiao | 7cbcf097c1be521ff970b2be4a780aec9de50f9e | [
"MIT"
] | 1 | 2020-09-02T09:47:03.000Z | 2020-09-02T09:47:03.000Z | #!/usr/bin/env python3
import sys
import struct
import subprocess
import re
import os
import os.path
import argparse
UF2_MAGIC_START0 = 0x0A324655 # "UF2\n"
UF2_MAGIC_START1 = 0x9E5D5157 # Randomly selected
UF2_MAGIC_END = 0x0AB16F30 # Ditto
families = {
'SAMD21': 0x68ed2b88,
'SAML21': 0x1851780a,
'SAMD51': 0x55114460,
'NRF52': 0x1b57745f,
'STM32F1': 0x5ee21072,
'STM32F4': 0x57755a57,
'ATMEGA32': 0x16573617,
'MIMXRT10XX': 0x4FB2D5BD
}
INFO_FILE = "/INFO_UF2.TXT"
appstartaddr = 0x2000
familyid = 0x0
def is_uf2(buf):
w = struct.unpack("<II", buf[0:8])
return w[0] == UF2_MAGIC_START0 and w[1] == UF2_MAGIC_START1
def is_hex(buf):
try:
w = buf[0:30].decode("utf-8")
except UnicodeDecodeError:
return False
if w[0] == ':' and re.match(b"^[:0-9a-fA-F\r\n]+$", buf):
return True
return False
def convert_from_uf2(buf):
global appstartaddr
numblocks = len(buf) // 512
curraddr = None
outp = b""
for blockno in range(numblocks):
ptr = blockno * 512
block = buf[ptr:ptr + 512]
hd = struct.unpack(b"<IIIIIIII", block[0:32])
if hd[0] != UF2_MAGIC_START0 or hd[1] != UF2_MAGIC_START1:
print("Skipping block at " + ptr + "; bad magic")
continue
if hd[2] & 1:
# NO-flash flag set; skip block
continue
datalen = hd[4]
if datalen > 476:
assert False, "Invalid UF2 data size at " + ptr
newaddr = hd[3]
if curraddr == None:
appstartaddr = newaddr
curraddr = newaddr
padding = newaddr - curraddr
if padding < 0:
assert False, "Block out of order at " + ptr
if padding > 10*1024*1024:
assert False, "More than 10M of padding needed at " + ptr
if padding % 4 != 0:
assert False, "Non-word padding size at " + ptr
while padding > 0:
padding -= 4
outp += b"\x00\x00\x00\x00"
outp += block[32 : 32 + datalen]
curraddr = newaddr + datalen
return outp
def convert_to_carray(file_content):
outp = "const unsigned char bindata[] __attribute__((aligned(16))) = {"
for i in range(len(file_content)):
if i % 16 == 0:
outp += "\n"
outp += "0x%02x, " % ord(file_content[i])
outp += "\n};\n"
return outp
def convert_to_uf2(file_content):
global familyid
datapadding = b""
while len(datapadding) < 512 - 256 - 32 - 4:
datapadding += b"\x00\x00\x00\x00"
numblocks = (len(file_content) + 255) // 256
outp = b""
for blockno in range(numblocks):
ptr = 256 * blockno
chunk = file_content[ptr:ptr + 256]
flags = 0x0
if familyid:
flags |= 0x2000
hd = struct.pack(b"<IIIIIIII",
UF2_MAGIC_START0, UF2_MAGIC_START1,
flags, ptr + appstartaddr, 256, blockno, numblocks, familyid)
while len(chunk) < 256:
chunk += b"\x00"
block = hd + chunk + datapadding + struct.pack(b"<I", UF2_MAGIC_END)
assert len(block) == 512
outp += block
return outp
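# Editor note, layout of one 512-byte UF2 block as assembled above (derived from the
# struct.pack calls in this file):
#   bytes   0-31    eight little-endian uint32s: magic0, magic1, flags, target address,
#                   payload size (256), block number, total block count, family ID
#   bytes  32-287   256 bytes of payload
#   bytes 288-507   zero padding
#   bytes 508-511   UF2_MAGIC_END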
class Block:
def __init__(self, addr):
self.addr = addr
self.bytes = bytearray(256)
def encode(self, blockno, numblocks):
global familyid
flags = 0x0
if familyid:
flags |= 0x2000
hd = struct.pack("<IIIIIIII",
UF2_MAGIC_START0, UF2_MAGIC_START1,
flags, self.addr, 256, blockno, numblocks, familyid)
hd += self.bytes[0:256]
while len(hd) < 512 - 4:
hd += b"\x00"
hd += struct.pack("<I", UF2_MAGIC_END)
return hd
def convert_from_hex_to_uf2(buf):
global appstartaddr
appstartaddr = None
upper = 0
currblock = None
blocks = []
for line in buf.split('\n'):
if line[0] != ":":
continue
i = 1
rec = []
while i < len(line) - 1:
rec.append(int(line[i:i+2], 16))
i += 2
tp = rec[3]
if tp == 4:
upper = ((rec[4] << 8) | rec[5]) << 16
elif tp == 2:
upper = ((rec[4] << 8) | rec[5]) << 4
assert (upper & 0xffff) == 0
elif tp == 1:
break
elif tp == 0:
addr = upper | (rec[1] << 8) | rec[2]
if appstartaddr == None:
appstartaddr = addr
i = 4
while i < len(rec) - 1:
if not currblock or currblock.addr & ~0xff != addr & ~0xff:
currblock = Block(addr & ~0xff)
blocks.append(currblock)
currblock.bytes[addr & 0xff] = rec[i]
addr += 1
i += 1
numblocks = len(blocks)
resfile = b""
for i in range(0, numblocks):
resfile += blocks[i].encode(i, numblocks)
return resfile
def to_str(b):
return b.decode("utf-8")
def get_drives():
drives = []
if sys.platform == "win32":
r = subprocess.check_output(["wmic", "PATH", "Win32_LogicalDisk",
"get", "DeviceID,", "VolumeName,",
"FileSystem,", "DriveType"])
for line in to_str(r).split('\n'):
words = re.split(r'\s+', line)
if len(words) >= 3 and words[1] == "2" and words[2] == "FAT":
drives.append(words[0])
else:
rootpath = "/media"
if sys.platform == "darwin":
rootpath = "/Volumes"
elif sys.platform == "linux":
tmp = rootpath + "/" + os.environ["USER"]
if os.path.isdir(tmp):
rootpath = tmp
for d in os.listdir(rootpath):
drives.append(os.path.join(rootpath, d))
def has_info(d):
try:
return os.path.isfile(d + INFO_FILE)
except:
return False
return list(filter(has_info, drives))
def board_id(path):
with open(path + INFO_FILE, mode='r') as file:
file_content = file.read()
return re.search("Board-ID: ([^\r\n]*)", file_content).group(1)
def list_drives():
for d in get_drives():
print(d, board_id(d))
def write_file(name, buf):
with open(name, "wb") as f:
f.write(buf)
print("Wrote %d bytes to %s" % (len(buf), name))
def main():
global appstartaddr, familyid
def error(msg):
print(msg)
sys.exit(1)
parser = argparse.ArgumentParser(description='Convert to UF2 or flash directly.')
parser.add_argument('input', metavar='INPUT', type=str, nargs='?',
help='input file (HEX, BIN or UF2)')
parser.add_argument('-b' , '--base', dest='base', type=str,
default="0x2000",
help='set base address of application for BIN format (default: 0x2000)')
parser.add_argument('-o' , '--output', metavar="FILE", dest='output', type=str,
help='write output to named file; defaults to "flash.uf2" or "flash.bin" where sensible')
parser.add_argument('-d' , '--device', dest="device_path",
help='select a device path to flash')
parser.add_argument('-l' , '--list', action='store_true',
help='list connected devices')
parser.add_argument('-c' , '--convert', action='store_true',
help='do not flash, just convert')
parser.add_argument('-D' , '--deploy', action='store_true',
help='just flash, do not convert')
parser.add_argument('-f' , '--family', dest='family', type=str,
default="0x0",
help='specify familyID - number or name (default: 0x0)')
parser.add_argument('-C' , '--carray', action='store_true',
help='convert binary file to a C array, not UF2')
args = parser.parse_args()
appstartaddr = int(args.base, 0)
if args.family.upper() in families:
familyid = families[args.family.upper()]
else:
try:
familyid = int(args.family, 0)
except ValueError:
error("Family ID needs to be a number or one of: " + ", ".join(families.keys()))
if args.list:
list_drives()
else:
if not args.input:
error("Need input file")
with open(args.input, mode='rb') as f:
inpbuf = f.read()
from_uf2 = is_uf2(inpbuf)
ext = "uf2"
if args.deploy:
outbuf = inpbuf
elif from_uf2:
outbuf = convert_from_uf2(inpbuf)
ext = "bin"
elif is_hex(inpbuf):
outbuf = convert_from_hex_to_uf2(inpbuf.decode("utf-8"))
elif args.carray:
outbuf = convert_to_carray(inpbuf)
ext = "h"
else:
outbuf = convert_to_uf2(inpbuf)
print("Converting to %s, output size: %d, start address: 0x%x" %
(ext, len(outbuf), appstartaddr))
if args.convert or ext != "uf2":
drives = []
if args.output == None:
args.output = "flash." + ext
else:
drives = get_drives()
if args.output:
write_file(args.output, outbuf)
else:
if len(drives) == 0:
error("No drive to deploy.")
for d in drives:
print("Flashing %s (%s)" % (d, board_id(d)))
write_file(d + "/NEW.UF2", outbuf)
if __name__ == "__main__":
main() | 31.933555 | 113 | 0.528506 |
79444b17605895eb6105a126a5e3470b566a33cf | 1,025 | py | Python | src/zojax/principal/ban/interfaces.py | Zojax/zojax.principal.ban | fe5ada1b253152469765c34a0fefbd7276b1aae8 | [
"ZPL-2.1"
] | null | null | null | src/zojax/principal/ban/interfaces.py | Zojax/zojax.principal.ban | fe5ada1b253152469765c34a0fefbd7276b1aae8 | [
"ZPL-2.1"
] | null | null | null | src/zojax/principal/ban/interfaces.py | Zojax/zojax.principal.ban | fe5ada1b253152469765c34a0fefbd7276b1aae8 | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""$Id$
"""
from zope import schema, interface
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('zojax.principal.ban')
class IBanPrincipalConfiglet(interface.Interface):
""" ban principal configlet """
banned = interface.Attribute('Banned principal ids')
def ban(uids):
""" ban principals """
def unban(uids):
""" unban principals """
| 32.03125 | 78 | 0.613659 |
79444b2c27c2400f22447ca29fea8ffb3aaf7886 | 2,012 | py | Python | data.py | bilhox/stardust | a808194fdf2b8b88e7ef9ce52c4d05d919609665 | [
"0BSD"
] | null | null | null | data.py | bilhox/stardust | a808194fdf2b8b88e7ef9ce52c4d05d919609665 | [
"0BSD"
] | null | null | null | data.py | bilhox/stardust | a808194fdf2b8b88e7ef9ce52c4d05d919609665 | [
"0BSD"
] | null | null | null |
imgs = {
"button":{
"default":{
"texture":"",
"logo":""
},
"saturation":{
"texture":"./imgs/source/buttons/button design.png",
"logo":"./imgs/source/logos/logo saturation.jpg"
},
"brightness":{
"texture":"./imgs/source/buttons/button design.png",
"logo":"./imgs/source/logos/logo luminosité.jpg"
},
"rot90":{
"texture":"./imgs/source/buttons/button design.png",
"logo":"./imgs/source/logos/logo rotation 90°.jpg"
},
"rot180":{
"texture":"./imgs/source/buttons/button design.png",
"logo":"./imgs/source/logos/logo rotation 180°.jpg"
},
"SymVert":{
"texture":"./imgs/source/buttons/button design.png",
"logo":"./imgs/source/logos/logo symétrie verticale.jpg"
},
"SymHori":{
"texture":"./imgs/source/buttons/button design.png",
"logo":"./imgs/source/logos/logo symétrie horizontal.jpg"
},
"convPBM":{
"texture":"./imgs/source/buttons/button design.png",
"logo":"./imgs/source/logos/logo pbm.jpg"
},
"convPGM":{
"texture":"./imgs/source/buttons/button design.png",
"logo":"./imgs/source/logos/logo pgm.jpg"
},
"resize":{
"texture":"./imgs/source/buttons/button design.png",
"logo":"./imgs/source/logos/logo redimension.jpg"
},
"save":{
"texture":"./imgs/source/logos/logo boutton save.png",
"logo":""
},
"delete":{
"texture":"./imgs/source/logos/logo delete.png",
"logo":""
},
"undo":{
"texture":"./imgs/source/logos/logo undo button.png",
"logo":""
}
}
} | 35.298246 | 72 | 0.452783 |
79444bb3eeba48e8cab4df37fc01e3c91312736b | 25 | py | Python | test.py | leemour/flask_extended | e4c99ddc281a092a965b00fd9fb5eae9e9d08e59 | [
"MIT"
] | null | null | null | test.py | leemour/flask_extended | e4c99ddc281a092a965b00fd9fb5eae9e9d08e59 | [
"MIT"
] | null | null | null | test.py | leemour/flask_extended | e4c99ddc281a092a965b00fd9fb5eae9e9d08e59 | [
"MIT"
] | null | null | null | msg = "Hello"
print(msg)
| 8.333333 | 13 | 0.64 |
79444dc9facbd23bc3bdf176ffcc0fca9ebcb6c1 | 22,998 | py | Python | examples/tough_heat/tough.py | jonghyunharrylee/PCGA | ed1b46f33847eafde685c97ef28d473baf63cf2f | [
"BSD-3-Clause"
] | 16 | 2018-09-09T11:27:20.000Z | 2022-03-09T01:46:37.000Z | examples/tough_heat/tough.py | jonghyunharrylee/PCGA | ed1b46f33847eafde685c97ef28d473baf63cf2f | [
"BSD-3-Clause"
] | 8 | 2018-03-19T19:36:38.000Z | 2022-01-26T20:09:27.000Z | examples/tough_heat/tough.py | jonghyunharrylee/PCGA | ed1b46f33847eafde685c97ef28d473baf63cf2f | [
"BSD-3-Clause"
] | 12 | 2018-06-29T13:16:40.000Z | 2022-03-09T01:46:25.000Z | import matplotlib
#matplotlib.use('Agg')
import math
import os
import pandas as pd
import matplotlib.pyplot as plt
import sys
import pandas as pd
# PyTOUGH specific modules
from mulgrids import *
from t2thermo import *
from t2data import *
from t2incons import *
from os import system
import scipy.io as sio
import datetime as dt
import os
import sys
from multiprocessing import Pool
import numpy as np
from shutil import copy2, rmtree
import subprocess
#from subprocess import call
from time import time
from scipy.interpolate import interp1d
from IPython.core.debugger import Tracer; debug_here = Tracer()
'''
three operations
1. write inputs
2. run simul
3. read input
'''
class Model:
def __init__(self,params = None):
self.idx = 0
self.homedir = os.path.abspath('./')
self.inputdir = os.path.abspath(os.path.join(self.homedir,"./input_files"))
self.deletedir = True
self.outputdir = None
self.parallel = False
self.record_cobs = False
from psutil import cpu_count # physical cpu count
self.ncores = cpu_count(logical=False)
if params is not None:
if 'deletedir' in params:
self.deletedir = params['deletedir']
if 'homedir' in params:
self.homedir = params['homedir']
self.inputdir = os.path.abspath(os.path.join(self.homedir,"./input_files"))
if 'inputdir' in params:
self.inputdir = params['inputdir']
if 'ncores' in params:
self.ncores = params['ncores']
if 'outputdir' in params:
# note that outputdir is not used for now; pyPCGA forces outputdir in ./simul/simul0000
self.outputdir = params['outputdir']
if 'parallel' in params:
self.parallel = params['parallel']
required_params = ("nx","dx","xlocs","ylocs","zlocs","obs_type","t_obs_interval", \
"max_timesteps","tstop","const_timestep","max_timestep","absolute_error","relative_error","print_interval", \
"relative_error","print_interval","timestep_reduction","gravity",'default_incons', \
"multi_params","solver_params","output_times_params")
if not all(key in params for key in required_params):
raise ValueError("You need to provide all the required tough parameters")
self.__dict__.update((k, v) for k, v in params.items() if k in required_params)
def create_dir(self,idx=None):
mydirbase = "./simul/simul"
if idx is None:
idx = self.idx
mydir = mydirbase + "{0:04d}".format(idx)
mydir = os.path.abspath(os.path.join(self.homedir, mydir))
if not os.path.exists(mydir):
os.makedirs(mydir)
for filename in os.listdir(self.inputdir):
copy2(os.path.join(self.inputdir,filename),mydir)
return mydir
def cleanup(self,outputdir=None):
"""
Removes outputdir if specified. Otherwise removes all output files
in the current working directory.
"""
import shutil
import glob
log = "dummy.log"
if os.path.exists(log):
os.remove(log)
if outputdir is not None and outputdir != os.getcwd():
if os.path.exists(outputdir):
shutil.rmtree(outputdir)
else:
filelist = glob.glob("*.out")
filelist += glob.glob("*.sim")
for file in filelist:
os.remove(file)
def run_model(self,s,idx=0):
sim_dir = self.create_dir(idx)
os.chdir(sim_dir)
# Create TOUGH2 input data file:
dat = t2data()
dat.title = '3D synthetic Ex1'
dx = self.dx # grid size
nx = self.nx # number of grids in each dimension. Don't use large grid because of pytough naming convention
geo = self.construct_grid(dx, nx)
dat.grid = t2grid().fromgeo(geo)
# simulation parameters:
# Table 4.9 page 78 pytough tutorial and Appendix E of TOUGH2 tutorial
# data.parameter is a dictionary
# each parameter can be called as dat.parameter['parameter name']
dat.parameter.update(
{'max_timesteps': self.max_timesteps, # maximum number of time steps
'tstop': self.tstop, # stop time
'const_timestep': self.const_timestep, # time step length
'max_timestep': self.max_timestep, # maximum time step size
'absolute_error': self.absolute_error, # absolute conexgence tolerance
'relative_error': self.relative_error, # relative convergence tolerance
'print_interval': self.print_interval, # time step interval for printing
'timestep_reduction': self.timestep_reduction, # time step reduction factor
'gravity': self.gravity, # gravitational acceleration
'default_incons': self.default_incons}) # default initial conditions
# Pressure in Pa, 100 m water = 10.e5 Pa water, 10 is the temperature in Celcius
dat.start = True
# Table 4.9 page 78 pytough tutorial and Appendix E of TOUGH2 tutorial
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.396.8810&rep=rep1&type=pdf
# Set MOPs: PARAM option in INFILE
dat.parameter['option'][1] = 1
dat.parameter['option'][5] = 3
dat.parameter['option'][7] = 1
dat.parameter['option'][11] = 2
dat.parameter['option'][12] = 2
dat.parameter['option'][15] = 1
dat.parameter['option'][16] = 4
dat.parameter['option'][20] = 0
dat.parameter['option'][21] = 1
# Set relative permeability (Corey) and capillarity functions:
# Table 4.10, page 79 PYTOUGH
dat.relative_permeability = {'type': 7, 'parameters': [0.95, 0.5, 1., 0.25, 0.]}
dat.capillarity = {'type': 7, 'parameters': [0.8, 0.32, 1.e-4, 1.e7, 0.999]}
# rocktype object, Table 3.3, page 67 PyTOUGH
r1 = rocktype('dfalt', permeability = [0.e-13]*3,density = 2600, porosity = 0.25,conductivity =2.51 , specific_heat = 920,)
r2 = rocktype('HOMO1', permeability = [9.e-13, 3.e-14, 3.e-14],density = 2600, porosity = 0.25,conductivity =2.51 , specific_heat = 920,)
r3 = rocktype('OUTB1', permeability = [1.e-13]*3,density = 2600, porosity = 0.25,conductivity =2.51 , specific_heat = 20000,)
dat.grid.add_rocktype(r2)
dat.grid.add_rocktype(r3)
dat.multi.update(self.multi_params)
# SOLVR Table 4.11 page 79 PyTough
dat.solver.update(self.solver_params)
# TIMES table 4.8
dat.output_times.update(self.output_times_params)
# rocktypes:
# Setting rocktype based on the block's 'z' coordinate
z_bottom = -17.
z_top = -2.
# assign all non boundary elements to r2
for blk in dat.grid.blocklist[1:]:
if z_bottom < blk.centre[2] < z_top:
blk.rocktype=r2
else: blk.rocktype=r3
for blk, pmx in zip(dat.grid.blocklist[1:], np.exp(s)):
blk.pmx = pmx
# setting the blocks for FOFT
# this section needs to be modified. FOFT blocks can be given by the coordinate of the block, or by the block name
# currently, it accepts the cell number in each direction
# for example the first cell here is located in grid (2,2) in 5th layer
xlocs, ylocs, zlocs = self.xlocs, self.ylocs, self.zlocs
nxlocs, nylocs, nzlocs = xlocs.shape[0],ylocs.shape[0],zlocs.shape[0]
#nobs = nxlocs*nylocs*nzlocs
x_obs = np.tile(xlocs,nylocs*nzlocs)
y_obs = np.tile(np.repeat(ylocs,nxlocs),nylocs)
z_obs = np.repeat(zlocs,nxlocs*nylocs)
# from function definition above
# set_measurement_blk(dat, x, y, z, nx):
self.set_measurement_blk(dat, x_obs, y_obs, z_obs, nx)
# adding boundary conditions
center = [150, 150] # [x,y] position of the center
L = 40 # length of one side of square
qmax = 15000
L_scale = 300
self.heat_generator(dat, geo, center, L, qmax, L_scale, method = 'Square')
# write data
dat.write('INFILE')
self.modify_infile()
self.clear_FOFT(sim_dir)
from time import time
stime = time()
# running the tough2 model
subprocess.call(["mpirun","-n","6","--bind-to","none","tough2-mp-eos1.debug"], stdout=subprocess.PIPE)
print('tough simulation run: %f sec' % (time() - stime))
# read simulation results with obs_type = 'Gas Pressure' and/or 'Temperature'
#from time import time
measurements = []
for str_obs_type in self.obs_type:
#stime = time()
measurements.extend(self.observation_model(sim_dir,str_obs_type))
#print('measurement interpolation: %f sec' % (time() - stime))
simul_obs = np.array(measurements).reshape(-1)
os.chdir(self.homedir)
if self.deletedir:
rmtree(sim_dir, ignore_errors=True)
# self.cleanup(sim_dir)
return simul_obs
def clear_FOFT(self,cur_dir):
"""To delete previous FOFT qnd MESH files in the directory.
You can delete SAVE, INCON, ... files the same way if you need to do it before running"""
for file in os.listdir(cur_dir):
if file.startswith("FOFT") or file.startswith("MES"):
try:
os.remove(file)
except Exception as e: # Python 3 "except ... as ..." syntax
print(e)
def read_FOFT(self,cur_dir):
""" Function to read all FOFT files in the directory.
The function returns a pandas dataframe containing measurements"""
FOFT_files = [filename for filename in os.listdir('.') if filename.startswith("FOFT")]
columns_name = ['Element','Time', 'Gas Pressure', 'Gas Saturation','Temperature']
frame = []
for filename in FOFT_files:
df = pd.read_fwf(filename,colspecs=[(12,16),(21,35),(37,52),(54,69),(72,86)],skiprows=[0,1],names=columns_name)
frame.append(df)
frame = pd.concat(frame)
# with open(filename, 'rb') as f_input:
# count =0
# for row in f_input:
# if count > 1:
# # follow the file format!!!
# rows.append([row[12:16],row[21:35],row[37:52],row[54:69],row[72:86]])
# count = count + 1
# frame = pd.DataFrame(rows, columns=columns_name)
frame[['Time','Gas Pressure','Gas Saturation','Temperature']] = frame[['Time','Gas Pressure','Gas Saturation','Temperature']].apply(pd.to_numeric)
return frame
def set_measurement_blk_list(self,dat, x_mid, dx, y_mid, dy, z_mid, dz):
''' set measurement blk list
'''
for blk in dat.grid.blocklist[1:]:
if z_mid-dz/2 < blk.centre[2] < z_mid+dz/2:
if x_mid-dx/2 < blk.centre[0] < x_mid+dx/2:
if y_mid-dy/2 < blk.centre[1] < y_mid+dy/2:
dat.history_block.append(blk.name)
return
def set_measurement_blk(self,dat, x, y, z, nx):
''' set measurement blk
'''
dat.history_block = []
x_obs = np.array(x)
y_obs = np.array(y)
z_obs = np.array(z)
block_list = nx[0]*nx[1]*(z_obs-1)+nx[0]*(y_obs-1)+x_obs
for blk_number in block_list:
dat.history_block.append(dat.grid.blocklist[blk_number].name)
return
def heat_generator(self,dat, geo, center, L, qmax, L_scale, method = 'Square'):
''' heat generator
'''
cols =[]
if method =='Square':
for col in geo.columnlist:
if center[0] - L/2 <= col.centre[0] <= center[0] + L/2:
if center[1] - L/2<= col.centre[1]<= center[1] + L/2:
cols.append(col)
dxs=np.array([col.centre[0]-center[0] for col in cols])/L_scale
dys=np.array([col.centre[1]-center[1] for col in cols])/L_scale
#print(dxs)
#print(dys)
corlength = 0.5
qcol=qmax*np.exp(-0.5*((dxs*dxs+dys*dys)/(corlength*corlength)))
#print(qcol)
#layer=geo.layerlist[-1] Changing to add the heat in a non-boundary layer
layer=geo.layerlist[-5]
# Page 87 from pyTOUGH #####################################
# adding the generator to the problem
dat.clear_generators()
dat.add_generator(t2generator(name=' in1', block=' d125', type='COM1', gx=.6, ex=3.E+5))
dat.add_generator(t2generator(name=' in2', block=' e125', type='COM1', gx=.6, ex=3.E+5))
dat.add_generator(t2generator(name=' in3', block=' f125', type='COM1', gx=.6, ex=3.E+5))
dat.add_generator(t2generator(name=' in4', block=' g125', type='COM1', gx=.6, ex=3.E+5))
dat.add_generator(t2generator(name=' ex1', block=' d145', type='COM1', gx=-.6))
dat.add_generator(t2generator(name=' ex2', block=' e145', type='COM1', gx=-.6))
dat.add_generator(t2generator(name=' ex3', block=' f145', type='COM1', gx=-.6))
dat.add_generator(t2generator(name=' ex4', block=' g145', type='COM1', gx=-.6))
#print('Generators added')
#for col,q in zip(cols,qcol):
# blkname=geo.block_name(layer.name,col.name)
# genname=' q'+col.name
# dat.add_generator(t2generator(name=genname, block=blkname, type='HEAT', gx=q*col.area))
dat.short_output={}
else:
raise NotImplementedError
return
def construct_grid(self,dx_grid, nx):
"""Constructing a TOUGH2 input geometry file for a 3D rectangular
model with the domain size of 300*100*20 m^3 and grid size 10*10*2 m^3."""
# Here we set up a rectangular grid. For other option, refer to pytough examples and tutorials
dx = [dx_grid[0]]*nx[0]
dy = [dx_grid[1]]*nx[1]
dz = [dx_grid[2]]*nx[2]
# use convention =2 for naming convention. Otherwise it will generate an error when you want to write INFILE
# for atmos_type, refer to page 124 pyTOUGH tutorial
geo = mulgrid().rectangular(dx, dy, dz, atmos_type = 0, convention=2)
geo.write('geom.dat') # write the geometry file
return geo
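# Editor note: with the 300 x 100 x 20 m domain and 10 x 10 x 2 m cells mentioned in the
# docstring, the matching arguments are dx_grid = [10., 10., 2.] and nx = [30, 10, 10]
# (3000 blocks), which is what the __main__ example at the bottom of this file passes in.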
def read_SAVE(self):
''' read SAVE using pandas
'''
data_SAVE= pd.read_csv('SAVE',sep="\s+", header =None, skiprows=[0], error_bad_lines = False)
data_SAVE = data_SAVE.drop([0,1, len(data_SAVE)-1])
blk_1 = []
blk_2 = []
blk_3 = []
for i in range(2,len(data_SAVE)+2):
if i%2 ==0:
if math.isnan(data_SAVE[2][i]):
blk_1.append(data_SAVE[1][i])
else:
blk_1.append(data_SAVE[2][i])
else:
blk_2.append(float(data_SAVE[0][i]))
blk_3.append(float(data_SAVE[1][i]))
data_save = {'Saturation': blk_1,
'Pressure': blk_2,
'Temperature': blk_3}
df = pd.DataFrame.from_dict(data_save)
return df
def plot_FoFT(self,data_FOFT, Elem, variable = 'Temperature', ylim = [0, 100], xlim = [0, 86400], figname=None):
''' plot FoFT
NOTE THAT PLOTFOFT WILL NOT PLOT ANY OF THE LOCATIONS THAT HAVE A BLANK IN THEIR NAME
'''
x = np.array(data_FOFT[data_FOFT['Element']==Elem]['Time'])
plt.plot(x,data_FOFT[data_FOFT['Element']==Elem][variable])
#plt.axis([0, x[-1], ylim[0], ylim[1]])
plt.axis([xlim[0], xlim[1], ylim[0], ylim[1]])
#plt.yticks([])
if figname is not None:
plt.savefig(figname+'.png')
plt.show()
plt.close()
return
def plot_SAVE(self, dat, df, nx, y=0, col = 'Temperature', clim = [0, 100], figname=None):
''' plot_SAVE
'''
elevation_save = np.zeros((3000,1))
for idblock in range(1,3001):
elevation_save[idblock-1] = dat.grid.blocklist[idblock].centre[2]
z = np.reshape(elevation_save,(nx[2],nx[1],nx[0]))
data_save=np.array(df[col])
if col == 'Pressure':
# Gas pressure converted to m H2O + elevation = hydraulic head in water
a = np.reshape(data_save*0.00010197,(nx[2],nx[1],nx[0])).T + z.T
#a = np.reshape(data_save,(nx[2],nx[1],nx[0])).T
plt.imshow(a[:,y,:].T)
#plt.colorbar()
plt.clim(clim[0],clim[1])
#plt.title('Hydraulic head (m)')
plt.title('Pressure in Pa')
plt.xlabel('x(m)')
plt.ylabel('z(m)')
plt.colorbar(orientation='horizontal')
elif col == 'Temperature':
a = np.reshape(data_save,(nx[2],nx[1],nx[0])).T
plt.imshow(a[:,y,:].T)
#plt.colorbar()
plt.clim(clim[0],clim[1])
plt.title('Temperature in C')
plt.xlabel('x(m)')
plt.ylabel('z(m)')
plt.colorbar(orientation='horizontal')
elif col == 'Gas Pressure':
raise NotImplementedError
else:
raise ValueError('it support (Gas )Pressure or Temperature')
if figname is not None:
plt.savefig(figname+'.png')
plt.close()
return
def modify_infile(self):
''' modify_infile
'''
f = open("INFILE", "r")
contents = f.readlines()
f.close()
contents.insert(5, 'SEED\n')
f = open("INFILE", "w")
contents = "".join(contents)
f.write(contents)
f.close()
return
def observation_model(self,cur_dir,obs_type):
''' observation_model
'''
if obs_type != 'Temperature' and obs_type != 'Gas Pressure':
raise ValueError('obs_type should be either Temperature of Gas Pressure')
sim_results = self.read_FOFT(cur_dir)
# order by Element and Time for easy plotting
#sim_obs.sort_values(by=['Element','Time'])
#obs = sim_obs[obs_type]
elements = sim_results['Element'].unique()
elements.sort()
dt = self.t_obs_interval
obs = []
for elem in elements:
t = np.array(sim_results[sim_results['Element']==elem]['Time'])
strtidx = int(np.ceil(t.min()/dt))
endidx = int(np.floor(t.max()/dt))
t_obs = [dt*i for i in range(strtidx,endidx+1)]
obs_all = np.array(sim_results[sim_results['Element']==elem][obs_type])
f = interp1d(t, obs_all)
obs_at_t_obs = f(t_obs)
obs.extend(obs_at_t_obs)
return np.array(obs)
def run(self,s,par,ncores=None):
if ncores is None:
ncores = self.ncores
method_args = range(s.shape[1])
args_map = [(s[:, arg:arg + 1], arg) for arg in method_args]
if par:
pool = Pool(processes=ncores)
simul_obs = pool.map(self, args_map)
else:
simul_obs =[]
for item in args_map:
simul_obs.append(self(item))
return np.array(simul_obs).T
#pool.close()
#pool.join()
def __call__(self,args):
return self.run_model(args[0],args[1])
if __name__ == '__main__':
import tough
import numpy as np
from time import time
s = np.loadtxt("true_30_10_10_gau.txt")
s = s.reshape(-1, 1)
nx = [30, 10, 10]
dx = [10., 10., 2.]
# monitoring indices
xlocs = np.arange(5,21,2)
ylocs = np.array([1,3,5,7])
zlocs = np.array([3,5,7,9])
params = {'nx':nx,'dx':dx, 'deletedir':False, 'xlocs': xlocs, 'ylocs':ylocs, 'zlocs':zlocs, \
'obs_type':['Gas Pressure','Temperature'],'t_obs_interval':86400.*5.,\
'max_timesteps': 9000, 'tstop': 0.32342126E+08, 'const_timestep': 6, 'max_timestep':86400, \
'absolute_error': 1, 'relative_error': 5.e-6, 'print_interval': 9000, 'timestep_reduction': 3., \
'gravity': 9.81,'default_incons': [100.e4, 10], \
'multi_params':{'num_components': 1, 'num_equations':2, 'num_phases':2, 'num_secondary_parameters':6},
'solver_params':{'type': 5, 'z_precond':1,'o_precond': 0, 'relative_max_iterations':.8,'closure':1.e-7 },
'output_times_params': {'num_times_specified':2, 'time': [0.8640E+04, 0.32342126E+08]}}
par = False # parallelization false
mymodel = tough.Model(params)
print('(1) single run')
from time import time
stime = time()
simul_obs = mymodel.run(s,par)
print('simulation run: %f sec' % (time() - stime))
obs_true = np.loadtxt('obs_true.txt')
print(np.linalg.norm(obs_true.reshape(-1) - simul_obs.reshape(-1)))
import sys
sys.exit(0)
#obs = np.copy(simul_obs)
#nobs = obs.shape[0]
#obs[:nobs/2] = simul_obs[:nobs/2] + 10000.*np.random.randn(nobs/2,1)
#obs[nobs/2:] = simul_obs[nobs/2:] + 0.5*np.random.randn(nobs/2,1)
#np.savetxt('obs.txt',obs)
#np.savetxt('obs_pres.txt',obs[:nobs/2])
#np.savetxt('obs_temp.txt',obs[nobs/2:])
ncores = 6
nrelzs = 12
print('(2) parallel run with ncores = %d' % ncores)
par = True # parallelization false
srelz = np.zeros((np.size(s,0),nrelzs),'d')
for i in range(nrelzs):
srelz[:,i:i+1] = s + 0.1*np.random.randn(np.size(s,0),1)
stime = time()
simul_obs_all = mymodel.run(srelz,par,ncores = ncores)
print('simulation run: %f sec' % (time() - stime))
print(simul_obs_all)
| 39.447684 | 155 | 0.551744 |
79444dd90f91e18c47a6e28909c674c78388baea | 1,270 | py | Python | setup.py | bouttier/django-crispy-forms | 9a564f3aa1486dc1e7e0f5160c551d2072de5157 | [
"MIT"
] | null | null | null | setup.py | bouttier/django-crispy-forms | 9a564f3aa1486dc1e7e0f5160c551d2072de5157 | [
"MIT"
] | null | null | null | setup.py | bouttier/django-crispy-forms | 9a564f3aa1486dc1e7e0f5160c551d2072de5157 | [
"MIT"
] | null | null | null | import crispy_forms
from setuptools import setup, find_packages
tests_require = [
'Django>=1.3,<1.8',
]
setup(
name='django-crispy-forms',
version=crispy_forms.__version__,
description="Best way to have Django DRY forms",
long_description=open('README.rst').read(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
],
extras_require={
'tests': tests_require,
},
keywords=['forms', 'django', 'crispy', 'DRY'],
author='Miguel Araujo',
author_email='[email protected]',
url='http://github.com/maraujop/django-crispy-forms',
license='MIT',
packages=find_packages(exclude=['docs']),
include_package_data=True,
zip_safe=False,
)
| 30.97561 | 71 | 0.623622 |
79444e25ce148c1642adf3000d8173970cdd0cb5 | 325 | py | Python | advent2020/d05_binary_boarding.py | LukasGelbmann/advent-of-code | 886747353dd00ed068fcedd45f0965f2cc433bcb | [
"MIT"
] | 6 | 2020-12-01T11:01:33.000Z | 2020-12-12T15:15:24.000Z | advent2020/d05_binary_boarding.py | LukasGelbmann/advent-of-code | 886747353dd00ed068fcedd45f0965f2cc433bcb | [
"MIT"
] | null | null | null | advent2020/d05_binary_boarding.py | LukasGelbmann/advent-of-code | 886747353dd00ed068fcedd45f0965f2cc433bcb | [
"MIT"
] | 1 | 2020-12-03T08:47:59.000Z | 2020-12-03T08:47:59.000Z | def solve(puzzle_input):
binary_input = puzzle_input.translate(str.maketrans('FBLR', '0101'))
seat_ids = {int(line, base=2) for line in binary_input.splitlines()}
print(max(seat_ids))
print(missing(seat_ids))
def missing(seat_ids):
i = min(seat_ids)
while i in seat_ids:
i += 1
return i
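# Editor note, a worked example of the encoding used in solve() above (the seat string is
# taken from the puzzle statement, not from this file): 'FBFBBFFRLR' translates to
# '0101100101', and int('0101100101', base=2) == 357, that seat's ID.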
| 25 | 72 | 0.661538 |
79444e9dd44de5edfd61f80262b3d7b6978e1ede | 4,839 | py | Python | test/functional/interface_http.py | Chellit/Chellit | 7d804cfc64b4e91234b68f14b82f12c752eb6aae | [
"MIT"
] | 2 | 2021-02-01T08:29:18.000Z | 2021-06-28T23:45:28.000Z | test/functional/interface_http.py | Chellit/Chellit | 7d804cfc64b4e91234b68f14b82f12c752eb6aae | [
"MIT"
] | null | null | null | test/functional/interface_http.py | Chellit/Chellit | 7d804cfc64b4e91234b68f14b82f12c752eb6aae | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Chellit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import ChellitTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (ChellitTestFramework):
def set_test_params(self):
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock==None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #connection must still be open because chellitd should use keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| 43.990909 | 108 | 0.634842 |
79445028477c5d67a39ed23425f0739179ebcab2 | 1,193 | py | Python | paso4/edgesAuthors.py | staplezz/datathon2020 | 476bdc85c3152cf1ed2aa1769ed0e4624eb5ca44 | [
"MIT"
] | null | null | null | paso4/edgesAuthors.py | staplezz/datathon2020 | 476bdc85c3152cf1ed2aa1769ed0e4624eb5ca44 | [
"MIT"
] | null | null | null | paso4/edgesAuthors.py | staplezz/datathon2020 | 476bdc85c3152cf1ed2aa1769ed0e4624eb5ca44 | [
"MIT"
] | null | null | null | import pandas as pd
import itertools
'''
Part 4: Building the edges using the catalogue
of filtered authors.
'''
#Import the catalogue of authors.
filtered_authorships = pd.read_csv('filtered_authorships.csv')
#Dictionary for collecting the authors of each article.
article_dict = dict()
#Store the catalogue data in the dictionary.
for index, row in filtered_authorships.iterrows():
article_dict.setdefault(row['id_article'], []).append(row['author'])
#Set of edges for the authors that have collaborated together.
aristas = set()
for key, item in article_dict.items():
#Combinations of authors that have worked together.
combinaciones = itertools.combinations(item, 2)
for tupla in combinaciones:
aristas.add(tupla)
#Remove the duplicated author pairs.
aristas = {frozenset(autor) for autor in aristas}
#Convert to a list of tuples.
aristas = [tuple(i) for i in aristas]
#Filter out tuples with length less than 2.
aristas = [i for i in aristas if len(i) == 2]
#Convert to a dataframe
aristas = pd.DataFrame(aristas, columns=['source', 'target'])
#Create the edges csv.
aristas.to_csv("edges.csv", encoding='utf-8', index=False)
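# Editor note, a tiny illustration of the frozenset de-duplication used above (the names
# are made up): {('Ana', 'Luis'), ('Luis', 'Ana')} collapses to the single pair
# frozenset({'Ana', 'Luis'}), so each collaborating pair is kept once regardless of order.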
| 29.097561 | 72 | 0.749371 |
794452849ec65fc0697b4dd3023d879697129a4b | 1,217 | py | Python | src/logpipe/models.py | securingsam/django-logpipe | 8db0a5d7df80dbc33708e7ce2f1c939232d39028 | [
"ISC"
] | null | null | null | src/logpipe/models.py | securingsam/django-logpipe | 8db0a5d7df80dbc33708e7ce2f1c939232d39028 | [
"ISC"
] | null | null | null | src/logpipe/models.py | securingsam/django-logpipe | 8db0a5d7df80dbc33708e7ce2f1c939232d39028 | [
"ISC"
] | null | null | null | from django.db import models
class KafkaOffset(models.Model):
topic = models.CharField(max_length=200,
help_text='The Kafka topic name')
partition = models.PositiveIntegerField(
help_text='The Kafka partition identifier')
offset = models.PositiveIntegerField(default=0,
help_text='The current offset in the Kafka partition')
class Meta:
unique_together = ('topic', 'partition')
ordering = ('topic', 'partition', 'offset')
def __str__(self):
return 'topic="{}", partition="{}", offset="{}"'.format(self.topic, self.partition, self.offset)
class KinesisOffset(models.Model):
stream = models.CharField(max_length=200,
help_text='The Kinesis stream name')
shard = models.CharField(max_length=200,
help_text='The Kinesis shard ID')
sequence_number = models.CharField(max_length=200,
help_text='The current sequence number in the Kinesis shard')
class Meta:
unique_together = ('stream', 'shard')
ordering = ('stream', 'shard', 'sequence_number')
def __str__(self):
return 'stream="{}", shard="{}", sequence_number="{}"'.format(self.stream, self.shard, self.sequence_number)
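# Illustrative usage sketch (editor addition; the topic and partition values are
# assumptions, only the model fields above come from this file):
#   offset, _ = KafkaOffset.objects.get_or_create(topic='logpipe-test', partition=0)
#   offset.offset += 1
#   offset.save()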
| 31.205128 | 116 | 0.667214 |
794452b75b8e12d5d28fafadadb760d7e9eeac64 | 9,511 | py | Python | train_fns.py | dberga/MineGAN | 36b048c2fcaeb80b22f3c03288e33d862d7e3113 | [
"MIT"
] | null | null | null | train_fns.py | dberga/MineGAN | 36b048c2fcaeb80b22f3c03288e33d862d7e3113 | [
"MIT"
] | null | null | null | train_fns.py | dberga/MineGAN | 36b048c2fcaeb80b22f3c03288e33d862d7e3113 | [
"MIT"
] | null | null | null | ''' train_fns.py
Functions for the main loop of training different conditional image models
'''
import torch
import torch.nn as nn
import torchvision
import os
import utils
import losses
import pdb
# Dummy training function for debugging
def dummy_training_function():
def train(x, y):
return {}
return train
def GAN_training_function(G, D, M, GD, z_, y_, ema, state_dict, config):# yaxing
def train(x, y, stage):
G.optim.zero_grad()
D.optim.zero_grad()
M.optim.zero_grad()# yaxing # How many chunks to split x and y into?
x = torch.split(x, config['batch_size'])
y = torch.split(y, config['batch_size'])
counter = 0
# Optionally toggle D and G's "require_grad"
if config['toggle_grads']:# yaxing: here it is True
utils.toggle_grad(D, True)
utils.toggle_grad(G, False)
utils.toggle_grad(M, False) # yaxing
for step_index in range(config['num_D_steps']):
# If accumulating gradients, loop multiple times before an optimizer step
D.optim.zero_grad()
for accumulation_index in range(config['num_D_accumulations']):
z_.sample_()
y_.sample_()
# yaxing: set gy and dy equal to 0, since we do not know the label
D_fake, D_real = GD(z_[:config['batch_size']], y_[:config['batch_size']],
x[counter], y[counter], train_G=False,
split_D=config['split_D'])
# Compute components of D's loss, average them, and divide by
# the number of gradient accumulations
D_loss_real, D_loss_fake = losses.discriminator_loss(D_fake, D_real)
D_loss = (D_loss_real + D_loss_fake) / float(config['num_D_accumulations'])
D_loss.backward()
counter += 1
# Optionally apply ortho reg in D
if config['D_ortho'] > 0.0:# yaxing: here it is 0.0
# Debug print to indicate we're using ortho reg in D.
print('using modified ortho reg in D')
utils.ortho(D, config['D_ortho'])
D.optim.step()
# Optionally toggle "requires_grad"
if config['toggle_grads']:
utils.toggle_grad(D, False)
if stage==1:
utils.toggle_grad(G, False)# yaxing
else:
utils.toggle_grad(G, True)# yaxing
utils.toggle_grad(M, True) # yaxing
# Zero G's gradients by default before training G, for safety
G.optim.zero_grad()
M.optim.zero_grad()# yaxing
# If accumulating gradients, loop multiple times
for accumulation_index in range(config['num_G_accumulations']): # yaxing: here it is 1
z_.sample_()
y_.sample_()
#D_fake = GD(z_, y_, train_G=True, split_D=config['split_D'])
# yaxing: set gy and dy equal to 0, since we do not know the label
D_fake, M_regu= GD(z_, y_, train_G=True, split_D=config['split_D'], train_M=True, M_regu=True)
#G_loss = losses.generator_loss(D_fake) / float(config['num_G_accumulations'])
M_loss = losses.generator_loss(D_fake, M_regu) / float(config['num_G_accumulations'])
#pdb.set_trace()
#G_loss.backward()
M_loss.backward()
# Optionally apply modified ortho reg in G
if config['G_ortho'] > 0.0:# yaxing: here it is 0.0
print('using modified ortho reg in G') # Debug print to indicate we're using ortho reg in G
# Don't ortho reg shared, it makes no sense. Really we should blacklist any embeddings for this
utils.ortho(G, config['G_ortho'],
blacklist=[param for param in G.shared.parameters()])
if stage==2:
G.optim.step()
M.optim.step()
# If we have an ema, update it, regardless of if we test with it or not
if config['ema']:
ema.update(state_dict['itr'])
#out = {'G_loss': float(G_loss.item()),
out = {'G_loss': float(M_loss.item()),
'D_loss_real': float(D_loss_real.item()),
'D_loss_fake': float(D_loss_fake.item())}
# Return G's loss and the components of D's loss.
return out
return train
''' This function takes in the model, saves the weights (multiple copies if
requested), and prepares sample sheets: one consisting of samples given
a fixed noise seed (to show how the model evolves throughout training),
a set of full conditional sample sheets, and a set of interp sheets. '''
def save_and_sample(G, D, M, G_ema, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name):
utils.save_weights(G, D, M, state_dict, config['weights_root'],
experiment_name, None, G_ema if config['ema'] else None)
# Save an additional copy to mitigate accidental corruption if process
# is killed during a save (it's happened to me before -.-)
# if config['num_save_copies'] > 0:
# utils.save_weights(G, D, M, state_dict, config['weights_root'],
# experiment_name,
# 'copy%d' % state_dict['save_num'],
# G_ema if config['ema'] else None)
# state_dict['save_num'] = (state_dict['save_num'] + 1 ) % config['num_save_copies']
# # Use EMA G for samples or non-EMA?
which_G = G_ema if config['ema'] and config['use_ema'] else G
# Accumulate standing statistics?
#if config['accumulate_stats']:
# utils.accumulate_standing_stats(G_ema if config['ema'] and config['use_ema'] else G,
# z_, y_, config['n_classes'],
# config['num_standing_accumulations'])
# Save a random sample sheet with fixed z and y
with torch.no_grad():
if config['parallel']:
z_from_M, y_from_M = nn.parallel.data_parallel(M, (fixed_z,))
fixed_Gz = nn.parallel.data_parallel(which_G, (z_from_M, y_from_M))
#fixed_Gz = nn.parallel.data_parallel(which_G, (fixed_z, which_G.shared(fixed_y)))
else:
z_from_M, y_from_M = M(fixed_z)
fixed_Gz = which_G(z_from_M, y_from_M)
fixed_Gz = which_G(z_from_M, y_from_M)
#fixed_Gz = which_G(fixed_z, which_G.shared(fixed_y))
if not os.path.isdir('%s/%s' % (config['samples_root'], experiment_name)):
os.mkdir('%s/%s' % (config['samples_root'], experiment_name))
image_filename = '%s/%s/fixed_samples%d.jpg' % (config['samples_root'],
experiment_name,
state_dict['itr'])
torchvision.utils.save_image(fixed_Gz.float().cpu(), image_filename,
nrow=int(fixed_Gz.shape[0] **0.5), normalize=True)
# For now, every time we save, also save sample sheets
utils.sample_sheet(which_G,M,
classes_per_sheet=utils.classes_per_sheet_dict[config['dataset']],
num_classes=config['n_classes'],
samples_per_class=10, parallel=config['parallel'],
samples_root=config['samples_root'],
experiment_name=experiment_name,
folder_number=state_dict['itr'],
z_=z_)
# Also save interp sheets
# yaxing
# for fix_z, fix_y in zip([False, False, True], [False, True, False]):
# utils.interp_sheet(which_G,
# num_per_sheet=16,
# num_midpoints=8,
# num_classes=config['n_classes'],
# parallel=config['parallel'],
# samples_root=config['samples_root'],
# experiment_name=experiment_name,
# folder_number=state_dict['itr'],
# sheet_number=0,
# fix_z=fix_z, fix_y=fix_y, device='cuda')
''' This function runs the inception metrics code, checks if the results
are an improvement over the previous best (either in IS or FID,
user-specified), logs the results, and saves a best_ copy if it's an
improvement. '''
def test(G, D, G_ema, z_, y_, state_dict, config, sample, get_inception_metrics,
experiment_name, test_log):
print('Gathering inception metrics...')
if config['accumulate_stats']:
utils.accumulate_standing_stats(G_ema if config['ema'] and config['use_ema'] else G,
z_, y_, config['n_classes'],
config['num_standing_accumulations'])
IS_mean, IS_std, FID = get_inception_metrics(sample,
config['num_inception_images'],
num_splits=10)
print('Itr %d: PYTORCH UNOFFICIAL Inception Score is %3.3f +/- %3.3f, PYTORCH UNOFFICIAL FID is %5.4f' % (state_dict['itr'], IS_mean, IS_std, FID))
  # If improved over previous best metric, save appropriate copy
if ((config['which_best'] == 'IS' and IS_mean > state_dict['best_IS'])
or (config['which_best'] == 'FID' and FID < state_dict['best_FID'])):
print('%s improved over previous best, saving checkpoint...' % config['which_best'])
utils.save_weights(G, D, state_dict, config['weights_root'],
experiment_name, 'best%d' % state_dict['save_best_num'],
G_ema if config['ema'] else None)
state_dict['save_best_num'] = (state_dict['save_best_num'] + 1 ) % config['num_best_copies']
state_dict['best_IS'] = max(state_dict['best_IS'], IS_mean)
state_dict['best_FID'] = min(state_dict['best_FID'], FID)
# Log results to file
test_log.log(itr=int(state_dict['itr']), IS_mean=float(IS_mean),
IS_std=float(IS_std), FID=float(FID))
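''' Illustrative sketch (not part of the original file): how the two hooks above are
typically wired into a training loop. Every name here (train, loaders, sample,
get_inception_metrics, config keys such as save_every/test_every) is assumed to be
provided elsewhere in the code base, so treat this as a hedged example rather than the
actual training script. '''
def example_training_loop(G, D, M, G_ema, z_, y_, fixed_z, fixed_y, state_dict, config,
                          loaders, train, sample, get_inception_metrics, experiment_name,
                          test_log):
  for x, y in loaders[0]:
    state_dict['itr'] += 1
    train(x, y)  # one optimization step, supplied by the caller
    if not (state_dict['itr'] % config['save_every']):
      save_and_sample(G, D, M, G_ema, z_, y_, fixed_z, fixed_y,
                      state_dict, config, experiment_name)
    if not (state_dict['itr'] % config['test_every']):
      test(G, D, G_ema, z_, y_, state_dict, config, sample,
           get_inception_metrics, experiment_name, test_log)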
| 45.94686 | 149 | 0.618337 |
79445569166dd37ae8a3ed66f1f40f05d1c5d6a7 | 2,625 | py | Python | workflow.py | ChenMnZ/auto-daily-health-report | cecae78185b27f1a7e9d3ed71c1bab5db4b33002 | [
"MIT"
] | null | null | null | workflow.py | ChenMnZ/auto-daily-health-report | cecae78185b27f1a7e9d3ed71c1bab5db4b33002 | [
"MIT"
] | null | null | null | workflow.py | ChenMnZ/auto-daily-health-report | cecae78185b27f1a7e9d3ed71c1bab5db4b33002 | [
"MIT"
] | null | null | null | import requests
import sys
import os
from checkin import health_report
from recent import check_recent
username = ""
password = ""
def report_with_server_chan(flag, reason="", success=""):
try:
server_chan_secret = os.environ["server_chan_secret"]
push_url = "https://sc.ftqq.com/" + server_chan_secret + ".send"
if flag:
result_title = "打卡成功提醒"
result_text = "今日打卡操作已成功!" + success
else:
result_title = "打卡失败提醒"
result_text = "今日打卡操作没有成功,请手动完成打卡。错误细节:" + reason
print(result_text)
session = requests.Session()
session.post(push_url, {
"text": result_title,
"desp": result_text
})
except KeyError:
print("Cannot report with Server-Chan: secret_key not set")
return
except Exception:
print("Cannot report with Server-Chan: unknown error")
return
try:
username = os.environ["xmu_username"]
password = os.environ["xmu_password"]
# webvpn_username = os.environ["webvpn_username"]
# webvpn_password = os.environ["webvpn_password"]
except KeyError:
reason = "You must provide a valid username & password and VPN account to log in xmuxg.xmu.edu.cn!"
print(reason)
report_with_server_chan(False, reason)
sys.exit(1)
try:
today_log, status = check_recent(username, password)
if status == 0 and today_log["today"]:
print("Already reported today :)")
sys.exit(0)
response, status = health_report(username, password)
if status != 0:
print("Report error, reason: " + response["reason"])
report_with_server_chan(False, response["reason"])
sys.exit(1)
today_log, status = check_recent(username, password)
if status == 0:
if today_log["today"]:
print("Automatically reported successfully!")
success_info = "当前连续打卡" + str(today_log["days"]) + "天,健康码为" + str(today_log["color"]) + "码!"
report_with_server_chan(True, success=success_info)
sys.exit(0)
else:
print("Automatically reported failed.")
reason = "System rejected the health-report request."
report_with_server_chan(False, reason)
sys.exit(1)
else:
report_with_server_chan(False, "Internal server error")
sys.exit(1)
except Exception as e:
reason = "Error occurred while sending the report request."
print(reason, e)
report_with_server_chan(False, reason)
sys.exit(1)
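# Configuration note (an assumption about local use, not part of the original script):
# the credentials above are read from environment variables, so a manual run could be
#   export xmu_username=... ; export xmu_password=... ; export server_chan_secret=...
#   python workflow.py
# where server_chan_secret is optional and only needed for the Server-Chan push message.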
| 32.012195 | 105 | 0.612952 |
794456ee9d3ffa60a59e2778ae43cccece3bd984 | 5,383 | py | Python | python-package/mlbox/model/supervised/classification/feature_selector.py | maheshkarriisb/MLBox | 2e5e8eb488531b4cdbbe202dd6f70bba0ae3bebd | [
"BSD-3-Clause"
] | null | null | null | python-package/mlbox/model/supervised/classification/feature_selector.py | maheshkarriisb/MLBox | 2e5e8eb488531b4cdbbe202dd6f70bba0ae3bebd | [
"BSD-3-Clause"
] | null | null | null | python-package/mlbox/model/supervised/classification/feature_selector.py | maheshkarriisb/MLBox | 2e5e8eb488531b4cdbbe202dd6f70bba0ae3bebd | [
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Author: Axel ARONIO DE ROMBLAY <[email protected]>
# License: BSD 3 clause
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
import warnings
class Clf_feature_selector():
"""
Selects useful features. Several strategies are possible (filter and wrapper methods). Works for classification problems only (multiclass or binary).
Parameters
----------
    strategy : string, default = "l1"
        The strategy to select features.
        Available strategies = "variance", "l1" or "rf_feature_importance"
    threshold : float between 0. and 1., default = 0.3
        The percentage of variables to discard according to the strategy.
"""
def __init__(self, strategy = 'l1', threshold = 0.3):
        self.strategy = strategy  # 'variance', 'l1' or 'rf_feature_importance'
        self.threshold = threshold  # a float between 0. and 1., default 0.3, i.e. we drop 30% of the features
self.__fitOK = False
self.__to_discard = []
def get_params(self, deep=True):
return {'strategy' : self.strategy,
'threshold' : self.threshold}
def set_params(self,**params):
self.__fitOK = False
for k,v in params.items():
if k not in self.get_params():
warnings.warn("Invalid parameter a for feature selector Clf_feature_selector. Parameter IGNORED. Check the list of available parameters with `feature_selector.get_params().keys()`")
else:
setattr(self,k,v)
def fit(self, df_train, y_train):
'''
Fits Clf_feature_selector.
Parameters
----------
df_train : pandas dataframe of shape = (n_train, n_features)
The train dataset with numerical features and no NA
y_train : pandas series of shape = (n_train, ).
The target for classification task. Must be encoded.
Returns
-------
None
'''
### sanity checks
if ((type(df_train)!=pd.SparseDataFrame)&(type(df_train)!=pd.DataFrame)):
raise ValueError("df_train must be a DataFrame")
if (type(y_train) != pd.core.series.Series):
raise ValueError("y_train must be a Series")
if(self.strategy=='variance'):
coef = df_train.std()
abstract_threshold = np.percentile(coef,100.*self.threshold)
self.__to_discard = coef[coef<abstract_threshold].index
self.__fitOK = True
elif(self.strategy=='l1'):
model = LogisticRegression(C=0.01, penalty='l1',n_jobs=-1, random_state = 0) #to be tuned
model.fit(df_train, y_train)
coef = np.mean(np.abs(model.coef_),axis=0)
abstract_threshold = np.percentile(coef,100.*self.threshold)
self.__to_discard = df_train.columns[coef<abstract_threshold]
self.__fitOK = True
elif(self.strategy=='rf_feature_importance'):
model = RandomForestClassifier(n_estimators=50,n_jobs=-1, random_state = 0) #to be tuned
model.fit(df_train, y_train)
coef = model.feature_importances_
abstract_threshold = np.percentile(coef,100.*self.threshold)
self.__to_discard = df_train.columns[coef<abstract_threshold]
self.__fitOK = True
else:
raise ValueError("Strategy invalid. Please choose between 'variance', 'l1' or 'rf_feature_importance'")
return self
def transform(self, df):
'''
Transforms the dataset
Parameters
----------
df : pandas dataframe of shape = (n, n_features)
The dataset with numerical features and no NA
Returns
-------
df : pandas dataframe of shape = (n_train, n_features*(1-threshold))
The train dataset with relevant features
'''
if(self.__fitOK):
### sanity checks
if ((type(df)!=pd.SparseDataFrame)&(type(df)!=pd.DataFrame)):
raise ValueError("df must be a DataFrame")
return df.drop(self.__to_discard,axis=1)
else:
raise ValueError("call fit or fit_transform function before")
def fit_transform(self, df_train, y_train):
"""
Fits Clf_feature_selector and transforms the dataset
Parameters
----------
df_train : pandas dataframe of shape = (n_train, n_features)
The train dataset with numerical features and no NA
y_train : pandas series of shape = (n_train, ).
The target for classification task. Must be encoded.
Returns
-------
df_train : pandas dataframe of shape = (n_train, n_features*(1-threshold))
The train dataset with relevant features
"""
self.fit(df_train, y_train)
return self.transform(df_train)
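# Minimal usage sketch (illustrative assumption, not part of the MLBox package itself):
def _example_usage():
    df_train = pd.DataFrame({"f1": [0., 1., 0., 1.],
                             "f2": [1., 2., 3., 4.],
                             "f3": [0.1, 0.1, 0.1, 0.1]})
    y_train = pd.Series([0, 1, 0, 1])
    fs = Clf_feature_selector(strategy="variance", threshold=0.3)
    return fs.fit_transform(df_train, y_train)  # drops the lowest-variance ~30% of features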
| 29.740331 | 197 | 0.567527 |
794457037f02b448be9258e64df59f1d94cd02af | 3,169 | py | Python | member.py | Enon0831/hands-up | c18c61f900644efa1ee46d3b743edcf67cc22424 | [
"MIT"
] | null | null | null | member.py | Enon0831/hands-up | c18c61f900644efa1ee46d3b743edcf67cc22424 | [
"MIT"
] | null | null | null | member.py | Enon0831/hands-up | c18c61f900644efa1ee46d3b743edcf67cc22424 | [
"MIT"
] | null | null | null | import discord
import words
import datetime
class menber:
    def __init__(self,text,num): # initialization: called automatically when an instance is created
self.n = [text,num]
self.name = []
self.res = []
self.resn =["(",0,")"]
self.tmp = 0
def add(self,name):
if(self.n[1] > 0):
self.n[1] -= 1
self.name.append(name)
def reserve(self,name):
if len(self.res) == 0:
self.tmp = 1
self.res.append(name)
for i in range(3):
self.n.insert(2+i,self.resn[i])
self.n[3] += 1
else:
self.res.append(name)
self.n[3] += 1
self.res.sort(reverse=True)
def sub(self,name):
for i in range(len(self.name)):
if self.name[i] == name:
self.name.pop(i)
self.n[1] += 1
break
def reservedel(self,name):
for i in range(len(self.res)):
if self.res[i] == name:
self.res.pop(i)
self.n[3] -= 1
break
if len(self.res) == 0:
self.tmp = 0
for i in range(3):
self.n.pop(2)
class guild:
def __init__(self):
self.mention = 0
self.mentiondef = 0
self.mentionnum = 3
self.time = {}
self.time_key = []
self.msg = ""
self.langage = "ja"
self.clear_time = datetime.datetime.strptime("2021-01-01 0:00:00",'%Y-%m-%d %H:%M:%S')
def set(self,time):
send = time + "@"
self.time[time] = menber(send,6)
self.time_key = sorted(self.time.keys())
def out(self,time):
del self.time[time]
self.time_key = sorted(self.time.keys())
def clear(self,time):
send = time + "@"
self.time[time] = menber(send,6)
def nowhands(server):
if len(server.time_key) == 0:
embed=None
m = words.now_none(server.langage)
return m , embed
else:
if (server.mention == 1):
mall = "@everyone\n"
server.mention = 0
else:
mall = "\n"
embed=discord.Embed(title="__***WAR LIST***__",color=0xee1111)
for i in server.time_key:
if server.time[i].tmp == 1:
mtmp = ">>> " + " , ".join(str(x) for x in server.time[i].name) + " " + " , ".join(str(x) for x in server.time[i].res)
if mtmp == ">>> ":
mtmp = ">>> " + words.now(server.langage)
embed.add_field(name="".join(str(x) for x in server.time[i].n), value=mtmp, inline=False)
embed.set_footer(text="仮 = Tentative , 補 = Substitute")
else:
mtmp = ">>> " + " , ".join(str(x) for x in server.time[i].name)
if mtmp == ">>> ":
mtmp = ">>> " + words.now(server.langage)
embed.add_field(name="".join(str(x) for x in server.time[i].n), value=mtmp, inline=False)
m = mall# + mwar + "```" + mtmp + "```"
return m , embed
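# Illustrative sketch (assumption, not part of the bot's Discord command handling):
# register a war time, sign up one player and one reserve, then render the list the same
# way the bot would before sending it to a channel.
def _example_usage():
    server = guild()
    server.set("21:00")
    server.time["21:00"].add("player1")
    server.time["21:00"].reserve("sub1")
    text, embed = nowhands(server)
    return text, embed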
| 32.336735 | 135 | 0.457242 |
7944576a31ed28becd72b58a857a6b15ddb9cd94 | 548 | py | Python | cluster_coords/__init__.py | sonnyparlin/cluster_coords | 13d1872957fedafd9b76f9b2913b297549fb173b | [
"MIT"
] | null | null | null | cluster_coords/__init__.py | sonnyparlin/cluster_coords | 13d1872957fedafd9b76f9b2913b297549fb173b | [
"MIT"
] | null | null | null | cluster_coords/__init__.py | sonnyparlin/cluster_coords | 13d1872957fedafd9b76f9b2913b297549fb173b | [
"MIT"
] | null | null | null | import math
import random
class RandomCoord:
def __init__(self, lng, lat):
self.lng = lng
self.lat = lat
def randomize_coords(self):
#Earth’s radius, sphere
R=6378137
# offsets in meters
dn = random.randint(5,25)
de = random.randint(5,25)
#Coordinate offsets in radians
dLat = dn/R
dLng = de/(R*math.cos(math.pi*self.lat/180))
#OffsetPosition, decimal degrees
latO = self.lat + dLat * 180/math.pi
lngO = self.lng + dLng * 180/math.pi
self.lng=lngO
self.lat=latO
return self
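# Minimal usage sketch: nudge a point by a random 5-25 m offset so that markers sharing
# the exact same coordinates do not fully overlap on a map (the values are illustrative).
def _example_usage():
    point = RandomCoord(-82.45, 27.95)  # lng, lat
    point.randomize_coords()
    return point.lng, point.lat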
| 19.571429 | 48 | 0.638686 |
794458acb22d4b598726794c0ec6123263812cd6 | 2,718 | py | Python | anime/tests/rubberband_test.py | SodaCookie/pygame-animations | 44546b5254a659a38bed744e81008d999a328fb2 | [
"MIT"
] | 14 | 2015-07-02T03:27:23.000Z | 2017-10-13T09:27:21.000Z | anime/tests/rubberband_test.py | SodaCookie/pygame-animations | 44546b5254a659a38bed744e81008d999a328fb2 | [
"MIT"
] | 2 | 2015-07-30T04:07:37.000Z | 2015-07-30T04:15:43.000Z | anime/tests/rubberband_test.py | SodaCookie/pygame-animations | 44546b5254a659a38bed744e81008d999a328fb2 | [
"MIT"
] | 7 | 2015-09-09T15:04:45.000Z | 2019-11-21T04:52:09.000Z | import unittest
from anime.core.rubberband import RubberBand
from anime.core.filter import linear
class RubberTestClass(RubberBand):
def __init__(self):
super().__init__()
self.addition = 1
self.x = 0
self.y = 0
self.top_level = "test"
def set_pos(self, pos):
self.x = pos[0]
self.y = pos[1]
def get_pos(self):
return self.x, self.y
pos = property(get_pos, set_pos)
class RubberBandTester(unittest.TestCase):
def setUp(self):
self.a = RubberTestClass()
self.b = RubberTestClass()
self.c = RubberTestClass()
self.b.set_owner(self.a)
self.c.set_owner(self.b)
def tearDown(self):
del self.a
del self.b
del self.c
def test_addition(self):
self.assertEqual(self.c.get_absolute_value("addition"), 3)
self.assertEqual(self.b.get_absolute_value("addition"), 2)
def test_top_level(self):
self.assertEqual(self.c.get_absolute_value('top_level'), "test")
self.a.top_level = "test2"
self.assertEqual(self.c.get_absolute_value('top_level'), "test2")
self.b.top_level = "test3"
self.assertEqual(self.c.get_absolute_value('top_level'), "test2")
def test_addition_propagate(self):
self.a.addition = 4
self.assertEqual(self.c.get_absolute_value("addition"), 6)
def test_no_filter(self):
self.a.top_level = "test2"
self.assertFalse(self.a.is_dirty()) # does not contain a filter
def test_property(self):
self.a.set_filter('x', linear)
self.a.set_filter('y', linear)
self.a.pos = (1, 2)
self.assertEqual(self.a.get_dest('x'), 1)
self.assertEqual(self.a.get_dest('y'), 2)
def test_group_set(self):
self.a.group_set(x=10, y=5, top_level="hello")
self.assertEqual(self.a.x, 10)
self.assertEqual(self.a.y, 5)
self.assertEqual(self.a.top_level, "hello")
self.a.group_set({'x' : 5, 'y' : 10, 'top_level' : "world"})
self.assertEqual(self.a.x, 5)
self.assertEqual(self.a.y, 10)
self.assertEqual(self.a.top_level, "world")
def test_group_set_error(self):
with self.assertRaises(AttributeError):
self.a.group_set(doesnt_exist="hello")
def test_force_set(self):
self.a.set_filter('x', linear, 1)
self.b.set_filter('x', linear, 1)
self.a.x = 10
self.b.force_set('x', 10)
self.assertTrue(self.a.is_attr_dirty('x'))
self.assertEqual(self.a.x, 0)
self.assertFalse(self.b.is_attr_dirty('x'))
self.assertEqual(self.b.x, 10)
if __name__ == '__main__':
unittest.main() | 28.914894 | 73 | 0.615526 |
79445926a8fbde833ff0699dfc1be53a95d10378 | 20,686 | py | Python | tests/test_dynamodb/exceptions/test_dynamodb_exceptions.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | null | null | null | tests/test_dynamodb/exceptions/test_dynamodb_exceptions.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | 1 | 2022-03-07T07:39:03.000Z | 2022-03-07T07:39:03.000Z | tests/test_dynamodb/exceptions/test_dynamodb_exceptions.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | null | null | null | import boto3
import pytest
import sure # noqa # pylint: disable=unused-import
from boto3.dynamodb.conditions import Key
from botocore.exceptions import ClientError
from moto import mock_dynamodb
table_schema = {
"KeySchema": [{"AttributeName": "partitionKey", "KeyType": "HASH"}],
"GlobalSecondaryIndexes": [
{
"IndexName": "GSI-K1",
"KeySchema": [
{"AttributeName": "gsiK1PartitionKey", "KeyType": "HASH"},
{"AttributeName": "gsiK1SortKey", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "KEYS_ONLY"},
}
],
"AttributeDefinitions": [
{"AttributeName": "partitionKey", "AttributeType": "S"},
{"AttributeName": "gsiK1PartitionKey", "AttributeType": "S"},
{"AttributeName": "gsiK1SortKey", "AttributeType": "S"},
],
}
@mock_dynamodb
def test_query_gsi_with_wrong_key_attribute_names_throws_exception():
item = {
"partitionKey": "pk-1",
"gsiK1PartitionKey": "gsi-pk",
"gsiK1SortKey": "gsi-sk",
"someAttribute": "lore ipsum",
}
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema
)
table = dynamodb.Table("test-table")
table.put_item(Item=item)
# check using wrong name for sort key throws exception
with pytest.raises(ClientError) as exc:
table.query(
KeyConditionExpression="gsiK1PartitionKey = :pk AND wrongName = :sk",
ExpressionAttributeValues={":pk": "gsi-pk", ":sk": "gsi-sk"},
IndexName="GSI-K1",
)["Items"]
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"Query condition missed key schema element: gsiK1SortKey"
)
# check using wrong name for partition key throws exception
with pytest.raises(ClientError) as exc:
table.query(
KeyConditionExpression="wrongName = :pk AND gsiK1SortKey = :sk",
ExpressionAttributeValues={":pk": "gsi-pk", ":sk": "gsi-sk"},
IndexName="GSI-K1",
)["Items"]
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"Query condition missed key schema element: gsiK1PartitionKey"
)
# verify same behaviour for begins_with
with pytest.raises(ClientError) as exc:
table.query(
KeyConditionExpression="gsiK1PartitionKey = :pk AND begins_with ( wrongName , :sk )",
ExpressionAttributeValues={":pk": "gsi-pk", ":sk": "gsi-sk"},
IndexName="GSI-K1",
)["Items"]
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"Query condition missed key schema element: gsiK1SortKey"
)
# verify same behaviour for between
with pytest.raises(ClientError) as exc:
table.query(
KeyConditionExpression="gsiK1PartitionKey = :pk AND wrongName BETWEEN :sk1 and :sk2",
ExpressionAttributeValues={
":pk": "gsi-pk",
":sk1": "gsi-sk",
":sk2": "gsi-sk2",
},
IndexName="GSI-K1",
)["Items"]
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"Query condition missed key schema element: gsiK1SortKey"
)
@mock_dynamodb
def test_empty_expressionattributenames():
ddb = boto3.resource("dynamodb", region_name="us-east-1")
ddb.create_table(
TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema
)
table = ddb.Table("test-table")
with pytest.raises(ClientError) as exc:
table.get_item(Key={"id": "my_id"}, ExpressionAttributeNames={})
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"ExpressionAttributeNames can only be specified when using expressions"
)
@mock_dynamodb
def test_empty_expressionattributenames_with_empty_projection():
ddb = boto3.resource("dynamodb", region_name="us-east-1")
ddb.create_table(
TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema
)
table = ddb.Table("test-table")
with pytest.raises(ClientError) as exc:
table.get_item(
Key={"id": "my_id"}, ProjectionExpression="", ExpressionAttributeNames={}
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal("ExpressionAttributeNames must not be empty")
@mock_dynamodb
def test_empty_expressionattributenames_with_projection():
ddb = boto3.resource("dynamodb", region_name="us-east-1")
ddb.create_table(
TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema
)
table = ddb.Table("test-table")
with pytest.raises(ClientError) as exc:
table.get_item(
Key={"id": "my_id"}, ProjectionExpression="id", ExpressionAttributeNames={}
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal("ExpressionAttributeNames must not be empty")
@mock_dynamodb
def test_update_item_range_key_set():
ddb = boto3.resource("dynamodb", region_name="us-east-1")
# Create the DynamoDB table.
table = ddb.create_table(
TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema
)
with pytest.raises(ClientError) as exc:
table.update_item(
Key={"partitionKey": "the-key"},
UpdateExpression="ADD x :one SET a = :a ADD y :one",
ExpressionAttributeValues={":one": 1, ":a": "lore ipsum"},
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
'Invalid UpdateExpression: The "ADD" section can only be used once in an update expression;'
)
@mock_dynamodb
def test_batch_get_item_non_existing_table():
client = boto3.client("dynamodb", region_name="us-west-2")
with pytest.raises(client.exceptions.ResourceNotFoundException) as exc:
client.batch_get_item(RequestItems={"my-table": {"Keys": [{"id": {"N": "0"}}]}})
err = exc.value.response["Error"]
assert err["Code"].should.equal("ResourceNotFoundException")
assert err["Message"].should.equal("Requested resource not found")
@mock_dynamodb
def test_batch_write_item_non_existing_table():
client = boto3.client("dynamodb", region_name="us-west-2")
with pytest.raises(client.exceptions.ResourceNotFoundException) as exc:
# Table my-table does not exist
client.batch_write_item(
RequestItems={"my-table": [{"PutRequest": {"Item": {}}}]}
)
err = exc.value.response["Error"]
assert err["Code"].should.equal("ResourceNotFoundException")
assert err["Message"].should.equal("Requested resource not found")
@mock_dynamodb
def test_create_table_with_redundant_attributes():
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
with pytest.raises(ClientError) as exc:
dynamodb.create_table(
TableName="test-table",
AttributeDefinitions=[
{"AttributeName": "id", "AttributeType": "S"},
{"AttributeName": "created_at", "AttributeType": "N"},
],
KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
BillingMode="PAY_PER_REQUEST",
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"One or more parameter values were invalid: Number of attributes in KeySchema does not exactly match number of attributes defined in AttributeDefinitions"
)
with pytest.raises(ClientError) as exc:
dynamodb.create_table(
TableName="test-table",
AttributeDefinitions=[
{"AttributeName": "id", "AttributeType": "S"},
{"AttributeName": "user", "AttributeType": "S"},
{"AttributeName": "created_at", "AttributeType": "N"},
],
KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
GlobalSecondaryIndexes=[
{
"IndexName": "gsi_user-items",
"KeySchema": [{"AttributeName": "user", "KeyType": "HASH"}],
"Projection": {"ProjectionType": "ALL"},
}
],
BillingMode="PAY_PER_REQUEST",
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"One or more parameter values were invalid: Some AttributeDefinitions are not used. AttributeDefinitions: [created_at, id, user], keys used: [id, user]"
)
@mock_dynamodb
def test_create_table_with_missing_attributes():
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
with pytest.raises(ClientError) as exc:
dynamodb.create_table(
TableName="test-table",
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
KeySchema=[
{"AttributeName": "id", "KeyType": "HASH"},
{"AttributeName": "created_at", "KeyType": "RANGE"},
],
BillingMode="PAY_PER_REQUEST",
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"Invalid KeySchema: Some index key attribute have no definition"
)
with pytest.raises(ClientError) as exc:
dynamodb.create_table(
TableName="test-table",
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
GlobalSecondaryIndexes=[
{
"IndexName": "gsi_user-items",
"KeySchema": [{"AttributeName": "user", "KeyType": "HASH"}],
"Projection": {"ProjectionType": "ALL"},
}
],
BillingMode="PAY_PER_REQUEST",
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"One or more parameter values were invalid: Some index key attributes are not defined in AttributeDefinitions. Keys: [user], AttributeDefinitions: [id]"
)
@mock_dynamodb
def test_create_table_with_redundant_and_missing_attributes():
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
with pytest.raises(ClientError) as exc:
dynamodb.create_table(
TableName="test-table",
AttributeDefinitions=[
{"AttributeName": "created_at", "AttributeType": "N"}
],
KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
BillingMode="PAY_PER_REQUEST",
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"One or more parameter values were invalid: Some index key attributes are not defined in AttributeDefinitions. Keys: [id], AttributeDefinitions: [created_at]"
)
with pytest.raises(ClientError) as exc:
dynamodb.create_table(
TableName="test-table",
AttributeDefinitions=[
{"AttributeName": "id", "AttributeType": "S"},
{"AttributeName": "created_at", "AttributeType": "N"},
],
KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
GlobalSecondaryIndexes=[
{
"IndexName": "gsi_user-items",
"KeySchema": [{"AttributeName": "user", "KeyType": "HASH"}],
"Projection": {"ProjectionType": "ALL"},
}
],
BillingMode="PAY_PER_REQUEST",
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"One or more parameter values were invalid: Some index key attributes are not defined in AttributeDefinitions. Keys: [user], AttributeDefinitions: [created_at, id]"
)
@mock_dynamodb
def test_put_item_wrong_attribute_type():
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="test-table",
AttributeDefinitions=[
{"AttributeName": "id", "AttributeType": "S"},
{"AttributeName": "created_at", "AttributeType": "N"},
],
KeySchema=[
{"AttributeName": "id", "KeyType": "HASH"},
{"AttributeName": "created_at", "KeyType": "RANGE"},
],
BillingMode="PAY_PER_REQUEST",
)
item = {
"id": {"N": "1"}, # should be a string
"created_at": {"N": "2"},
"someAttribute": {"S": "lore ipsum"},
}
with pytest.raises(ClientError) as exc:
dynamodb.put_item(TableName="test-table", Item=item)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"One or more parameter values were invalid: Type mismatch for key id expected: S actual: N"
)
item = {
"id": {"S": "some id"},
"created_at": {"S": "should be date not string"},
"someAttribute": {"S": "lore ipsum"},
}
with pytest.raises(ClientError) as exc:
dynamodb.put_item(TableName="test-table", Item=item)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"One or more parameter values were invalid: Type mismatch for key created_at expected: N actual: S"
)
@mock_dynamodb
# https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html#DDB-Query-request-KeyConditionExpression
def test_hash_key_cannot_use_begins_with_operations():
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
table = dynamodb.create_table(
TableName="test-table",
KeySchema=[{"AttributeName": "key", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "key", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
)
items = [
{"key": "prefix-$LATEST", "value": "$LATEST"},
{"key": "prefix-DEV", "value": "DEV"},
{"key": "prefix-PROD", "value": "PROD"},
]
with table.batch_writer() as batch:
for item in items:
batch.put_item(Item=item)
table = dynamodb.Table("test-table")
with pytest.raises(ClientError) as ex:
table.query(KeyConditionExpression=Key("key").begins_with("prefix-"))
ex.value.response["Error"]["Code"].should.equal("ValidationException")
ex.value.response["Error"]["Message"].should.equal(
"Query key condition not supported"
)
# Test this again, but with manually supplying an operator
@mock_dynamodb
@pytest.mark.parametrize("operator", ["<", "<=", ">", ">="])
def test_hash_key_can_only_use_equals_operations(operator):
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="test-table",
KeySchema=[{"AttributeName": "pk", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "pk", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
)
table = dynamodb.Table("test-table")
with pytest.raises(ClientError) as exc:
table.query(
KeyConditionExpression=f"pk {operator} :pk",
ExpressionAttributeValues={":pk": "p"},
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal("Query key condition not supported")
@mock_dynamodb
def test_creating_table_with_0_local_indexes():
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
with pytest.raises(ClientError) as exc:
dynamodb.create_table(
TableName="test-table",
KeySchema=[{"AttributeName": "pk", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "pk", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
LocalSecondaryIndexes=[],
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"One or more parameter values were invalid: List of LocalSecondaryIndexes is empty"
)
@mock_dynamodb
def test_creating_table_with_0_global_indexes():
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
with pytest.raises(ClientError) as exc:
dynamodb.create_table(
TableName="test-table",
KeySchema=[{"AttributeName": "pk", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "pk", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
GlobalSecondaryIndexes=[],
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"One or more parameter values were invalid: List of GlobalSecondaryIndexes is empty"
)
@mock_dynamodb
def test_multiple_transactions_on_same_item():
table_schema = {
"KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}],
"AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"}],
}
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema
)
# Insert an item
dynamodb.put_item(TableName="test-table", Item={"id": {"S": "foo"}})
def update_email_transact(email):
return {
"Update": {
"Key": {"id": {"S": "foo"}},
"TableName": "test-table",
"UpdateExpression": "SET #e = :v",
"ExpressionAttributeNames": {"#e": "email_address"},
"ExpressionAttributeValues": {":v": {"S": email}},
}
}
with pytest.raises(ClientError) as exc:
dynamodb.transact_write_items(
TransactItems=[
update_email_transact("[email protected]"),
update_email_transact("[email protected]"),
]
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"Transaction request cannot include multiple operations on one item"
)
@mock_dynamodb
def test_transact_write_items__too_many_transactions():
table_schema = {
"KeySchema": [{"AttributeName": "pk", "KeyType": "HASH"}],
"AttributeDefinitions": [{"AttributeName": "pk", "AttributeType": "S"}],
}
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema
)
def update_email_transact(email):
return {
"Put": {
"TableName": "test-table",
"Item": {"pk": {"S": ":v"}},
"ExpressionAttributeValues": {":v": {"S": email}},
}
}
update_email_transact(f"[email protected]")
with pytest.raises(ClientError) as exc:
dynamodb.transact_write_items(
TransactItems=[
update_email_transact(f"test{idx}@moto.com") for idx in range(26)
]
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.match("Member must have length less than or equal to 25")
@mock_dynamodb
def test_update_item_non_existent_table():
client = boto3.client("dynamodb", region_name="us-west-2")
with pytest.raises(client.exceptions.ResourceNotFoundException) as exc:
client.update_item(
TableName="non-existent",
Key={"forum_name": {"S": "LOLCat Forum"}},
UpdateExpression="set Body=:Body",
ExpressionAttributeValues={":Body": {"S": ""}},
)
err = exc.value.response["Error"]
assert err["Code"].should.equal("ResourceNotFoundException")
assert err["Message"].should.equal("Requested resource not found")
| 37.610909 | 172 | 0.618293 |
7944598b777cc351d1d273e27b1fd75b69da8ce4 | 1,802 | py | Python | colossalai/nn/layer/parallel_1d/_operation.py | RichardoLuo/ColossalAI | 797a9dc5a9e801d7499b8667c3ef039a38aa15ba | [
"Apache-2.0"
] | 1,630 | 2021-10-30T01:00:27.000Z | 2022-03-31T23:02:41.000Z | colossalai/nn/layer/parallel_1d/_operation.py | RichardoLuo/ColossalAI | 797a9dc5a9e801d7499b8667c3ef039a38aa15ba | [
"Apache-2.0"
] | 166 | 2021-10-30T01:03:01.000Z | 2022-03-31T14:19:07.000Z | colossalai/nn/layer/parallel_1d/_operation.py | RichardoLuo/ColossalAI | 797a9dc5a9e801d7499b8667c3ef039a38aa15ba | [
"Apache-2.0"
] | 253 | 2021-10-30T06:10:29.000Z | 2022-03-31T13:30:06.000Z | import torch
try:
import fused_mix_prec_layer_norm_cuda
except:
fused_mix_prec_layer_norm_cuda = None
class FusedLayerNormAffineFunction1D(torch.autograd.Function):
r"""Layernorm
Args:
input: input matrix.
weight: weight matrix.
bias: bias matrix.
normalized_shape: input shape from an expected input of size.
:math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1] \times \ldots \times \text{normalized_shape}[-1]]`
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
eps: a value added to the denominator for numerical stability
"""
@staticmethod
def forward(ctx, input, weight, bias, normalized_shape, eps):
ctx.normalized_shape = normalized_shape
ctx.eps = eps
input_ = input.contiguous()
weight_ = weight.contiguous()
bias_ = bias.contiguous()
output, mean, invvar = fused_mix_prec_layer_norm_cuda.forward_affine(input_, ctx.normalized_shape, weight_,
bias_, ctx.eps)
ctx.save_for_backward(input_, weight_, bias_, mean, invvar)
return output
@staticmethod
def backward(ctx, grad_output):
input_, weight_, bias_, mean, invvar = ctx.saved_tensors
grad_input = grad_weight = grad_bias = None
grad_input, grad_weight, grad_bias \
= fused_mix_prec_layer_norm_cuda.backward_affine(
grad_output.contiguous(), mean, invvar,
input_, ctx.normalized_shape,
weight_, bias_, ctx.eps)
return grad_input, grad_weight, grad_bias, None, None
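# Usage sketch (assumption): the autograd Function is applied like any other, provided the
# fused CUDA extension imported above is available (it is None when the import failed).
# Shapes follow torch.nn.LayerNorm: input (*, hidden), weight and bias of shape (hidden,).
def _example_apply(input, weight, bias, eps=1e-5):
    return FusedLayerNormAffineFunction1D.apply(input, weight, bias, (input.shape[-1],), eps)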
| 39.173913 | 140 | 0.649834 |
79445a7a3578e26f5bf3b7563541d918868fa6b0 | 5,060 | py | Python | opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py | oxeye-nikolay/opentelemetry-python | b59e91472cb178a7fa94e243f3f0952aa720feee | [
"Apache-2.0"
] | null | null | null | opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py | oxeye-nikolay/opentelemetry-python | b59e91472cb178a7fa94e243f3f0952aa720feee | [
"Apache-2.0"
] | null | null | null | opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py | oxeye-nikolay/opentelemetry-python | b59e91472cb178a7fa94e243f3f0952aa720feee | [
"Apache-2.0"
] | null | null | null | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
OpenTelemetry SDK Configurator for Easy Instrumentation with Distros
"""
from os import environ
from typing import Sequence, Tuple
from pkg_resources import iter_entry_points
from opentelemetry import trace
from opentelemetry.environment_variables import (
OTEL_PYTHON_ID_GENERATOR,
OTEL_TRACES_EXPORTER,
)
from opentelemetry.instrumentation.configurator import BaseConfigurator
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter
from opentelemetry.sdk.trace.id_generator import IdGenerator
_EXPORTER_OTLP = "otlp"
_EXPORTER_OTLP_SPAN = "otlp_proto_grpc_span"
_RANDOM_ID_GENERATOR = "random"
_DEFAULT_ID_GENERATOR = _RANDOM_ID_GENERATOR
def _get_id_generator() -> str:
return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR)
def _get_exporter_names() -> Sequence[str]:
trace_exporters = environ.get(OTEL_TRACES_EXPORTER)
exporters = set()
if trace_exporters and trace_exporters.lower().strip() != "none":
exporters.update(
{
trace_exporter.strip()
for trace_exporter in trace_exporters.split(",")
}
)
if _EXPORTER_OTLP in exporters:
exporters.remove(_EXPORTER_OTLP)
exporters.add(_EXPORTER_OTLP_SPAN)
return list(exporters)
def _init_tracing(
exporters: Sequence[SpanExporter], id_generator: IdGenerator
):
# if env var OTEL_RESOURCE_ATTRIBUTES is given, it will read the service_name
# from the env variable else defaults to "unknown_service"
provider = TracerProvider(
id_generator=id_generator(),
)
trace.set_tracer_provider(provider)
for _, exporter_class in exporters.items():
exporter_args = {}
provider.add_span_processor(
BatchSpanProcessor(exporter_class(**exporter_args))
)
def _import_tracer_provider_config_components(
selected_components, entry_point_name
) -> Sequence[Tuple[str, object]]:
component_entry_points = {
ep.name: ep for ep in iter_entry_points(entry_point_name)
}
component_impls = []
for selected_component in selected_components:
entry_point = component_entry_points.get(selected_component, None)
if not entry_point:
raise RuntimeError(
f"Requested component '{selected_component}' not found in entry points for '{entry_point_name}'"
)
component_impl = entry_point.load()
component_impls.append((selected_component, component_impl))
return component_impls
def _import_exporters(
exporter_names: Sequence[str],
) -> Sequence[SpanExporter]:
trace_exporters = {}
for (
exporter_name,
exporter_impl,
) in _import_tracer_provider_config_components(
exporter_names, "opentelemetry_traces_exporter"
):
if issubclass(exporter_impl, SpanExporter):
trace_exporters[exporter_name] = exporter_impl
else:
raise RuntimeError(f"{exporter_name} is not a trace exporter")
return trace_exporters
def _import_id_generator(id_generator_name: str) -> IdGenerator:
# pylint: disable=unbalanced-tuple-unpacking
[
(id_generator_name, id_generator_impl)
] = _import_tracer_provider_config_components(
[id_generator_name.strip()], "opentelemetry_id_generator"
)
if issubclass(id_generator_impl, IdGenerator):
return id_generator_impl
raise RuntimeError(f"{id_generator_name} is not an IdGenerator")
def _initialize_components():
exporter_names = _get_exporter_names()
trace_exporters = _import_exporters(exporter_names)
id_generator_name = _get_id_generator()
id_generator = _import_id_generator(id_generator_name)
_init_tracing(trace_exporters, id_generator)
class _OTelSDKConfigurator(BaseConfigurator):
"""A basic Configurator by OTel Python for initalizing OTel SDK components
Initializes several crucial OTel SDK components (i.e. TracerProvider,
MeterProvider, Processors...) according to a default implementation. Other
Configurators can subclass and slightly alter this initialization.
NOTE: This class should not be instantiated nor should it become an entry
point on the `opentelemetry-sdk` package. Instead, distros should subclass
    this Configurator and enhance it as needed.
"""
def _configure(self, **kwargs):
_initialize_components()
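# Illustrative sketch (assumption, not shipped with the SDK): a distro-specific package
# would subclass the configurator above and extend _configure with its own setup.
class _ExampleDistroConfigurator(_OTelSDKConfigurator):
    def _configure(self, **kwargs):
        super()._configure(**kwargs)
        # distro-specific additions (extra span processors, propagators, ...) would go here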
| 32.229299 | 112 | 0.737549 |
79445ad7e44ebce2de5cb4865b449816c15ac321 | 8,629 | py | Python | candidate/Cuckoo.py | pckennethma/NeuralD | 226d7ea5c715c3fe29b311e896268462a4727713 | [
"Apache-2.0"
] | null | null | null | candidate/Cuckoo.py | pckennethma/NeuralD | 226d7ea5c715c3fe29b311e896268462a4727713 | [
"Apache-2.0"
] | null | null | null | candidate/Cuckoo.py | pckennethma/NeuralD | 226d7ea5c715c3fe29b311e896268462a4727713 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from util.solution_util import *
from logging import debug, info, error
import logging, time
logging.basicConfig(level=logging.ERROR)
class CuckooSimulator:
def __init__(self, table_size, keys, hash_func1, hash_func2):
self.hash_table = [-1 for i in range(table_size)]
self.key_num = len(keys)
self.keys = keys
self.hash_func1 = hash_func1
self.hash_func2 = hash_func2
self.has_cycle = self.insert()
def move(self, key, curr_loc, starting_key, is_first_time):
if starting_key == key and not is_first_time:
return False
location1 = self.hash_func1[key]
location2 = self.hash_func2[key]
if curr_loc == location1:
location = location2
else:
location = location1
if self.hash_table[location] == -1:
self.hash_table[location] = key
return True
else:
move_key = self.hash_table[location]
self.hash_table[location] = key
return self.move(move_key, location, starting_key, False)
def insert(self):
for key in self.keys:
location = self.hash_func1[key]
if self.hash_table[location] == -1:
self.hash_table[location] = key
else:
move_key = self.hash_table[location]
self.hash_table[location] = key
rlt = self.move(move_key, location, key, True)
if not rlt:
return False
return True
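# Minimal usage sketch (illustrative only, not used by the ORAM protocol below): insert
# three keys into an 8-slot table with two toy hash mappings. Note that `has_cycle`
# simply stores whatever insert() returned above.
def _cuckoo_simulator_example():
    keys = [0, 1, 2]
    hash_func1 = {0: 0, 1: 1, 2: 0}  # assumed toy mapping: key -> first candidate slot
    hash_func2 = {0: 4, 1: 5, 2: 6}  # assumed toy mapping: key -> second candidate slot
    sim = CuckooSimulator(8, keys, hash_func1, hash_func2)
    return sim.hash_table, sim.has_cycle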
def construct_hash_func(virtual_memory__size, level):
dummy_size = 2**(level+1)
dont_care_size = 2**(level+1)
output_size = dummy_size * 4
rand_num = np.arange(0,output_size,dtype=int)
hash_func = np.random.choice(rand_num, size=virtual_memory__size+dummy_size+dont_care_size)
return hash_func
class CuckooProtocol:
def __init__(self, potential_size):
self.virtual_memory_size = potential_size
self.level_num = int(np.ceil(np.log2(potential_size)) + 1)
self.real_memory_size = sum([4*2**(i+1) for i in range(self.level_num)])
self.real_memory = RealMemory(self.real_memory_size)
self.hash_funcs1 = {i:construct_hash_func(potential_size, i) for i in range(self.level_num)}
self.hash_funcs2 = {i:construct_hash_func(potential_size, i) for i in range(self.level_num)}
def get_level(self, level):
level_start = sum([4*2**(i+1) for i in range(level)])
level_end = level_start + 4*2**(level+1)
return level_start, level_end
def scan_level1(self, bID):
l1_start, l1_end = self.get_level(0)
is_found = False
for addr in range(l1_start, l1_end):
mem_unit = self.real_memory.read(addr)
if mem_unit[0] is MemoryUnitType.NonEmpty and bID == mem_unit[1]["blockID"]:
is_found = True
return is_found
def retrieve_higher_level(self, level, bID):
level_start, _ = self.get_level(0)
is_found = False
addr1 = level_start + self.hash_funcs1[level][bID]
addr2 = level_start + self.hash_funcs2[level][bID]
mem_unit = self.real_memory.read(addr1)
if mem_unit[0] is MemoryUnitType.NonEmpty and bID == mem_unit[1]["blockID"]:
is_found = True
mem_unit = self.real_memory.read(addr2)
if mem_unit[0] is MemoryUnitType.NonEmpty and bID == mem_unit[1]["blockID"]:
is_found = True
return is_found
def write_back(self, bID):
l1_start, l1_end = self.get_level(0)
is_write = False
for addr in range(l1_start, l1_end):
mem_unit = self.real_memory.read(addr)
if mem_unit[0] is MemoryUnitType.NonEmpty and bID == mem_unit[1]["blockID"]:
self.real_memory.write(addr, bID)
is_write = True
elif mem_unit[0] is MemoryUnitType.Empty and not is_write:
self.real_memory.write(addr, bID)
is_write = True
else:
self.real_memory.write_nothing(addr)
return is_write
def move_down(self, level):
blocks = []
duplicates = []
curr_start, curr_end = self.get_level(level)
for addr in range(curr_start, curr_end):
mem_unit = self.real_memory.read(addr)
if mem_unit[0] is MemoryUnitType.NonEmpty and mem_unit[1]["blockID"] < self.virtual_memory_size:
blocks.append(mem_unit[1]["blockID"])
self.real_memory.clear_memory(addr)
next_start, next_end = self.get_level(level+1)
if level + 1 == self.level_num:
self.real_memory.allocate_more(4*2**(level+2))
self.level_num += 1
self.real_memory_size += 4*2**(level+2)
else:
for addr in range(next_start, next_end):
mem_unit = self.real_memory.read(addr)
if mem_unit[0] is MemoryUnitType.NonEmpty and mem_unit[1]["blockID"] < self.virtual_memory_size:
if mem_unit[1]["blockID"] in blocks:
pass
else:
blocks.append(mem_unit[1]["blockID"])
self.real_memory.clear_memory(addr)
self.hash_funcs1[level] = construct_hash_func(self.virtual_memory_size, level)
self.hash_funcs2[level] = construct_hash_func(self.virtual_memory_size, level)
# pad dummy block
to_be_insert = list(blocks) + [self.virtual_memory_size + dummy_offset for dummy_offset in range(2**(level+2))]
# pad don't-care block
to_be_insert += [ self.virtual_memory_size + i + 2**(level+2) for i in range(2*2**(level+2) - len(to_be_insert))]
has_cycle = True
while has_cycle:
self.hash_funcs1[level+1] = construct_hash_func(self.virtual_memory_size, level+1)
self.hash_funcs2[level+1] = construct_hash_func(self.virtual_memory_size, level+1)
cuckoo_simulator = CuckooSimulator(4*2**(level+2), to_be_insert, self.hash_funcs1[level+1], self.hash_funcs2[level+1])
has_cycle = cuckoo_simulator.has_cycle
# if has_cycle:
# print(4*2**(level+2), len(to_be_insert))
# print("has cycle")
for addr, block in enumerate(cuckoo_simulator.hash_table):
if block != -1:
self.real_memory.write(addr + next_start, block)
class VirtualMemory:
def __init__(self, size):
self.protocol = CuckooProtocol(size)
self.block_num = size
self.req_num = 0
def read(self, bID:int):
is_found = self.protocol.scan_level1(bID)
for level in range(1, self.protocol.level_num):
if is_found:
dummy_index = self.block_num + (self.req_num % (2**(level+1)))
self.protocol.retrieve_higher_level(level, dummy_index)
else:
is_found = self.protocol.retrieve_higher_level(level, bID)
is_write = self.protocol.write_back(bID)
assert is_write
self.req_num += 1
self.reshuffle()
def write(self, bID:int):
is_found = self.protocol.scan_level1(bID)
for level in range(1, self.protocol.level_num):
if is_found:
dummy_index = self.block_num + (self.req_num % (2**(level+1)))
self.protocol.retrieve_higher_level(level, dummy_index)
else:
is_found = self.protocol.retrieve_higher_level(level, bID)
is_write = self.protocol.write_back(bID)
assert is_write
self.req_num += 1
self.reshuffle()
def dump_seq(self, file_name):
self.protocol.real_memory.dump_memory_access_sequence(file_name)
def reshuffle(self):
for level in range(self.protocol.level_num):
if self.req_num % (2 ** (level + 1)) == 0:
self.protocol.move_down(level)
def dump_seq(self, label, file_name):
self.protocol.real_memory.dump_memory_access_sequence_with_label(label, file_name)
if __name__ == "__main__":
vm = VirtualMemory(4)
while True:
command = input("Command?")
if command == "D":
vm.protocol.real_memory.dump_memory()
for a in vm.protocol.real_memory.memory_access_seq:
print(a)
if command == "R":
bID = input("Block ID?")
vm.read(int(bID))
if command == "W":
bID = input("Block ID?")
vm.write(int(bID)) | 40.70283 | 130 | 0.605053 |
79445b26bb114547a8295c65156c1717ae0e6cec | 4,955 | py | Python | run_experiment_rq1.py | ntban15/retecs | b056d1a6548db3e879286b4017d45ac11e4eebef | [
"MIT"
] | null | null | null | run_experiment_rq1.py | ntban15/retecs | b056d1a6548db3e879286b4017d45ac11e4eebef | [
"MIT"
] | null | null | null | run_experiment_rq1.py | ntban15/retecs | b056d1a6548db3e879286b4017d45ac11e4eebef | [
"MIT"
] | 1 | 2021-05-16T09:37:37.000Z | 2021-05-16T09:37:37.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Experiment, evaluation and visualization for RQ1
from run_experiment_common import *
# For overriding defaults from run_experiment_common
PARALLEL = True
RUN_EXPERIMENT = True
VISUALIZE_RESULTS = True
def visualize():
search_pattern = 'rq_*_stats.p'
filename = 'rq'
iteration_results = glob.glob(os.path.join(DATA_DIR, search_pattern))
aggregated_results = os.path.join(DATA_DIR, filename)
df = stats.load_stats_dataframe(iteration_results, aggregated_results)
pure_df = df[(~df['agent'].isin(['heur_random', 'heur_sort', 'heur_weight'])) & (df['detected'] + df['missed']) > 0]
mean_df = pure_df.groupby(['step', 'env', 'agent', 'rewardfun'], as_index=False).mean()
# One subplot per data set (= one row in the paper)
# for env in mean_df['env'].unique():
# plotname = 'rq1_napfd_%s' % env
# fig, axarr = plt.subplots(1, 3, sharey=True, figsize=figsize_text(1.0, 0.45))
# i = 0
#
# for rewardfun in mean_df['rewardfun'].unique():
# for agidx, (labeltext, agent, linestyle) in enumerate(
# [('Network', 'mlpclassifier', '-'), ('Tableau', 'tableau', '--')]):
# rel_df = mean_df[(mean_df['env'] == env) & (mean_df['rewardfun'] == rewardfun)]
# rel_df[rel_df['agent'] == agent].plot(x='step', y='napfd', label=labeltext, ylim=[0, 1], linewidth=0.8,
# style=linestyle, color=sns.color_palette()[agidx], ax=axarr[i])
#
# x = rel_df.loc[rel_df['agent'] == agent, 'step']
# y = rel_df.loc[rel_df['agent'] == agent, 'napfd']
# trend = np.poly1d(np.polyfit(x, y, 1))
# axarr[i].plot(x, trend(x), linestyle, color='k', linewidth=0.8)
#
# axarr[i].set_xlabel('CI Cycle')
# axarr[i].legend_.remove()
# axarr[i].set_title(reward_names[rewardfun])
# axarr[i].set_xticks(np.arange(0, 350, 30), minor=False)
# axarr[i].set_xticklabels([0, '', 60, '', 120, '', 180, '', 240, '', 300], minor=False)
#
# axarr[i].xaxis.grid(True, which='minor')
#
# if i == 0:
# axarr[i].set_ylabel('NAPFD')
# axarr[i].legend(loc=2, frameon=True)
#
# i += 1
#
# fig.tight_layout()
# fig.subplots_adjust(wspace=0.08)
# save_figures(fig, plotname)
# plt.clf()
# One groupplot
fig, axarr = plt.subplots(3, 3, sharey=True, sharex=True, figsize=figsize_text(1.0, 1.2))
plotname = 'rq1_napfd'
subplot_labels = ['(a)', '(b)', '(c)']
for column, env in enumerate(sorted(mean_df['env'].unique(), reverse=True)):
for row, rewardfun in enumerate(mean_df['rewardfun'].unique()):
for agidx, (labeltext, agent, linestyle) in enumerate(
[('Network', 'mlpclassifier', '-'), ('Tableau', 'tableau', '--')]):
rel_df = mean_df[(mean_df['env'] == env) & (mean_df['rewardfun'] == rewardfun)]
rel_df[rel_df['agent'] == agent].plot(x='step', y='napfd', label=labeltext, ylim=[0, 1], linewidth=0.8,
style=linestyle, color=sns.color_palette()[agidx], ax=axarr[row, column])
x = rel_df.loc[rel_df['agent'] == agent, 'step']
y = rel_df.loc[rel_df['agent'] == agent, 'napfd']
trend = np.poly1d(np.polyfit(x, y, 1))
axarr[row, column].plot(x, trend(x), linestyle, color='k', linewidth=0.8)
axarr[row, column].legend_.remove()
axarr[row, column].set_xticks(np.arange(0, 350, 30), minor=False)
axarr[row, column].set_xticklabels([0, '', 2, '', 4, '', 6, '', 8, '', 10], minor=False)
axarr[row, column].xaxis.grid(True, which='major')
if column == 1:
axarr[row, column].set_title('\\textbf{%s %s}' % (subplot_labels[row], reward_names[rewardfun]))
if row == 0:
if column == 1:
axarr[row, column].set_title('%s\n\\textbf{%s %s}' % (env_names[env], subplot_labels[row], reward_names[rewardfun]))
else:
axarr[row, column].set_title(env_names[env] + '\n')
elif row == 2:
axarr[row, column].set_xlabel('CI Cycle')
if column == 0:
axarr[row, column].set_ylabel('NAPFD')
if row == 0 and column == 0:
axarr[row, column].legend(loc=2, ncol=2, frameon=True, bbox_to_anchor=(0.065, 1.1))
fig.tight_layout()
fig.subplots_adjust(wspace=0.06, hspace=0.3)
save_figures(fig, plotname)
plt.clf()
if __name__ == '__main__':
if RUN_EXPERIMENT:
run_experiments(exp_run_industrial_datasets, parallel=PARALLEL)
if VISUALIZE_RESULTS:
visualize()
| 42.715517 | 136 | 0.550151 |
79445b7f457e3aa2c27ce8033541af6e9b145c22 | 7,211 | py | Python | erde/op/sjoin.py | culebron/erde | 9bbaaa1df46629a182c355413a120aa33dc6b377 | [
"BSD-3-Clause"
] | 16 | 2021-08-24T05:59:04.000Z | 2021-11-16T12:30:34.000Z | erde/op/sjoin.py | culebron/erde | 9bbaaa1df46629a182c355413a120aa33dc6b377 | [
"BSD-3-Clause"
] | null | null | null | erde/op/sjoin.py | culebron/erde | 9bbaaa1df46629a182c355413a120aa33dc6b377 | [
"BSD-3-Clause"
] | 2 | 2021-08-30T10:27:13.000Z | 2021-08-31T09:46:49.000Z | import geopandas as gpd
def _sj(left_df, right_df, left_on, right_on, op, how):
left_tmp = _df_on(left_df, left_on, 'left')
right_tmp = _df_on(right_df, right_on, 'right')
return gpd.sjoin(left_tmp, right_tmp, op=op, how=how)
def sjfull(left_df, right_df, left_on='geometry', right_on='geometry', suffixes=('', '_right'), join='inner', op='intersects'):
"""Full sjoin: makes sjoin by temporary geometries and puts both geometries in the resulting dataframe.
Use left_on and right_on to join by, for instance, buffers, but have the original points in the result.
Parameters
----------
left_df : GeoDataFrame
right_df : GeoDataFrame
left_on : str or GeoSeries, default 'geometry'
Column in the left GeoDataFrame or a GeoSeries with the same index, by which to do spatial join. These are not added anywhere.
right_on : str or GeoSeries, default 'geometry'
Same in the right GeoDataFrame
suffixes : 2-tuple of str, default ('', '_right')
		Suffixes added if column names coincide, same as in pd.DataFrame.merge
	join : str, {'left', 'inner', 'right'}, default 'inner'
What kind of join to do.
* 'inner' keeps only records with matches
* 'left' keeps records from left_df if there's no right match
* 'right' keeps records from right_df if there's no left match
op : str, {'intersects', 'within', 'contains'}, default 'intersects'
How geometries should match, e.g. left-contains-right.
"""
m = _sj(left_df, right_df, left_on, right_on, op, join).drop('geometry', axis=1).reset_index()
index_left, index_right = 'index' + suffixes[0], 'index' + suffixes[1]
	m = m.rename(columns={'index': index_right if join == 'right' else index_left})
m['geometry' + suffixes[0]] = m[index_left].map(left_df['geometry'])
m['geometry' + suffixes[1]] = m[index_right].map(right_df['geometry'])
return m
def sagg(left_df, right_df, agg, left_on='geometry', right_on='geometry', suffixes=('', '_right'), join='left', op='intersects'):
"""Spatial aggregation. Aggregates the `right_df` attributes that spatially match `left_df`. E.g. if `left_df` is regions, and `right_df` is residential bulidings, this function can aggregate residents by regions:
regions_with_residents = sagg(regions_gdf, buildings_gdf, {'residents': 'sum'})
Parameters
----------
left_df : GeoDataFrame
Main dataframe, by which to aggregate.
right_df : GeoDataFrame
What dataframe to aggregate.
agg : dict
What to aggregate, format is the same as in pd.DataFrame.agg or gpd.dissolve.
left_on : str or GeoSeries, default 'geometry'
Column in the left GeoDataFrame or a GeoSeries with the same index, by which to do spatial join. These are not added anywhere.
right_on : str or GeoSeries, default 'geometry'
Same in the right GeoDataFrame
suffixes : 2-tuple of str, default ('', '_right')
		Suffixes added if column names coincide, same as in pd.DataFrame.merge
join : str, {'left' or 'inner'}, default 'left'
What kind of join to do.
op : str, {'intersects', 'within', 'contains'}, default 'intersects'
How geometries should match, e.g. left-contains-right.
Returns
-------
GeoDataFrame
A new dataframe, same as `left_df`, but with aggregated columns from `agg` argument.
"""
if not isinstance(agg, dict):
raise TypeError('agg argument must be a dict')
if len(agg) == 0:
raise ValueError('agg argument can\'t be empty')
m = _sj(left_df, right_df, left_on, right_on, op, join)
	ind = m.index_right if join != 'right' else m.index
for k in agg.keys(): # we put the data columns here, because they may contain `geometry` (_right), which gets lost after sjoin.
m[k] = ind.map(right_df[k])
m2 = m.groupby(m.index).agg(agg)
return left_df.join(m2, lsuffix=suffixes[0], rsuffix=suffixes[1], how=join)
def slookup(left_df, right_df, columns, left_on='geometry', right_on='geometry', suffixes=('', '_right'), join='left', op='intersects'):
"""Spatial lookup. For each row in left_df finds the matching record in right_df and takes the required columns. E.g. for each business, find its region:
business_plus_region = slookup(business_gdf, regions_gdf, 'name', suffixes=('', '_region'))
or
business_plus_region = slookup(business_gdf, regions_gdf, ['name', 'phone_code'], suffixes=('', '_region'))
Since lookup may find multiple matching geometries of right_df, it takes the first one. GeoPandas sjoin usually keeps the order as in original dataframes, but it's not guaranteed.
Parameters
----------
left_df : GeoDataFrame
For what to look up.
right_df : GeoDataFrame
Where to look up.
columns : str or iterable of str
Name(s) of column(s) to lookup and add to the left_df.
Other parameters are the same as in `sagg`.
Returns
-------
GeoDataFrame
A new dataframe, same as `left_df`, but also with looked up columns.
"""
if isinstance(columns, str):
columns = [columns]
return sagg(left_df, right_df, {k: 'first' for k in columns}, left_on, right_on, suffixes, join, op)
def sfilter(left_df, filter_geom, left_on='geometry', right_on='geometry', negative=False, op='intersects'):
"""Filters left_df by geometries in right_df.
Parameters
----------
left_df : GeoDataFrame
What to filter.
filter_geom : GeoDataFrame, GeoSeries, shapely geometry
With what to filter.
left_on : str or GeoSeries, default 'geometry'
Column in the left GeoDataFrame or a GeoSeries with the same index, by which to do spatial join. Not added anywhere.
For example, these can be buffers instead of original geometries (points), to filter by being within a distance.
right_on : str or GeoSeries, default 'geometry'
Same in the right GeoDataFrame
negative : bool, default False
Inverse filtering (keep those that don't match right_df geometries).
op : str, {'intersects', 'within', 'contains'}, default 'intersects'
How geometries should match, e.g. left-contains-right.
Returns
-------
GeoDataFrame
The rows of `left_df` that match the filter geometries (the non-matching rows when `negative=True`).
"""
from shapely.geometry.base import BaseGeometry
if not isinstance(filter_geom, (gpd.GeoDataFrame, gpd.GeoSeries, BaseGeometry)):
raise TypeError(f'filter_geom should be GeoDataFrame, GeoSeries or shapely geometry, got {filter_geom.__class__} instead')
if isinstance(filter_geom, BaseGeometry):
filter_geom = gpd.GeoDataFrame({'geometry': [filter_geom]}, crs=left_df.crs)
elif isinstance(filter_geom, gpd.GeoSeries):
filter_geom = gpd.GeoDataFrame({'geometry': filter_geom})
m = _sj(left_df, filter_geom, left_on, right_on, op, 'inner')
isin = left_df.index.isin(m.index)
if negative: isin = ~isin
return left_df[isin]
def _df_on(df, geom, kind):
"""Creates a temporary GeoDataFrame with same index and requested geometry (column name or GeoSeries), which is used in the other functions for sjoin."""
if kind not in ('left', 'right'):
raise ValueError("`kind` argument can be 'left' or 'right'")
if isinstance(geom, gpd.GeoSeries):
if not geom.index.equals(df.index):
raise ValueError(f'{kind}_on GeoSeries index differs from that of {kind} dataframe')
return gpd.GeoDataFrame({'geometry': geom}, index=df.index)
if isinstance(geom, str):
return df[[geom]]
raise TypeError(f'{kind}_on argument must be either string, or GeoSeries')
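# Illustrative usage sketch (added for clarity; the file names and column names
# below are assumptions, not part of the original module):
#
#     regions = gpd.read_file('regions.geojson')
#     buildings = gpd.read_file('buildings.geojson')
#
#     # sum the residents of the buildings that fall into each region
#     regions = sagg(regions, buildings, {'residents': 'sum'})
#
#     # attach the region name to every building
#     buildings = slookup(buildings, regions, 'name', suffixes=('', '_region'))
#
#     # keep only the buildings that intersect at least one region
#     buildings_inside = sfilter(buildings, regions)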
| 40.284916 | 214 | 0.726113 |
79445c8ea0af7eff2d00ddb50a9acf08bbd4f21f | 17,762 | py | Python | redash/handlers/queries.py | ivanli1990/redash | 4508975749368e5851fd01f88b83a7aa16c58f62 | [
"BSD-2-Clause"
] | 1 | 2019-07-31T01:41:13.000Z | 2019-07-31T01:41:13.000Z | redash/handlers/queries.py | ivanli1990/redash | 4508975749368e5851fd01f88b83a7aa16c58f62 | [
"BSD-2-Clause"
] | 1 | 2019-05-21T14:38:05.000Z | 2019-05-23T09:30:55.000Z | redash/handlers/queries.py | ivanli1990/redash | 4508975749368e5851fd01f88b83a7aa16c58f62 | [
"BSD-2-Clause"
] | 4 | 2019-07-01T06:15:44.000Z | 2021-12-11T11:17:08.000Z |
import sqlparse
from flask import jsonify, request, url_for
from flask_login import login_required
from flask_restful import abort
from sqlalchemy.orm.exc import StaleDataError
from funcy import partial
from redash import models, settings
from redash.authentication.org_resolving import current_org
from redash.handlers.base import (BaseResource, filter_by_tags, get_object_or_404,
org_scoped_rule, paginate, routes, order_results as _order_results)
from redash.handlers.query_results import run_query
from redash.permissions import (can_modify, not_view_only, require_access,
require_admin_or_owner,
require_object_modify_permission,
require_permission, view_only)
from redash.utils import collect_parameters_from_request
from redash.serializers import QuerySerializer
from redash.models.parameterized_query import ParameterizedQuery
# Ordering map for relationships
order_map = {
'name': 'lowercase_name',
'-name': '-lowercase_name',
'created_at': 'created_at',
'-created_at': '-created_at',
'schedule': 'schedule',
'-schedule': '-schedule',
'runtime': 'query_results-runtime',
'-runtime': '-query_results-runtime',
'executed_at': 'query_results-retrieved_at',
'-executed_at': '-query_results-retrieved_at',
'created_by': 'users-name',
'-created_by': '-users-name',
}
order_results = partial(
_order_results,
default_order='-created_at',
allowed_orders=order_map,
)
@routes.route(org_scoped_rule('/api/queries/format'), methods=['POST'])
@login_required
def format_sql_query(org_slug=None):
"""
Formats an SQL query using the Python ``sqlparse`` formatter.
:<json string query: The SQL text to format
:>json string query: Formatted SQL text
"""
arguments = request.get_json(force=True)
query = arguments.get("query", "")
return jsonify({'query': sqlparse.format(query, **settings.SQLPARSE_FORMAT_OPTIONS)})
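# Illustrative request/response sketch (not part of the original handler; the exact
# route prefix and output whitespace depend on deployment settings):
#
#     POST .../api/queries/format   {"query": "select id,name from users"}
#     -->                           {"query": <the same SQL pretty-printed according
#                                     to settings.SQLPARSE_FORMAT_OPTIONS>}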
class QuerySearchResource(BaseResource):
@require_permission('view_query')
def get(self):
"""
Search query text, names, and descriptions.
:qparam string q: Search term
:qparam number include_drafts: Whether to include draft in results
Responds with a list of :ref:`query <query-response-label>` objects.
"""
term = request.args.get('q', '')
if not term:
return []
include_drafts = request.args.get('include_drafts') is not None
self.record_event({
'action': 'search',
'object_type': 'query',
'term': term,
})
# this redirects to the new query list API that is aware of search
new_location = url_for(
'queries',
q=term,
org_slug=current_org.slug,
drafts='true' if include_drafts else 'false',
)
return {}, 301, {'Location': new_location}
class QueryRecentResource(BaseResource):
@require_permission('view_query')
def get(self):
"""
Retrieve up to 10 queries recently modified by the user.
Responds with a list of :ref:`query <query-response-label>` objects.
"""
results = models.Query.by_user(self.current_user).order_by(models.Query.updated_at.desc()).limit(10)
return QuerySerializer(results, with_last_modified_by=False, with_user=False).serialize()
class BaseQueryListResource(BaseResource):
def get_queries(self, search_term):
if search_term:
results = models.Query.search(
search_term,
self.current_user.group_ids,
self.current_user.id,
include_drafts=True,
)
else:
results = models.Query.all_queries(
self.current_user.group_ids,
self.current_user.id,
include_drafts=True,
)
return filter_by_tags(results, models.Query.tags)
@require_permission('view_query')
def get(self):
"""
Retrieve a list of queries.
:qparam number page_size: Number of queries to return per page
:qparam number page: Page number to retrieve
:qparam number order: Name of column to order by
:qparam number q: Full text search term
Responds with an array of :ref:`query <query-response-label>` objects.
"""
# See if we want to do full-text search or just regular queries
search_term = request.args.get('q', '')
queries = self.get_queries(search_term)
results = filter_by_tags(queries, models.Query.tags)
# order results according to passed order parameter,
# special-casing search queries where the database
# provides an order by search rank
ordered_results = order_results(results, fallback=not bool(search_term))
page = request.args.get('page', 1, type=int)
page_size = request.args.get('page_size', 25, type=int)
response = paginate(
ordered_results,
page=page,
page_size=page_size,
serializer=QuerySerializer,
with_stats=True,
with_last_modified_by=False
)
if search_term:
self.record_event({
'action': 'search',
'object_type': 'query',
'term': search_term,
})
else:
self.record_event({
'action': 'list',
'object_type': 'query',
})
return response
def require_access_to_dropdown_queries(user, query_def):
parameters = query_def.get('options', {}).get('parameters', [])
dropdown_query_ids = set([str(p['queryId']) for p in parameters if p['type'] == 'query'])
if dropdown_query_ids:
groups = models.Query.all_groups_for_query_ids(dropdown_query_ids)
if len(groups) < len(dropdown_query_ids):
abort(400, message="You are trying to associate a dropdown query that does not have a matching group. "
"Please verify the dropdown query id you are trying to associate with this query.")
require_access(dict(groups), user, view_only)
class QueryListResource(BaseQueryListResource):
@require_permission('create_query')
def post(self):
"""
Create a new query.
:<json number data_source_id: The ID of the data source this query will run on
:<json string query: Query text
:<json string name:
:<json string description:
:<json string schedule: Schedule interval, in seconds, for repeated execution of this query
:<json object options: Query options
.. _query-response-label:
:>json number id: Query ID
:>json number latest_query_data_id: ID for latest output data from this query
:>json string name:
:>json string description:
:>json string query: Query text
:>json string query_hash: Hash of query text
:>json string schedule: Schedule interval, in seconds, for repeated execution of this query
:>json string api_key: Key for public access to this query's results.
:>json boolean is_archived: Whether this query is displayed in indexes and search results or not.
:>json boolean is_draft: Whether this query is a draft or not
:>json string updated_at: Time of last modification, in ISO format
:>json string created_at: Time of creation, in ISO format
:>json number data_source_id: ID of the data source this query will run on
:>json object options: Query options
:>json number version: Revision version (for update conflict avoidance)
:>json number user_id: ID of query creator
:>json number last_modified_by_id: ID of user who last modified this query
:>json string retrieved_at: Time when query results were last retrieved, in ISO format (may be null)
:>json number runtime: Runtime of last query execution, in seconds (may be null)
"""
query_def = request.get_json(force=True)
data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)
require_access(data_source, self.current_user, not_view_only)
require_access_to_dropdown_queries(self.current_user, query_def)
for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:
query_def.pop(field, None)
query_def['query_text'] = query_def.pop('query')
query_def['user'] = self.current_user
query_def['data_source'] = data_source
query_def['org'] = self.current_org
query_def['is_draft'] = True
query = models.Query.create(**query_def)
models.db.session.add(query)
models.db.session.commit()
self.record_event({
'action': 'create',
'object_id': query.id,
'object_type': 'query'
})
return QuerySerializer(query, with_visualizations=True).serialize()
class QueryArchiveResource(BaseQueryListResource):
def get_queries(self, search_term):
if search_term:
return models.Query.search(
search_term,
self.current_user.group_ids,
self.current_user.id,
include_drafts=False,
include_archived=True,
)
else:
return models.Query.all_queries(
self.current_user.group_ids,
self.current_user.id,
include_drafts=False,
include_archived=True,
)
class MyQueriesResource(BaseResource):
@require_permission('view_query')
def get(self):
"""
Retrieve a list of queries created by the current user.
:qparam number page_size: Number of queries to return per page
:qparam number page: Page number to retrieve
:qparam number order: Name of column to order by
:qparam number search: Full text search term
Responds with an array of :ref:`query <query-response-label>` objects.
"""
search_term = request.args.get('q', '')
if search_term:
results = models.Query.search_by_user(search_term, self.current_user)
else:
results = models.Query.by_user(self.current_user)
results = filter_by_tags(results, models.Query.tags)
# order results according to passed order parameter,
# special-casing search queries where the database
# provides an order by search rank
ordered_results = order_results(results, fallback=not bool(search_term))
page = request.args.get('page', 1, type=int)
page_size = request.args.get('page_size', 25, type=int)
return paginate(
ordered_results,
page,
page_size,
QuerySerializer,
with_stats=True,
with_last_modified_by=False,
)
class QueryResource(BaseResource):
@require_permission('edit_query')
def post(self, query_id):
"""
Modify a query.
:param query_id: ID of query to update
:<json number data_source_id: The ID of the data source this query will run on
:<json string query: Query text
:<json string name:
:<json string description:
:<json string schedule: Schedule interval, in seconds, for repeated execution of this query
:<json object options: Query options
Responds with the updated :ref:`query <query-response-label>` object.
"""
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
query_def = request.get_json(force=True)
require_object_modify_permission(query, self.current_user)
require_access_to_dropdown_queries(self.current_user, query_def)
for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:
query_def.pop(field, None)
if 'query' in query_def:
query_def['query_text'] = query_def.pop('query')
if 'tags' in query_def:
query_def['tags'] = filter(None, query_def['tags'])
query_def['last_modified_by'] = self.current_user
query_def['changed_by'] = self.current_user
# SQLAlchemy handles the case where a concurrent transaction beats us
# to the update. But we still have to make sure that we're not starting
# out behind.
if 'version' in query_def and query_def['version'] != query.version:
abort(409)
try:
self.update_model(query, query_def)
models.db.session.commit()
except StaleDataError:
abort(409)
return QuerySerializer(query, with_visualizations=True).serialize()
@require_permission('view_query')
def get(self, query_id):
"""
Retrieve a query.
:param query_id: ID of query to fetch
Responds with the :ref:`query <query-response-label>` contents.
"""
q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(q, self.current_user, view_only)
result = QuerySerializer(q, with_visualizations=True).serialize()
result['can_edit'] = can_modify(q, self.current_user)
self.record_event({
'action': 'view',
'object_id': query_id,
'object_type': 'query',
})
return result
# TODO: move to resource of its own? (POST /queries/{id}/archive)
def delete(self, query_id):
"""
Archives a query.
:param query_id: ID of query to archive
"""
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_admin_or_owner(query.user_id)
query.archive(self.current_user)
models.db.session.commit()
class QueryForkResource(BaseResource):
@require_permission('edit_query')
def post(self, query_id):
"""
Creates a new query, copying the query text from an existing one.
:param query_id: ID of query to fork
Responds with created :ref:`query <query-response-label>` object.
"""
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query.data_source, self.current_user, not_view_only)
forked_query = query.fork(self.current_user)
models.db.session.commit()
self.record_event({
'action': 'fork',
'object_id': query_id,
'object_type': 'query',
})
return QuerySerializer(forked_query, with_visualizations=True).serialize()
class QueryRefreshResource(BaseResource):
def post(self, query_id):
"""
Execute a query, updating the query object with the results.
:param query_id: ID of query to execute
Responds with query task details.
"""
# TODO: this should actually check for permissions, but because currently you can only
# get here either with a user API key or a query one, we can just check whether it's
# an api key (meaning this is a query API key, which only grants read access).
if self.current_user.is_api_user():
abort(403, message="Please use a user API key.")
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query, self.current_user, not_view_only)
parameter_values = collect_parameters_from_request(request.args)
parameterized_query = ParameterizedQuery(query.query_text)
return run_query(parameterized_query, parameter_values, query.data_source, query.id)
class QueryTagsResource(BaseResource):
def get(self):
"""
Returns all query tags including those for drafts.
"""
tags = models.Query.all_tags(self.current_user, include_drafts=True)
return {
'tags': [
{
'name': name,
'count': count,
}
for name, count in tags
]
}
class QueryFavoriteListResource(BaseResource):
def get(self):
search_term = request.args.get('q')
if search_term:
base_query = models.Query.search(search_term, self.current_user.group_ids, include_drafts=True, limit=None)
favorites = models.Query.favorites(self.current_user, base_query=base_query)
else:
favorites = models.Query.favorites(self.current_user)
favorites = filter_by_tags(favorites, models.Query.tags)
# order results according to passed order parameter,
# special-casing search queries where the database
# provides an order by search rank
ordered_favorites = order_results(favorites, fallback=not bool(search_term))
page = request.args.get('page', 1, type=int)
page_size = request.args.get('page_size', 25, type=int)
response = paginate(
ordered_favorites,
page,
page_size,
QuerySerializer,
with_stats=True,
with_last_modified_by=False,
)
self.record_event({
'action': 'load_favorites',
'object_type': 'query',
'params': {
'q': search_term,
'tags': request.args.getlist('tags'),
'page': page
}
})
return response
| 35.955466 | 127 | 0.633544 |
79445cbd93d3b0ab3da8efb8f15fe2cfd2476d13 | 2,600 | py | Python | parser.py | SerjShepelevich/14-- | 97d020d4680fb7159a8bdd94563f5434a12ee1b9 | [
"MIT"
] | null | null | null | parser.py | SerjShepelevich/14-- | 97d020d4680fb7159a8bdd94563f5434a12ee1b9 | [
"MIT"
] | null | null | null | parser.py | SerjShepelevich/14-- | 97d020d4680fb7159a8bdd94563f5434a12ee1b9 | [
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
import re
import pprint
import pandas as pd
URL = 'https://www.drom.ru/reviews/toyota/prius/5kopeek/'
def num_elements(soup,type_, name_):
reviews = soup.find_all(type_, class_= name_)
if len(reviews) > 0:
return reviews
else:
return 0
def text_data(danniie_):
if danniie_ != 0:
return danniie_[0].text
else:
return ''
def grab_rev_from_page(page_URL):
page = requests.get(page_URL)
soup = BeautifulSoup(page.text, 'html.parser')
# get the list of reviews on the page
list_rev = num_elements(soup,'div', "b-media-cont b-media-cont_reviews")
rev_on_page = []
plus_rev = []
minus_rev = []
comment_rev = []
mod_desc = []
for l in list_rev:
reg = re.compile('[^a-zA-Z0-9,.а-яА-Я ]')
model_describe_text = text_data(num_elements(l, 'div', 'b-media-cont'))
model_describe_text = re.sub(' +', ' ', reg.sub('', model_describe_text))
mod_desc.append(model_describe_text)
plus_rev.append(text_data(num_elements(l, 'div', 'b-media-cont b-ico b-ico_type_plus-green b-ico_positioned_left')))
minus_rev.append(text_data(num_elements(l, 'div', 'b-media-cont b-ico b-ico_type_minus-red b-ico_positioned_left')))
comment_rev.append(text_data(num_elements(l, 'div', 'b-media-cont b-ico b-ico_type_repair b-ico_positioned_left')))
rev_on_page.append([mod_desc, plus_rev, minus_rev, comment_rev])
return rev_on_page
def list_pages(URL):
page = requests.get(URL)
soup = BeautifulSoup(page.text, 'html.parser')
list_pages = []
list_pages.append(URL)
num_pages = num_elements(soup,'div', "b-pagination__items")
for i in range(1,len(num_pages[0])):
list_pages.append(num_pages[0].contents[i].attrs['href'])
#pprint.pprint(list_pages)
return list_pages
def grab_rev_mashins(first_page_URL):
common_list = []
list_of_pages = list_pages(first_page_URL)
for URL in list_of_pages:
common_list.append(grab_rev_from_page(URL))
desc = []
plus_rev = []
minus_rev = []
comment_rev = []
for rec in common_list:
desc.extend(rec[0][0])
plus_rev.extend(rec[0][1])
minus_rev.extend(rec[0][2])
comment_rev.extend(rec[0][3])
common_dict = {}
common_dict.update({'desc':desc,
'plus_rev':plus_rev,
'minus_rev':minus_rev,
'comment_rev':comment_rev})
return common_dict
df = pd.DataFrame(data=grab_rev_mashins(URL))
df.to_csv('df_1.csv')
| 32.911392 | 124 | 0.651923 |
79445ce81fbd835aa2945a37aaadb7b752c4e27d | 1,999 | py | Python | lab2/text_recognizer/data/mnist.py | wook3024/fsdl-text-recognizer-2021-labs | 06ccc875eaa22971a9c3e78785566739ec75702a | [
"MIT"
] | null | null | null | lab2/text_recognizer/data/mnist.py | wook3024/fsdl-text-recognizer-2021-labs | 06ccc875eaa22971a9c3e78785566739ec75702a | [
"MIT"
] | null | null | null | lab2/text_recognizer/data/mnist.py | wook3024/fsdl-text-recognizer-2021-labs | 06ccc875eaa22971a9c3e78785566739ec75702a | [
"MIT"
] | null | null | null |
"""MNIST DataModule"""
import argparse
from torch.utils.data import random_split
from torchvision.datasets import MNIST as TorchMNIST
from torchvision import transforms
from text_recognizer.data.base_data_module import BaseDataModule, load_and_print_info
DOWNLOADED_DATA_DIRNAME = BaseDataModule.data_dirname() / "downloaded"
# NOTE: temp fix until https://github.com/pytorch/vision/issues/1938 is resolved
from six.moves import (
urllib,
) # pylint: disable=wrong-import-position, wrong-import-order
opener = urllib.request.build_opener()
opener.addheaders = [("User-agent", "Mozilla/5.0")]
urllib.request.install_opener(opener)
class MNIST(BaseDataModule):
"""
MNIST DataModule.
Learn more at https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html
"""
def __init__(self, args: argparse.Namespace) -> None:
super().__init__(args)
self.data_dir = DOWNLOADED_DATA_DIRNAME
self.transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
self.dims = (
1,
28,
28,
) # dims are returned when calling `.size()` on this object.
self.output_dims = (1,)
self.mapping = list(range(10))
def prepare_data(self, *args, **kwargs) -> None:
"""Download train and test MNIST data from PyTorch canonical source."""
TorchMNIST(self.data_dir, train=True, download=True)
TorchMNIST(self.data_dir, train=False, download=True)
def setup(self, stage=None) -> None:
"""Split into train, val, test, and set dims."""
mnist_full = TorchMNIST(self.data_dir, train=True, transform=self.transform)
self.data_train, self.data_val = random_split(mnist_full, [55000, 5000]) # type: ignore
self.data_test = TorchMNIST(
self.data_dir, train=False, transform=self.transform
)
if __name__ == "__main__":
load_and_print_info(MNIST)
| 34.465517 | 96 | 0.68084 |
794460a05b70998a4f793a12e82c7f15dc6cfee4 | 11,761 | py | Python | tools/client-side/svn-viewspec.py | tux-mind/platform_external_subversion | e3e715b637b0b7c6a6a02316f3be48f2c1d5181c | [
"Apache-2.0"
] | 2 | 2019-10-31T18:33:15.000Z | 2021-09-19T20:04:03.000Z | tools/client-side/svn-viewspec.py | wbond/subversion | 018aaa1933687f28bbfdcad9b7b988fd7435afcd | [
"Apache-2.0"
] | null | null | null | tools/client-side/svn-viewspec.py | wbond/subversion | 018aaa1933687f28bbfdcad9b7b988fd7435afcd | [
"Apache-2.0"
] | 2 | 2019-10-31T18:33:29.000Z | 2020-02-15T03:57:13.000Z |
#!/usr/bin/env python
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
"""\
Usage: 1. __SCRIPTNAME__ checkout VIEWSPEC-FILE TARGET-DIR
2. __SCRIPTNAME__ examine VIEWSPEC-FILE
3. __SCRIPTNAME__ help
4. __SCRIPTNAME__ help-format
VIEWSPEC-FILE is the path of a file whose contents describe a
Subversion sparse checkouts layout, or '-' if that description should
be read from stdin. TARGET-DIR is the working copy directory created
by this script as it checks out the specified layout.
1. Parse VIEWSPEC-FILE and execute the necessary 'svn' command-line
operations to build out a working copy tree at TARGET-DIR.
2. Parse VIEWSPEC-FILE and dump out a human-readable representation of
the tree described in the specification.
3. Show this usage message.
4. Show information about the file format this program expects.
"""
FORMAT_HELP = """\
Viewspec File Format
====================
The viewspec file format used by this tool is a collection of headers
(using the typical one-per-line name:value syntax), followed by an
empty line, followed by a set of one-per-line rules.
The headers must contain at least the following:
Format - version of the viewspec format used throughout the file
Url - base URL applied to all rules; tree checkout location
The following headers are optional:
Revision - version of the tree items to checkout
Following the headers and blank line separator are the path rules.
The rules are list of URLs -- relative to the base URL stated in the
headers -- with optional annotations to specify the desired working
copy depth of each item:
PATH/** - checkout PATH and all its children to infinite depth
PATH/* - checkout PATH and its immediate children
PATH/~ - checkout PATH and its file children
PATH - checkout PATH non-recursively
By default, the top-level directory (associated with the base URL) is
checked out with empty depth. You can override this using the special
rules '**', '*', and '~' as appropriate.
It is not necessary to explicitly list the parent directories of each
path associated with a rule. If the parent directory of a given path
is not "covered" by a previous rule, it will be checked out with empty
depth.
Examples
========
Here's a sample viewspec file:
Format: 1
Url: http://svn.apache.org/repos/asf/subversion
Revision: 36366
trunk/**
branches/1.5.x/**
branches/1.6.x/**
README
branches/1.4.x/STATUS
branches/1.4.x/subversion/tests/cmdline/~
You may wish to version your viewspec files. If so, you can use this
script in conjunction with 'svn cat' to fetch, parse, and act on a
versioned viewspec file:
$ svn cat http://svn.example.com/specs/dev-spec.txt |
__SCRIPTNAME__ checkout - /path/to/target/directory
"""
#########################################################################
### Possible future improvements that could be made:
###
### - support for excluded paths (PATH!)
### - support for static revisions of individual paths (PATH@REV/**)
###
import sys
import os
import urllib
DEPTH_EMPTY = 'empty'
DEPTH_FILES = 'files'
DEPTH_IMMEDIATES = 'immediates'
DEPTH_INFINITY = 'infinity'
class TreeNode:
"""A representation of a single node in a Subversion sparse
checkout tree."""
def __init__(self, name, depth):
self.name = name # the basename of this tree item
self.depth = depth # its depth (one of the DEPTH_* values)
self.children = {} # its children (basename -> TreeNode)
def add_child(self, child_node):
child_name = child_node.name
assert not self.children.has_key(child_name)
self.children[child_name] = child_node
def dump(self, recurse=False, indent=0):
sys.stderr.write(" " * indent)
sys.stderr.write("Path: %s (depth=%s)\n" % (self.name, self.depth))
if recurse:
child_names = self.children.keys()
child_names.sort(svn_path_compare_paths)
for child_name in child_names:
self.children[child_name].dump(recurse, indent + 2)
class SubversionViewspec:
"""A representation of a Subversion sparse checkout specification."""
def __init__(self, base_url, revision, tree):
self.base_url = base_url # base URL of the checkout
self.revision = revision # revision of the checkout (-1 == HEAD)
self.tree = tree # the top-most TreeNode item
def svn_path_compare_paths(path1, path2):
"""Compare PATH1 and PATH2 as paths, sorting depth-first-ily.
NOTE: Stolen unapologetically from Subversion's Python bindings
module svn.core."""
path1_len = len(path1);
path2_len = len(path2);
min_len = min(path1_len, path2_len)
i = 0
# Are the paths exactly the same?
if path1 == path2:
return 0
# Skip past common prefix
while (i < min_len) and (path1[i] == path2[i]):
i = i + 1
# Children of paths are greater than their parents, but less than
# greater siblings of their parents
char1 = '\0'
char2 = '\0'
if (i < path1_len):
char1 = path1[i]
if (i < path2_len):
char2 = path2[i]
if (char1 == '/') and (i == path2_len):
return 1
if (char2 == '/') and (i == path1_len):
return -1
if (i < path1_len) and (char1 == '/'):
return -1
if (i < path2_len) and (char2 == '/'):
return 1
# Common prefix was skipped above, next character is compared to
# determine order
return cmp(char1, char2)
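# Worked example (illustrative, not part of the original script): sorting with this
# comparator keeps children directly after their parents, e.g.
#     sorted with svn_path_compare_paths: ['a', 'a/b', 'a/b/c', 'a-x']
#     plain lexicographic sort would give: ['a', 'a-x', 'a/b', 'a/b/c']
# because '-' (0x2d) sorts before '/' (0x2f) in a plain string comparison.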
def parse_viewspec_headers(viewspec_fp):
"""Parse the headers from the viewspec file, return them as a
dictionary mapping header names to values."""
headers = {}
while 1:
line = viewspec_fp.readline().strip()
if not line:
break
name, value = [x.strip() for x in line.split(':', 1)]
headers[name] = value
return headers
def parse_viewspec(viewspec_fp):
"""Parse the viewspec file, returning a SubversionViewspec object
that represents the specification."""
headers = parse_viewspec_headers(viewspec_fp)
format = headers['Format']
assert format == '1'
base_url = headers['Url']
revision = int(headers.get('Revision', -1))
root_depth = DEPTH_EMPTY
rules = {}
while 1:
line = viewspec_fp.readline()
if not line:
break
line = line.rstrip()
# These are special rules for the top-most dir; don't fall thru.
if line == '**':
root_depth = DEPTH_INFINITY
continue
elif line == '*':
root_depth = DEPTH_IMMEDIATES
continue
elif line == '~':
root_depth = DEPTH_FILES
continue
# These are the regular per-path rules.
elif line[-3:] == '/**':
depth = DEPTH_INFINITY
path = line[:-3]
elif line[-2:] == '/*':
depth = DEPTH_IMMEDIATES
path = line[:-2]
elif line[-2:] == '/~':
depth = DEPTH_FILES
path = line[:-2]
else:
depth = DEPTH_EMPTY
path = line
# Add our rule to the set thereof.
assert not rules.has_key(path)
rules[path] = depth
tree = TreeNode('', root_depth)
paths = rules.keys()
paths.sort(svn_path_compare_paths)
for path in paths:
depth = rules[path]
path_parts = filter(None, path.split('/'))
tree_ptr = tree
for part in path_parts[:-1]:
child_node = tree_ptr.children.get(part, None)
if not child_node:
child_node = TreeNode(part, DEPTH_EMPTY)
tree_ptr.add_child(child_node)
tree_ptr = child_node
tree_ptr.add_child(TreeNode(path_parts[-1], depth))
return SubversionViewspec(base_url, revision, tree)
def checkout_tree(base_url, revision, tree_node, target_dir, is_top=True):
"""Checkout from BASE_URL, and into TARGET_DIR, the TREE_NODE
sparse checkout item. IS_TOP is set iff this node represents the
root of the checkout tree. REVISION is the revision to checkout,
or -1 if checking out HEAD."""
depth = tree_node.depth
revision_str = ''
if revision != -1:
revision_str = "--revision=%d " % (revision)
if is_top:
os.system('svn checkout "%s" "%s" --depth=%s %s'
% (base_url, target_dir, depth, revision_str))
else:
os.system('svn update "%s" --set-depth=%s %s'
% (target_dir, depth, revision_str))
child_names = tree_node.children.keys()
child_names.sort(svn_path_compare_paths)
for child_name in child_names:
checkout_tree(base_url + '/' + child_name,
revision,
tree_node.children[child_name],
os.path.join(target_dir, urllib.unquote(child_name)),
False)
def checkout_spec(viewspec, target_dir):
"""Checkout the view specification VIEWSPEC into TARGET_DIR."""
checkout_tree(viewspec.base_url,
viewspec.revision,
viewspec.tree,
target_dir)
def usage_and_exit(errmsg=None):
stream = errmsg and sys.stderr or sys.stdout
msg = __doc__.replace("__SCRIPTNAME__", os.path.basename(sys.argv[0]))
stream.write(msg)
if errmsg:
stream.write("ERROR: %s\n" % (errmsg))
sys.exit(errmsg and 1 or 0)
def main():
argc = len(sys.argv)
if argc < 2:
usage_and_exit('Not enough arguments.')
subcommand = sys.argv[1]
if subcommand == 'help':
usage_and_exit()
elif subcommand == 'help-format':
msg = FORMAT_HELP.replace("__SCRIPTNAME__",
os.path.basename(sys.argv[0]))
sys.stdout.write(msg)
elif subcommand == 'examine':
if argc < 3:
usage_and_exit('No viewspec file specified.')
fp = (sys.argv[2] == '-') and sys.stdin or open(sys.argv[2], 'r')
viewspec = parse_viewspec(fp)
sys.stdout.write("Url: %s\n" % (viewspec.base_url))
revision = viewspec.revision
if revision != -1:
sys.stdout.write("Revision: %s\n" % (revision))
else:
sys.stdout.write("Revision: HEAD\n")
sys.stdout.write("\n")
viewspec.tree.dump(True)
elif subcommand == 'checkout':
if argc < 3:
usage_and_exit('No viewspec file specified.')
if argc < 4:
usage_and_exit('No target directory specified.')
fp = (sys.argv[2] == '-') and sys.stdin or open(sys.argv[2], 'r')
checkout_spec(parse_viewspec(fp), sys.argv[3])
else:
usage_and_exit('Unknown subcommand "%s".' % (subcommand))
if __name__ == "__main__":
main()
| 33.893372 | 75 | 0.623671 |
794460a66aa4002165f1877f12cfd5ef85e274d8 | 5,709 | py | Python | venv/Lib/site-packages/pymongo/periodic_executor.py | omar115/face-recognition-opencv | 37d3e29483e55b8ad1a440a40d82003f1052a457 | [
"MIT"
] | 2 | 2019-07-31T16:33:07.000Z | 2019-09-18T21:48:57.000Z | venv/Lib/site-packages/pymongo/periodic_executor.py | omar115/face-recognition-opencv | 37d3e29483e55b8ad1a440a40d82003f1052a457 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pymongo/periodic_executor.py | omar115/face-recognition-opencv | 37d3e29483e55b8ad1a440a40d82003f1052a457 | [
"MIT"
] | null | null | null |
# Copyright 2014-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Run a target function on a background thread."""
import atexit
import threading
import time
import weakref
from pymongo.monotonic import time as _time
class PeriodicExecutor(object):
def __init__(self, interval, min_interval, target, name=None):
"""Run a target function periodically on a background thread.
If the target's return value is false, the executor stops.
:Parameters:
- `interval`: Seconds between calls to `target`.
- `min_interval`: Minimum seconds between calls if `wake` is
called very often.
- `target`: A function.
- `name`: A name to give the underlying thread.
"""
# threading.Event and its internal condition variable are expensive
# in Python 2, see PYTHON-983. Use a boolean to know when to wake.
# The executor's design is constrained by several Python issues, see
# "periodic_executor.rst" in this repository.
self._event = False
self._interval = interval
self._min_interval = min_interval
self._target = target
self._stopped = False
self._thread = None
self._name = name
self._thread_will_exit = False
self._lock = threading.Lock()
def open(self):
"""Start. Multiple calls have no effect.
Not safe to call from multiple threads at once.
"""
with self._lock:
if self._thread_will_exit:
# If the background thread has read self._stopped as True
# there is a chance that it has not yet exited. The call to
# join should not block indefinitely because there is no
# other work done outside the while loop in self._run.
try:
self._thread.join()
except ReferenceError:
# Thread terminated.
pass
self._thread_will_exit = False
self._stopped = False
started = False
try:
started = self._thread and self._thread.is_alive()
except ReferenceError:
# Thread terminated.
pass
if not started:
thread = threading.Thread(target=self._run, name=self._name)
thread.daemon = True
self._thread = weakref.proxy(thread)
_register_executor(self)
thread.start()
def close(self, dummy=None):
"""Stop. To restart, call open().
The dummy parameter allows an executor's close method to be a weakref
callback; see monitor.py.
"""
self._stopped = True
def join(self, timeout=None):
if self._thread is not None:
try:
self._thread.join(timeout)
except (ReferenceError, RuntimeError):
# Thread already terminated, or not yet started.
pass
def wake(self):
"""Execute the target function soon."""
self._event = True
def __should_stop(self):
with self._lock:
if self._stopped:
self._thread_will_exit = True
return True
return False
def _run(self):
while not self.__should_stop():
try:
if not self._target():
self._stopped = True
break
except:
with self._lock:
self._stopped = True
self._thread_will_exit = True
raise
deadline = _time() + self._interval
while not self._stopped and _time() < deadline:
time.sleep(self._min_interval)
if self._event:
break # Early wake.
self._event = False
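# Illustrative usage sketch (not part of the pymongo source; the function and
# thread names below are made up):
#
#     def _check_server():
#         ...              # periodic work
#         return True      # returning False makes the executor stop itself
#
#     executor = PeriodicExecutor(interval=10, min_interval=0.5,
#                                 target=_check_server, name="example-monitor")
#     executor.open()      # start (or restart) the daemon thread
#     executor.wake()      # request an early run instead of waiting a full interval
#     executor.close()     # signal the thread to stop; join() waits for it to exit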
# _EXECUTORS has a weakref to each running PeriodicExecutor. Once started,
# an executor is kept alive by a strong reference from its thread and perhaps
# from other objects. When the thread dies and all other referrers are freed,
# the executor is freed and removed from _EXECUTORS. If any threads are
# running when the interpreter begins to shut down, we try to halt and join
# them to avoid spurious errors.
_EXECUTORS = set()
def _register_executor(executor):
ref = weakref.ref(executor, _on_executor_deleted)
_EXECUTORS.add(ref)
def _on_executor_deleted(ref):
_EXECUTORS.remove(ref)
def _shutdown_executors():
if _EXECUTORS is None:
return
# Copy the set. Stopping threads has the side effect of removing executors.
executors = list(_EXECUTORS)
# First signal all executors to close...
for ref in executors:
executor = ref()
if executor:
executor.close()
# ...then try to join them.
for ref in executors:
executor = ref()
if executor:
executor.join(1)
executor = None
atexit.register(_shutdown_executors)
| 32.622857 | 80 | 0.592048 |
794462abfa8e4b71adb2b665bd053fe877d55d76 | 659 | py | Python | run.py | iofu728/Chinese_T-Sequence-annotation | a923abe2c8f09dbee752937d80a4c063e581d124 | [
"MIT"
] | 2 | 2019-06-26T04:43:26.000Z | 2020-04-09T06:19:01.000Z | run.py | iofu728/Chinese_T-Sequence-annotation | a923abe2c8f09dbee752937d80a4c063e581d124 | [
"MIT"
] | null | null | null | run.py | iofu728/Chinese_T-Sequence-annotation | a923abe2c8f09dbee752937d80a4c063e581d124 | [
"MIT"
] | 2 | 2020-04-09T06:19:02.000Z | 2020-12-19T13:50:52.000Z |
# -*- coding: utf-8 -*-
# @Author: gunjianpan
# @Date: 2019-06-22 23:39:03
# @Last Modified by: gunjianpan
# @Last Modified time: 2019-06-24 02:37:38
import param
from util import echo
import dataLoad
from model import Model
def main(sa_type: param.SA_TYPE, run_id: str):
''' run model '''
param.change_run_id(run_id)
echo(1, '....... Load data ......')
train, dev, test = dataLoad.load_data(sa_type)
echo(1, '....... Data load Over ......')
sa_model = Model(train, dev, test, sa_type)
echo(1, '------ Begin Train -------')
sa_model.run_model()
if __name__ == "__main__":
main(param.SA_TYPE.NER, 'NER_BILSTM_CRF')
| 25.346154 | 50 | 0.620637 |
79446478f01e8e1f00143b0210ef7a1ee7b61f14 | 2,969 | py | Python | ESN/EchoTorch-master/examples/models/NilsNet_example.py | RogerFu18/drunken-monkey | 2f8498a83105481d0d189b20407f6e3f658b1053 | [
"MIT"
] | null | null | null | ESN/EchoTorch-master/examples/models/NilsNet_example.py | RogerFu18/drunken-monkey | 2f8498a83105481d0d189b20407f6e3f658b1053 | [
"MIT"
] | null | null | null | ESN/EchoTorch-master/examples/models/NilsNet_example.py | RogerFu18/drunken-monkey | 2f8498a83105481d0d189b20407f6e3f658b1053 | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# File : examples/timeserie_prediction/switch_attractor_esn
# Description : NARMA 30 prediction with ESN.
# Date : 26th of January, 2018
#
# This file is part of EchoTorch. EchoTorch is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Nils Schaetti <[email protected]>
# Imports
import torch
import echotorch.utils
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
import os
from torch.autograd import Variable
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.show()
# end imshow
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=4)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
# Create a NilsNet
nilsnet = echotorch.models.NilsNet(reservoir_dim=1000, sfa_dim=100, ica_dim=100)
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
print(inputs.size())
print(classes.size())
inputs = Variable(inputs)
classes = Variable(classes)
# Make a grid from batch
# out = torchvision.utils.make_grid(inputs)
# imshow(out, title=[class_names[x] for x in classes])
outputs = nilsnet(inputs)
print(outputs)
print(outputs.size()) | 32.271739 | 80 | 0.67969 |
794465b10e3fd2195e74d7fa3144329140bb111e | 1,354 | py | Python | tensorflow/python/ops/gradients.py | knightvishal/tensorflow | 5d3dd19b7146d954fc1b4e9e44e9881e75d363c1 | [
"Apache-2.0"
] | 4 | 2021-06-15T17:26:07.000Z | 2021-11-17T10:58:08.000Z | tensorflow/python/ops/gradients.py | knightvishal/tensorflow | 5d3dd19b7146d954fc1b4e9e44e9881e75d363c1 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/gradients.py | knightvishal/tensorflow | 5d3dd19b7146d954fc1b4e9e44e9881e75d363c1 | [
"Apache-2.0"
] | 4 | 2018-02-18T03:39:01.000Z | 2018-10-18T11:49:17.000Z |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.eager import function
from tensorflow.python.eager.backprop import GradientTape
from tensorflow.python.ops.custom_gradient import custom_gradient
from tensorflow.python.ops.gradients_impl import AggregationMethod
from tensorflow.python.ops.gradients_impl import gradients
from tensorflow.python.ops.gradients_impl import hessians
from tensorflow.python.ops.gradients_impl import UnconnectedGradients
# pylint: enable=unused-import
| 45.133333 | 80 | 0.768095 |
7944661400386d684c6fb2c829a64dc9ec8406e6 | 6,294 | py | Python | local_eval/eval_qnrf.py | hayko1995/JHU-CROWD-Detection | 68808b90a0af168a38743389313ea0e1d28e1a28 | [
"MIT"
] | null | null | null | local_eval/eval_qnrf.py | hayko1995/JHU-CROWD-Detection | 68808b90a0af168a38743389313ea0e1d28e1a28 | [
"MIT"
] | null | null | null | local_eval/eval_qnrf.py | hayko1995/JHU-CROWD-Detection | 68808b90a0af168a38743389313ea0e1d28e1a28 | [
"MIT"
] | null | null | null |
import os
import sys
import numpy as np
from scipy import spatial as ss
import pdb
import cv2
from utils import hungarian, read_pred_and_gt, AverageMeter, AverageCategoryMeter
import argparse
import scipy.io
import math
flagError = False
# id_std = [i for i in range(3110,3610,1)]
# id_std[59] = 3098
id_std = [i for i in range(1, 335, 1)]
num_classes = 1
def compute_metrics(dist_matrix, match_matrix, pred_num, gt_num, sigma, level):
for i_pred_p in range(pred_num):
pred_dist = dist_matrix[i_pred_p, :]
match_matrix[i_pred_p, :] = pred_dist <= sigma
tp, assign = hungarian(match_matrix)
fn_gt_index = np.array(np.where(assign.sum(0) == 0))[0]
tp_pred_index = np.array(np.where(assign.sum(1) == 1))[0]
tp_gt_index = np.array(np.where(assign.sum(0) == 1))[0]
fp_pred_index = np.array(np.where(assign.sum(1) == 0))[0]
level_list = level[tp_gt_index]
tp = tp_pred_index.shape[0]
fp = fp_pred_index.shape[0]
fn = fn_gt_index.shape[0]
tp_c = np.zeros([num_classes])
fn_c = np.zeros([num_classes])
for i_class in range(num_classes):
tp_c[i_class] = (level[tp_gt_index] == i_class).sum()
fn_c[i_class] = (level[fn_gt_index] == i_class).sum()
return tp, fp, fn, tp_c, fn_c
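# Illustrative example (not part of the original file): with two predictions, two
# GT points, sigma = [5, 5] and dist_matrix = [[3, 20], [20, 4]], each prediction
# lies within sigma of a distinct GT point, so the hungarian matching returns
# tp=2, fp=0, fn=0, and tp_c counts both matched GT points under their level class.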
def main(gt_file, pred_file, index):
cnt_errors = {'mae': AverageMeter(), 'mse': AverageMeter(), 'nae': AverageMeter(), }
metrics_s = {'tp': AverageMeter(), 'fp': AverageMeter(), 'fn': AverageMeter(),
'tp_c': AverageCategoryMeter(num_classes), 'fn_c': AverageCategoryMeter(num_classes)}
metrics_l = {'tp': AverageMeter(), 'fp': AverageMeter(), 'fn': AverageMeter(),
'tp_c': AverageCategoryMeter(num_classes), 'fn_c': AverageCategoryMeter(num_classes)}
pred_data, gt_data = read_pred_and_gt(pred_file, gt_file)
for i_sample in id_std:
# init
gt_p, pred_p, fn_gt_index, tp_pred_index, fp_pred_index = [], [], [], [], []
tp_s, fp_s, fn_s, tp_l, fp_l, fn_l = [0, 0, 0, 0, 0, 0]
tp_c_l = np.zeros([num_classes])
fn_c_l = np.zeros([num_classes])
if gt_data[i_sample]['num'] == 0 and pred_data[i_sample]['num'] != 0:
pred_p = pred_data[i_sample]['points']
fp_pred_index = np.array(range(pred_p.shape[0]))
fp_s = fp_pred_index.shape[0]
fp_l = fp_pred_index.shape[0]
if pred_data[i_sample]['num'] == 0 and gt_data[i_sample]['num'] != 0:
gt_p = gt_data[i_sample]['points']
level = gt_data[i_sample]['level']
fn_gt_index = np.array(range(gt_p.shape[0]))
fn_s = fn_gt_index.shape[0]
fn_l = fn_gt_index.shape[0]
for i_class in range(num_classes):
fn_c_l[i_class] = (level[fn_gt_index] == i_class).sum()
if gt_data[i_sample]['num'] != 0 and pred_data[i_sample]['num'] != 0:
pred_p = pred_data[i_sample]['points']
gt_p = gt_data[i_sample]['points']
sigma_l = gt_data[i_sample]['sigma'][:, 1]
level = gt_data[i_sample]['level']
# dist
dist_matrix = ss.distance_matrix(pred_p, gt_p, p=2)
match_matrix = np.zeros(dist_matrix.shape, dtype=bool)
# sigma_s and sigma_l
tp_l, fp_l, fn_l, tp_c_l, fn_c_l = compute_metrics(dist_matrix, match_matrix, pred_p.shape[0],
gt_p.shape[0], sigma_l, level)
metrics_l['tp'].update(tp_l)
metrics_l['fp'].update(fp_l)
metrics_l['fn'].update(fn_l)
metrics_l['tp_c'].update(tp_c_l)
metrics_l['fn_c'].update(fn_c_l)
gt_count, pred_cnt = gt_data[i_sample]['num'], pred_data[i_sample]['num']
s_mae = abs(gt_count - pred_cnt)
s_mse = (gt_count - pred_cnt) * (gt_count - pred_cnt)
cnt_errors['mae'].update(s_mae)
cnt_errors['mse'].update(s_mse)
if gt_count != 0:
s_nae = abs(gt_count - pred_cnt) / gt_count
cnt_errors['nae'].update(s_nae)
ap_l = metrics_l['tp'].sum / (metrics_l['tp'].sum + metrics_l['fp'].sum + 1e-20)
ar_l = metrics_l['tp'].sum / (metrics_l['tp'].sum + metrics_l['fn'].sum + 1e-20)
f1m_l = 2 * ap_l * ar_l / (ap_l + ar_l)
ar_c_l = metrics_l['tp_c'].sum / (metrics_l['tp_c'].sum + metrics_l['fn_c'].sum + 1e-20)
print('-----Localization performance----- ', index)
print('AP_large: ' + str(ap_l))
print('AR_large: ' + str(ar_l))
print('F1m_large: ' + str(f1m_l))
return ap_l, ar_l, f1m_l
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='FIDTM')
parser.add_argument('--data_path', default= '../jhu_crowd_v2.0', type=str, help='the QNRF data path')
args = parser.parse_args()
print( args.data_path)
img_test_path = args.data_path + '/test/'
gt_test_path = args.data_path + '/test/'
img_test = []
gt_test = []
for file_name in os.listdir(img_test_path):
if file_name.split('.')[1] == 'jpg':
img_test.append(img_test_path + file_name)
for file_name in os.listdir(gt_test_path):
if file_name.split('.')[1] == 'mat':
gt_test.append(file_name)
img_test.sort()
gt_test.sort()
ap_l_list = []
ar_l_list = []
f1m_l_list = []
for i in range(1, 101):
print("start process sigma = ", i)
f = open('./point_files/qnrf_gt.txt', 'w+')
k = 0
for img_path in img_test:
Gt_data = scipy.io.loadmat(gt_test_path + gt_test[k])
Gt_data = Gt_data['annPoints']
f.write('{} {} '.format(k + 1, len(Gt_data)))
''' fixed sigma'''
sigma_s = 4
sigma_l = i
for data in Gt_data:
f.write('{} {} {} {} {} '.format(math.floor(data[0]), math.floor(data[1]), sigma_s, sigma_l, 1))
f.write('\n')
k = k + 1
f.close()
gt_file = './point_files/qnrf_gt.txt'
pred_file = './point_files/qnrf_pred_fidt.txt'
ap_l, ar_l, f1m_l = main(gt_file, pred_file, i)
ap_l_list.append(ap_l)
ar_l_list.append(ar_l)
f1m_l_list.append(f1m_l)
print(np.mean(ap_l_list), np.mean(ar_l_list), np.mean(f1m_l_list))
| 36.381503 | 112 | 0.592628 |
7944667e94e030270c75291bd4fc903be626d052 | 711 | py | Python | covigator/pipeline/runner.py | TRON-Bioinformatics/covigator | 59cd5012217cb043d97c77ce5273d8930e74390d | [
"MIT"
] | 7 | 2021-07-23T14:09:51.000Z | 2022-01-26T20:26:27.000Z | covigator/pipeline/runner.py | TRON-Bioinformatics/covigator | 59cd5012217cb043d97c77ce5273d8930e74390d | [
"MIT"
] | 2 | 2021-07-27T08:30:22.000Z | 2022-02-22T20:06:05.000Z | covigator/pipeline/runner.py | TRON-Bioinformatics/covigator | 59cd5012217cb043d97c77ce5273d8930e74390d | [
"MIT"
] | null | null | null |
from logzero import logger
import time
import subprocess
from covigator.exceptions import CovigatorPipelineError
def run_command(command, temporary_folder):
start = time.time()
p = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=temporary_folder, shell=True)
stdoutdata, stderrdata = p.communicate()
logger.info("Finished in {} secs command: '{}'".format(time.time() - start, command))
if p.returncode != 0:
error_message = decode(stderrdata)
logger.error(error_message)
raise CovigatorPipelineError("Error executing pipeline command: {}\n{}".format(command, error_message))
def decode(data):
return data.decode("utf8")
| 33.857143 | 111 | 0.721519 |
79446691d93d15e19453033b6272c2e11e593dd8 | 3,000 | py | Python | helper/decorators.py | growsimplee/django-helper | df9e4f80dfe04a18b1a17f9cd4a69a58a2e9b997 | [
"MIT"
] | null | null | null | helper/decorators.py | growsimplee/django-helper | df9e4f80dfe04a18b1a17f9cd4a69a58a2e9b997 | [
"MIT"
] | null | null | null | helper/decorators.py | growsimplee/django-helper | df9e4f80dfe04a18b1a17f9cd4a69a58a2e9b997 | [
"MIT"
] | null | null | null |
import traceback
import time
import logging
from .settings import ERROR_QUEUE, SERVICE_NAME, AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY, AWS_REGION, ERROR_CODES
import boto3
import json
import json
from functools import lru_cache,wraps
logger = logging.getLogger("django")
def base_handler(func,error_handler,completion_handler):
def execution(*args,**kwargs):
start = time.time()
try:
result = func(*args, **kwargs)
end = time.time()
completion_handler(end,start)
except Exception as e :
result = error_handler(traceback.format_exc(),e,args,kwargs)
return result
return execution
def gs_task(task_id):
def error_handler(traceback, exception,args,kwargs):
logger.error(f"error in {task_id} with input {args},{kwargs} -> {exception} | traceback {traceback}")
sqs = boto3.resource('sqs', region_name=AWS_REGION, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY )
queue2 = sqs.get_queue_by_name(QueueName=ERROR_QUEUE)
data = {
"service" : SERVICE_NAME,
"task" : task_id,
"time": str(time.time()),
"exception" : str(exception),
"traceback" : traceback,
"input": f"{args},{kwargs}"
}
response = queue2.send_message(MessageBody=json.dumps(data),MessageGroupId=SERVICE_NAME, MessageAttributes={'errorType': {"StringValue":f"{SERVICE_NAME}","DataType":"String"}})
return {"success":False,"exception" : str(exception), "code": ERROR_CODES.get(type(exception).__name__,50000)}
def completion_handler(end,start):
logger.info(f"{task_id} execution completed in {(end-start)*1000} ms")
def base_wrapper(func):
return base_handler(func,error_handler,completion_handler)
return base_wrapper
def simple_gs_cache(maxsize):
def hashable_lru(func):
cache = lru_cache(maxsize=maxsize)
def deserialise(value):
try:
return json.loads(value)
except Exception:
return value
def func_with_serialized_params(*args, **kwargs):
_args = tuple([deserialise(arg) for arg in args])
_kwargs = {k: deserialise(v) for k, v in kwargs.items()}
return func(*_args, **_kwargs)
cached_function = cache(func_with_serialized_params)
@wraps(func)
def lru_decorator(*args, **kwargs):
_args = tuple([json.dumps(arg, sort_keys=True) if type(arg) in (list, dict) else arg for arg in args])
_kwargs = {k: json.dumps(v, sort_keys=True) if type(v) in (list, dict) else v for k, v in kwargs.items()}
return cached_function(*_args, **_kwargs)
lru_decorator.cache_info = cached_function.cache_info
lru_decorator.cache_clear = cached_function.cache_clear
return lru_decorator
return hashable_lru
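# Illustrative usage sketch (not part of the original module; the task id and
# function names are made up):
#
#     @gs_task("orders.sync")
#     def sync_orders(order_ids):
#         ...    # any exception is logged, pushed to the SQS error queue and
#                # turned into {"success": False, "exception": ..., "code": ...}
#
#     @simple_gs_cache(maxsize=128)
#     def score(payload):
#         ...    # list/dict arguments are JSON-serialised so lru_cache can hash them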
| 38.961039 | 185 | 0.644 |
794467ea5227d786240a4dc2c21fda99810bd1c3 | 1,162 | py | Python | bpcs/bpcs_steg_decode.py | BburnN123/bpcs | f53caede7e202ce07b51890f028b9caf73a22937 | [
"MIT"
] | 20 | 2017-04-25T21:07:24.000Z | 2022-03-30T11:11:47.000Z | bpcs/bpcs_steg_decode.py | BburnN123/bpcs | f53caede7e202ce07b51890f028b9caf73a22937 | [
"MIT"
] | 4 | 2016-04-06T01:19:27.000Z | 2020-09-26T18:38:29.000Z | bpcs/bpcs_steg_decode.py | BburnN123/bpcs | f53caede7e202ce07b51890f028b9caf73a22937 | [
"MIT"
] | 12 | 2017-04-02T23:10:46.000Z | 2022-03-21T03:43:55.000Z |
import numpy as np
from .logger import log
from .array_grid import get_next_grid_dims
from .act_on_image import ActOnImage
from .array_message import write_conjugated_message_grids
from .bpcs_steg import arr_bpcs_complexity
def remove_message_from_vessel(arr, alpha, grid_size):
messages = []
nfound, nkept, nleft = 0, 0, 0
complexities = []
for dims in get_next_grid_dims(arr, grid_size):
nfound += 1
grid = arr[tuple(dims)]
cmplx = arr_bpcs_complexity(grid)
if cmplx < alpha:
nleft += 1
continue
complexities.append(cmplx)
nkept += 1
messages.append(grid)
assert nfound == nkept + nleft
log.critical('Found {0} out of {1} grids with complexity above {2}'.format(nkept, nfound, alpha))
return messages
class BPCSDecodeImage(ActOnImage):
def modify(self, alpha):
return remove_message_from_vessel(self.arr, alpha, (8,8))
def decode(infile, outfile, alpha=0.45):
x = BPCSDecodeImage(infile, as_rgb=True, bitplane=True, gray=True, nbits_per_layer=8)
grids = x.modify(alpha)
write_conjugated_message_grids(outfile, grids, alpha)
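# Illustrative usage (the file names are assumptions, not part of the original module):
#
#     decode('vessel.png', 'recovered_message.bin', alpha=0.45)
#
# The alpha threshold should normally match the one used when the message was embedded.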
| 33.2 | 101 | 0.692771 |
794468dd8c489310c851f3e4ee60453853255174 | 1,908 | py | Python | qa_tool/tests/test_algorithm.py | pg-irc/pathways-backend | 05a8c4e750523d2d081b030a248c5444d1ed7992 | [
"BSD-3-Clause"
] | 12 | 2017-08-30T18:21:00.000Z | 2021-12-09T04:04:17.000Z | qa_tool/tests/test_algorithm.py | pg-irc/pathways-backend | 05a8c4e750523d2d081b030a248c5444d1ed7992 | [
"BSD-3-Clause"
] | 424 | 2017-08-08T18:32:14.000Z | 2022-03-30T21:42:51.000Z | qa_tool/tests/test_algorithm.py | pg-irc/pathways-backend | 05a8c4e750523d2d081b030a248c5444d1ed7992 | [
"BSD-3-Clause"
] | 7 | 2017-09-29T21:14:37.000Z | 2019-12-30T21:07:37.000Z |
from rest_framework import test as rest_test
from rest_framework import status
from qa_tool.tests.helpers import AlgorithmBuilder
from common.testhelpers.random_test_values import a_string
class AlgorithmsTests(rest_test.APITestCase):
def setUp(self):
self.data = {
'id': 'The id',
'name': 'The name',
'url': 'http://www.example.org',
'notes': 'The notes'
}
def test_can_get_entities(self):
AlgorithmBuilder().with_name(a_string()).create()
AlgorithmBuilder().with_name(a_string()).create()
url = '/qa/v1/algorithms/'
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()), 2)
def test_can_get_one_entity(self):
algorithm_notes = a_string()
algorithm = AlgorithmBuilder().with_notes(algorithm_notes).create()
url = '/qa/v1/algorithms/{0}/'.format(algorithm.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()['notes'], algorithm_notes)
def test_cannot_post(self):
url = '/qa/v1/algorithms/'
response = self.client.post(url, self.data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_cannot_put(self):
algorithm = AlgorithmBuilder().create()
url = '/qa/v1/algorithms/{0}/'.format(algorithm.pk)
response = self.client.put(url, self.data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_cannot_delete(self):
algorithm = AlgorithmBuilder().create()
url = '/qa/v1/algorithms/{0}/'.format(algorithm.pk)
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
| 39.75 | 82 | 0.67348 |
79446a9ddf397bc0da1ebfe5b49ce13fd498bbbc | 596 | py | Python | app/grandchallenge/cases/migrations/0006_rawimageuploadsession_user_uploads.py | kaczmarj/grand-challenge.org | 8dc8a2170e51072354f7e94f2a22578805a67b94 | [
"Apache-2.0"
] | 7 | 2016-11-05T07:16:30.000Z | 2017-11-23T03:38:03.000Z | app/grandchallenge/cases/migrations/0006_rawimageuploadsession_user_uploads.py | kaczmarj/grand-challenge.org | 8dc8a2170e51072354f7e94f2a22578805a67b94 | [
"Apache-2.0"
] | 113 | 2015-05-26T09:27:59.000Z | 2018-03-21T10:45:56.000Z | app/grandchallenge/cases/migrations/0006_rawimageuploadsession_user_uploads.py | kaczmarj/grand-challenge.org | 8dc8a2170e51072354f7e94f2a22578805a67b94 | [
"Apache-2.0"
] | 7 | 2015-07-16T20:11:22.000Z | 2017-06-06T02:41:24.000Z | # Generated by Django 3.1.13 on 2021-10-13 09:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("uploads", "0004_auto_20211012_0957"),
("cases", "0005_rawimageuploadsession_import_result"),
]
operations = [
migrations.AddField(
model_name="rawimageuploadsession",
name="user_uploads",
field=models.ManyToManyField(
blank=True,
related_name="image_upload_sessions",
to="uploads.UserUpload",
),
)
]
| 25.913043 | 62 | 0.588926 |
79446ac1e5e04c50de25db8fb4c96ef4c81d994b | 245 | py | Python | cms/templatetags/js.py | eduncan911/django_cms | 66c27f059ca0779157a7c3cc2e007d8090f10351 | [
"BSD-3-Clause"
] | 1 | 2017-04-27T20:00:40.000Z | 2017-04-27T20:00:40.000Z | cms/templatetags/js.py | eduncan911/django_cms | 66c27f059ca0779157a7c3cc2e007d8090f10351 | [
"BSD-3-Clause"
] | null | null | null | cms/templatetags/js.py | eduncan911/django_cms | 66c27f059ca0779157a7c3cc2e007d8090f10351 | [
"BSD-3-Clause"
] | null | null | null | from django import template
from django.utils import simplejson
from django.core.serializers.json import DjangoJSONEncoder
register = template.Library()
@register.filter
def js(value):
return simplejson.dumps(value, cls=DjangoJSONEncoder)
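# Usage sketch (hypothetical context variable): in a template, after `{% load js %}`,
# the filter serialises a Python value to JSON for inline JavaScript; DjangoJSONEncoder
# also handles dates and Decimals that plain json.dumps would reject.
#
#     <script>var config = {{ page_settings|js }};</script>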
| 24.5 | 58 | 0.816327 |
79446b03be4726a2b3e1713d9cce0ec0b31de0b7 | 556 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/module_utils/network/eos/argspec/facts/facts.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/venv/lib/python2.7/site-packages/ansible/module_utils/network/eos/argspec/facts/facts.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/venv/lib/python2.7/site-packages/ansible/module_utils/network/eos/argspec/facts/facts.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | # -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The arg spec for the eos facts module.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class FactsArgs(object):
""" The arg spec for the eos facts module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
'gather_subset': dict(default=['!config'], type='list'),
'gather_network_resources': dict(type='list'),
}
| 24.173913 | 92 | 0.652878 |
79446b50e34eec198e9b3c40189ae20f503341c6 | 13,110 | py | Python | apps/equipment/models/recording_model.py | kane-zh/MES_server | d8d28768a054eee6433e3900908afd331fd92281 | [
"Apache-2.0"
] | null | null | null | apps/equipment/models/recording_model.py | kane-zh/MES_server | d8d28768a054eee6433e3900908afd331fd92281 | [
"Apache-2.0"
] | null | null | null | apps/equipment/models/recording_model.py | kane-zh/MES_server | d8d28768a054eee6433e3900908afd331fd92281 | [
"Apache-2.0"
] | null | null | null | from .basicinfor_model import *
class PartsUseRecordModel(models.Model):
"""
    Spare parts consumption record (配件消耗记录)
"""
STATUS = (
("新建", "新建"),
("审核中", "审核中"),
("完成", "完成"),
("作废", "作废"),
)
id = models.AutoField(primary_key=True, unique=True)
name = models.CharField(max_length=32, name="name",null=True, blank=True, verbose_name="名称", help_text="配件消耗记录名称(建议唯一)")
code = models.CharField(max_length=32, unique=True, name="code", verbose_name="编码",
help_text="配件消耗记录编码(必须唯一)")
state = models.CharField(max_length=16, choices=STATUS, default="新建", name="state", verbose_name="状态",
help_text="配件消耗记录的使用状态")
file = models.ManyToManyField(EquipmentFileModel, blank=True, name="file", verbose_name="文件",
help_text="当前配件消耗记录的文件信息")
parts = models.ForeignKey(PartsInforDefinitionModel, name="parts", on_delete=models.CASCADE,verbose_name="备品",
help_text="当前消耗项对应的备品")
sum = models.IntegerField( name="sum", verbose_name="使用数量",help_text="当前消耗数量")
dataTime = models.DateTimeField(name="dataTime", null=True, blank=True,verbose_name="使用时间",help_text="当前配件消耗的时间")
handler = models.CharField(max_length=32, name="handler", null=True, blank=True, verbose_name="操作者",
help_text="进行操作的人员是")
attribute1 = models.CharField(max_length=32, null=True, blank=True, name="attribute1", verbose_name="属性1", help_text="当前班组的属性1")
attribute2 = models.CharField(max_length=32, null=True, blank=True, name="attribute2", verbose_name="属性2", help_text="当前班组的属性2")
attribute3 = models.CharField(max_length=32, null=True, blank=True, name="attribute3", verbose_name="属性3", help_text="当前班组的属性3")
attribute4 = models.CharField(max_length=32, null=True, blank=True, name="attribute4", verbose_name="属性4", help_text="当前班组的属性4")
attribute5 = models.CharField(max_length=32, null=True, blank=True, name="attribute5", verbose_name="属性5", help_text="当前班组的属性5")
desc = models.TextField(null=True, blank=True, name="desc", verbose_name="备注",
help_text="当前信息未列出的字段项,可以在此字段描述.每一项用;隔开")
create_time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间", help_text="当前信息创建的时间,后台会自动填充此字段")
update_time = models.DateTimeField(auto_now=True, verbose_name="更新时间", help_text="当前信息最后的更新时间,后台会自动填充此字段")
create_user = models.CharField(max_length=32, name="create_user", verbose_name="创建账号", help_text="创建当前信息的账号名称")
auditor = models.CharField(max_length=32, name="auditor", verbose_name="审核账号", help_text="可对当前信息进行审核的账号名称")
alter = models.ManyToManyField(EquipmentAlterRecordModel, blank=True, name="alter", verbose_name="审核记录",
help_text="当前信息的审核记录")
def __str__(self):
return self.code
class Meta:
db_table = "PartsUseRecordModel"
app_label = 'equipment'
verbose_name = "设备管理-配件消耗记录"
verbose_name_plural = verbose_name
permissions = {("read_partsuserecordmodel", u"Can read 设备管理-配件消耗记录"),
("admin_partsuserecordmodel", u"Can admin 设备管理-配件消耗记录")}
class MaintainRecordItemModel(models.Model):
"""
    Maintenance record sub-item (维护记录子项)
"""
id = models.AutoField(primary_key=True, unique=True)
compoment = models.CharField(max_length=32, name="compoment", verbose_name="部件",help_text="进行此次维护消耗的部件")
method = models.TextField(null=True, blank=True, name="method", verbose_name="维护方法", help_text="维护方法")
result = models.CharField(max_length=32, name="result",null=True, blank=True, verbose_name="维护结果", help_text="进行此次维护消耗的结果")
attribute1 = models.CharField(max_length=32, null=True, blank=True, name="attribute1", verbose_name="属性1", help_text="当前班组的属性1")
attribute2 = models.CharField(max_length=32, null=True, blank=True, name="attribute2", verbose_name="属性2", help_text="当前班组的属性2")
attribute3 = models.CharField(max_length=32, null=True, blank=True, name="attribute3", verbose_name="属性3", help_text="当前班组的属性3")
attribute4 = models.CharField(max_length=32, null=True, blank=True, name="attribute4", verbose_name="属性4", help_text="当前班组的属性4")
attribute5 = models.CharField(max_length=32, null=True, blank=True, name="attribute5", verbose_name="属性5", help_text="当前班组的属性5")
file = models.ManyToManyField(EquipmentFileModel, blank=True, name="file", verbose_name="文件", help_text="当前维护记录子项的文件信息")
image = models.ManyToManyField(EquipmentImageModel, blank=True, name="image", verbose_name="照片",help_text="当前维护记录子项的照片信息")
desc = models.TextField(null=True, blank=True, name="desc", verbose_name="备注", help_text="当前信息未列出的字段项,可以在此字段描述.每一项用;隔开")
create_time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间",help_text="当前信息创建的时间,后台会自动填充此字段")
update_time = models.DateTimeField(auto_now=True, verbose_name="更新时间", help_text="当前信息最后的更新时间")
create_user = models.CharField(max_length=32, name="create_user",verbose_name="创建账号",help_text="创建当前信息的账号名称")
def __str__(self):
return str(self.id)
class Meta:
db_table = "MaintainRecordItemModel"
app_label = 'equipment'
verbose_name = "设备管理-维护记录子项"
verbose_name_plural = verbose_name
class MaintainRecordModel(models.Model):
"""
    Maintenance information record (维护信息记录)
"""
STATUS = (
("新建", "新建"),
("审核中", "审核中"),
("完成", "完成"),
("作废", "作废"),
)
id = models.AutoField(primary_key=True, unique=True)
name = models.CharField(max_length=32, name="name",null=True, blank=True, verbose_name="名称", help_text="维护记录名称(建议唯一)")
code = models.CharField(max_length=32, name="code",null=True, blank=True, verbose_name="编码",
help_text="维护记录编码(与类型联合唯一)")
type = models.ForeignKey(MaintainRecordTypeDefinitionModel, on_delete=models.CASCADE,
name="type",related_name="maintainRecordType_item", verbose_name="类型", help_text="当前维护的维护类型")
state = models.CharField(max_length=16, choices=STATUS, default="新建", name="state", verbose_name="状态",
help_text="设备维护信息的使用状态")
child = models.ManyToManyField(MaintainRecordItemModel, blank=True, name="child", verbose_name="维护记录子项",
help_text="当前检验记录子项")
attribute1 = models.CharField(max_length=32, null=True, blank=True, name="attribute1", verbose_name="属性1", help_text="当前维护记录附加的属性1")
attribute2 = models.CharField(max_length=32, null=True, blank=True, name="attribute2", verbose_name="属性2", help_text="当前维护记录附加的属性2")
attribute3 = models.CharField(max_length=32, null=True, blank=True, name="attribute3", verbose_name="属性3", help_text="当前维护记录附加的属性3")
attribute4 = models.CharField(max_length=32, null=True, blank=True, name="attribute4", verbose_name="属性4", help_text="当前维护记录附加的属性4")
attribute5 = models.CharField(max_length=32, null=True, blank=True, name="attribute5", verbose_name="属性5", help_text="当前维护记录附加的属性5")
file = models.ManyToManyField(EquipmentFileModel, blank=True, name="file", verbose_name="文件", help_text="当前维护记录的文件信息")
image = models.ManyToManyField(EquipmentImageModel, blank=True, name="image", verbose_name="照片",help_text="当前维护记录的照片信息")
equipment = models.ForeignKey(EquipmentAccountModel, name="equipment", on_delete=models.CASCADE,verbose_name="设备",
related_name="equipment_maintainRecord", help_text="当前维修记录对应的设备")
dataTime = models.DateTimeField(name="dataTime", null=True, blank=True,verbose_name="维修时间",help_text="当前设备维修的时间")
handler = models.CharField(max_length=32, name="handler", null=True, blank=True, verbose_name="操作者", help_text="进行维修的人员是")
time_consuming=models.CharField(max_length=32, name="time_consuming", null=True, blank=True, verbose_name="维护耗时", help_text="进行此次维护消耗的时间")
parts_use=models.ManyToManyField(PartsUseRecordModel, blank=True, name="parts_use", verbose_name="备品消耗",
related_name="partsUserRecord_maintainRecord",help_text="进行当前维护消耗的备品记录")
result = models.CharField(max_length=32, name="result",null=True, blank=True, verbose_name="维护结果", help_text="进行此次维护消耗的结果")
desc = models.TextField(null=True, blank=True, name="desc", verbose_name="备注",help_text="当前信息未列出的字段项,可以在此字段描述.每一项用;隔开")
create_time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间", help_text="当前信息创建的时间,后台会自动填充此字段")
update_time = models.DateTimeField(auto_now=True, verbose_name="更新时间", help_text="当前信息最后的更新时间,后台会自动填充此字段")
create_user = models.CharField(max_length=32, name="create_user", verbose_name="创建账号", help_text="创建当前信息的账号名称")
auditor = models.CharField(max_length=32, name="auditor", verbose_name="审核账号", help_text="可对当前信息进行审核的账号名称")
alter = models.ManyToManyField(EquipmentAlterRecordModel, blank=True, name="alter", verbose_name="审核记录",
help_text="当前信息的审核记录")
def __str__(self):
return self.code
class Meta:
db_table = "MaintainRecordModel"
app_label = 'equipment'
verbose_name = "设备管理-维护信息记录"
verbose_name_plural = verbose_name
permissions = {("read_maintainrecordmodel", u"Can read 设备管理-维护信息记录"),
("admin_maintainrecordmodel", u"Can admin 设备管理-维护信息记录")}
class EquipmentStateModel(models.Model):
"""
    Equipment status information (设备状态信息)
"""
id = models.AutoField(primary_key=True, unique=True)
name = models.CharField(max_length=32, name="name",null=True, blank=True, verbose_name="名称", help_text="设备状态信息名称(建议唯一)")
code = models.CharField(max_length=32, unique=True, name="code", verbose_name="编码",
help_text="设备状态信息编码(必须唯一)")
equipment = models.ForeignKey(EquipmentAccountModel, name="equipment", on_delete=models.CASCADE,verbose_name="设备",help_text="当前记录下的设备")
state = models.CharField(max_length=32,null=True, blank=True, name="type", verbose_name="状态类型", help_text="当前记录下设备的状态类型")
runTime = models.CharField(max_length=32, null=True, blank=True, name="runTime", verbose_name="运行时长", help_text="当前记录下设备的运行时长")
allTime = models.CharField(max_length=32, null=True, blank=True, name="allTime", verbose_name="总时长",help_text="当前记录下设备的总开机时长")
sum = models.CharField( max_length=32, null=True, blank=True, name="sum",verbose_name="完成数量",help_text="当前完成数量")
task = models.CharField(max_length=32, null=True, blank=True, name="task", verbose_name="任务号",help_text="当前加工的任务号")
util_rate = models.CharField(max_length=32,null=True, blank=True, name="util_rate", verbose_name="稼动率",help_text="当前设备稼动率")
comp_rate = models.CharField(max_length=32,null=True, blank=True, name="comp_rate", verbose_name="完成率", help_text="当前设备完成率")
handler = models.CharField(max_length=32, null=True, blank=True, name="handler",verbose_name="操作者",help_text="进行操作的人员是")
attribute1 = models.CharField(max_length=32, null=True, blank=True, name="attribute1", verbose_name="属性1", help_text="当前设备状态附加的属性1")
attribute2 = models.CharField(max_length=32, null=True, blank=True, name="attribute2", verbose_name="属性2", help_text="当前设备状态附加的属性2")
attribute3 = models.CharField(max_length=32, null=True, blank=True, name="attribute3", verbose_name="属性3", help_text="当前设备状态附加的属性3")
attribute4 = models.CharField(max_length=32, null=True, blank=True, name="attribute4", verbose_name="属性4", help_text="当前设备状态附加的属性4")
attribute5 = models.CharField(max_length=32, null=True, blank=True, name="attribute5", verbose_name="属性5", help_text="当前设备状态附加的属性5")
attribute6 = models.CharField(max_length=32, null=True, blank=True, name="attribute6", verbose_name="属性6", help_text="当前设备状态附加的属性6")
attribute7 = models.CharField(max_length=32, null=True, blank=True, name="attribute7", verbose_name="属性7", help_text="当前设备状态附加的属性7")
attribute8 = models.CharField(max_length=32, null=True, blank=True, name="attribute8", verbose_name="属性8", help_text="当前设备状态附加的属性8")
attribute9 = models.CharField(max_length=32, null=True, blank=True, name="attribute9", verbose_name="属性9", help_text="当前设备状态附加的属性9")
attribute10 = models.CharField(max_length=32, null=True, blank=True, name="attribute10", verbose_name="属性10", help_text="当前设备状态附加的属性10")
desc = models.TextField(null=True, blank=True, name="desc", verbose_name="备注", help_text="当前信息未列出的字段项,可以在此字段描述.每一项用;隔开")
create_time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间", help_text="当前信息创建的时间,后台会自动填充此字段")
update_time = models.DateTimeField(auto_now=True, verbose_name="更新时间", help_text="当前信息最后的更新时间,后台会自动填充此字段")
create_user = models.CharField(max_length=32, name="create_user", verbose_name="创建账号", help_text="创建当前信息的账号名称")
def __str__(self):
return self.code
class Meta:
db_table = "EquipmentStateModel"
app_label = 'equipment'
verbose_name = "设备管理-设备状态信息"
verbose_name_plural = verbose_name
permissions = {("read_equipmentstatemodel", u"Can read 设备管理-设备状态信息"),
("admin_equipmentstatemodel", u"Can admin 设备管理-设备状态信息")}
| 72.430939 | 143 | 0.704729 |
79446b65ed745f556180326f05ac779f8310e40c | 638 | py | Python | src/compas/datastructures/volmesh/bbox.py | Sam-Bouten/compas | 011c7779ded9b69bb602568b470bb0443e336f62 | [
"MIT"
] | null | null | null | src/compas/datastructures/volmesh/bbox.py | Sam-Bouten/compas | 011c7779ded9b69bb602568b470bb0443e336f62 | [
"MIT"
] | null | null | null | src/compas/datastructures/volmesh/bbox.py | Sam-Bouten/compas | 011c7779ded9b69bb602568b470bb0443e336f62 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.geometry import bounding_box
__all__ = [
'volmesh_bounding_box',
]
def volmesh_bounding_box(volmesh):
"""Compute the (axis aligned) bounding box of a volmesh.
Parameters
----------
volmesh : :class:`compas.datastructures.VolMesh`
The volmesh data structure.
Returns
-------
list[[float, float, float]]
The 8 corner points of the bounding box.
"""
xyz = volmesh.vertices_attributes('xyz', vertices=list(volmesh.vertices()))
return bounding_box(xyz)
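# Usage sketch (hypothetical input file): the result is the 8 corner points of the
# axis-aligned box, in the ordering used by compas.geometry.bounding_box.
#
#     from compas.datastructures import VolMesh
#     volmesh = VolMesh.from_obj('boxes.obj')
#     corners = volmesh_bounding_box(volmesh)
#     assert len(corners) == 8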
| 21.266667 | 79 | 0.69279 |
79446b85365ac00f9d70d829413085387b43bff9 | 3,682 | py | Python | tests/preprocessing/test_clean.py | krisjb/MHDataLearn | 59dcd4a6c3945b4ae6d6777834cbc801cbbf3dab | [
"MIT"
] | null | null | null | tests/preprocessing/test_clean.py | krisjb/MHDataLearn | 59dcd4a6c3945b4ae6d6777834cbc801cbbf3dab | [
"MIT"
] | null | null | null | tests/preprocessing/test_clean.py | krisjb/MHDataLearn | 59dcd4a6c3945b4ae6d6777834cbc801cbbf3dab | [
"MIT"
] | 1 | 2022-01-26T13:23:47.000Z | 2022-01-26T13:23:47.000Z | import pandas as pd
# import numpy as np
# from datetime import timedelta
# from datetime import datetime
from MHDataLearn.preprocessing.clean import age_check, \
gender_replace, \
marital_replace, \
accom_replace, \
employ_replace, \
mhclass_replace
def test_gender_replace():
test_df = pd.read_csv('https://raw.githubusercontent.com/krisjb/'
'MHDataLearn/main/Data/DummyData.csv')
test_df = gender_replace(test_df)
gender_uniquevalues = test_df["Gender"].nunique(dropna=True)
assert gender_uniquevalues == 3
def test_marital_replace():
test_df = pd.read_csv('https://raw.githubusercontent.com/krisjb/'
'MHDataLearn/main/Data/DummyData.csv')
test_df = marital_replace(test_df)
marital_uniquevalues = test_df["MaritalStatus"].nunique(dropna=True)
assert marital_uniquevalues == 2
def test_accom_replace():
test_df = pd.read_csv('https://raw.githubusercontent.com/krisjb/'
'MHDataLearn/main/Data/DummyData.csv')
test_df = accom_replace(test_df)
accom_uniquevalues = test_df["SettledAccommodationInd"].nunique(dropna=True)
assert accom_uniquevalues == 2
def test_employ_replace():
test_df = pd.read_csv('https://raw.githubusercontent.com/krisjb/'
'MHDataLearn/main/Data/DummyData.csv')
test_df = employ_replace(test_df)
employ_uniquevalues = test_df["EmployStatus"].nunique(dropna=True)
assert employ_uniquevalues == 2
def test_mhclass_replace():
test_df = pd.read_csv('https://raw.githubusercontent.com/krisjb/'
'MHDataLearn/main/Data/DummyData.csv')
test_df = mhclass_replace(test_df)
mhclass_uniquevalues = test_df["MHCareClusterSuperClass"].nunique(dropna=True)
assert mhclass_uniquevalues == 4
# class TestClean(unittest.TestCase):
# """
# This class provides test for all the functions inside the clean module.
# """
# def test_age_check1(self):
# """
# This is a test for the function age_check
# """
# unique_age = df["age_admit"].unique(dropna=False)
# self.assertGreaterEqual(a in unique_age, 16)
# def test_age_check2(self):
# """
# This is a test for the function age_check
# """
# unique_age = df["age_admit"].unique(dropna=False)
# self.assertLessEqual(a in unique_age, 110)
# def test_gender_replace(self):
# """
# This is a test for the function gender_replace
# """
# gender_uniquevalues = df["Gender"].nunique(dropna=True)
# self.assertEqual(gender_uniquevalues, 3)
# def test_marital_replace(self):
# """
# This is a test for the function marital_replace
# """
# marital_uniquevalues = df["MaritalStatus"].nunique(dropna=True)
# self.assertEqual(marital_uniquevalues, 2)
# def test_accom_replace(self):
# """
# This is a test for the function accom_replace
# """
# accom_uniquevalues = df["SettledAccommodationInd"].nunique(dropna=True)
# self.assertEqual(accom_uniquevalues, 2)
# def test_employ_replace(self):
# """
# This is a test for the function employ_replace
# """
# employ_uniquevalues = df["EmployStatus"].nunique(dropna=True)
# self.assertEqual(employ_uniquevalues, 2) | 34.092593 | 82 | 0.609451 |
79446c04ca26a30b47e50b8708212b758e762a23 | 790 | py | Python | pydbus_manager/publication.py | Seebz/pydbus-manager | 6b576b969cbda50521dca62a7df929167207f9fc | [
"WTFPL"
] | 2 | 2018-07-22T09:13:47.000Z | 2019-05-06T20:00:35.000Z | pydbus_manager/publication.py | Seebz/pydbus-manager | 6b576b969cbda50521dca62a7df929167207f9fc | [
"WTFPL"
] | 1 | 2019-07-16T06:56:42.000Z | 2019-07-16T21:35:19.000Z | pydbus_manager/publication.py | Seebz/pydbus-manager | 6b576b969cbda50521dca62a7df929167207f9fc | [
"WTFPL"
] | 1 | 2018-07-18T06:10:28.000Z | 2018-07-18T06:10:28.000Z | # -*- coding: utf-8 -*-
NODE_INFO = """
<node>
<interface name="org.freedesktop.DBus.ObjectManager">
<method name="GetManagedObjects">
<arg type="a{oa{sa{sv}}}" name="object_paths_interfaces_and_properties" direction="out"/>
</method>
<signal name="InterfacesAdded">
<arg type="o" name="object_path"/>
<arg type="a{sa{sv}}" name="interfaces_and_properties"/>
</signal>
<signal name="InterfacesRemoved">
<arg type="o" name="object_path"/>
<arg type="as" name="interfaces"/>
</signal>
</interface>
</node>
"""
class PublicationMixin(object):
__slots__ = ()
def publish(self):
"""Expose itself on the bus."""
bus = self.bus
bus_name = self.bus_name
path = self.path
publication = bus.publish(bus_name, (path, self, NODE_INFO))
return publication
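# Usage sketch (hypothetical manager class): an object mixing this in and providing
# `bus`, `bus_name` and `path` can expose itself with the ObjectManager XML above.
#
#     manager = Manager(...)          # some class that uses PublicationMixin
#     publication = manager.publish()
#     ...                             # serve requests from a GLib main loop
#     publication.unpublish()         # assuming pydbus' publication object, which
#                                     # supports unpublish()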
| 24.6875 | 92 | 0.670886 |
79446cc7b0cfec6535a3e0b6cd5772eac5108595 | 4,443 | py | Python | wzk/files.py | DDDong2666/tum-adlr-ws20-02 | 2e439886e0287777589cd276d614fd03bea4ed0c | [
"MIT"
] | null | null | null | wzk/files.py | DDDong2666/tum-adlr-ws20-02 | 2e439886e0287777589cd276d614fd03bea4ed0c | [
"MIT"
] | null | null | null | wzk/files.py | DDDong2666/tum-adlr-ws20-02 | 2e439886e0287777589cd276d614fd03bea4ed0c | [
"MIT"
] | null | null | null | import os
import pickle
import shutil
import platform
import subprocess
__pickle_extension = '.pkl'
__open_cmd_dict = {'Linux': 'xdg-open',
'Darwin': 'open',
'Windows': 'start'}
def get_pythonpath():
try:
return os.environ['PYTHONPATH'].split(os.pathsep)
except KeyError:
return []
def safe_remove(file):
if os.path.exists(file):
os.remove(file)
else:
pass
def start_open(file):
open_cmd = __open_cmd_dict[platform.system()]
subprocess.Popen([f'{open_cmd} {file}'], shell=True)
def save_object2txt(obj, file_name):
if file_name[-4:] != '.txt' and '.' not in file_name:
file_name += '.txt'
with open(file_name, 'w') as f:
f.write(''.join(["%s: %s\n" % (k, v) for k, v in obj.__dict__.items()]))
def save_pickle(obj, file):
if file[-4:] != __pickle_extension:
file += __pickle_extension
with open(file, 'wb') as f:
pickle.dump(obj, f)
def load_pickle(file):
if file[-4:] != __pickle_extension:
file += __pickle_extension
with open(file, 'rb') as f:
ret_di = pickle.load(f)
return ret_di
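# Usage sketch: both helpers append '.pkl' automatically when the given name does
# not already end with the pickle extension, so a bare base name round-trips.
#
#     save_pickle({'a': 1}, '/tmp/example_obj')   # writes /tmp/example_obj.pkl
#     data = load_pickle('/tmp/example_obj')      # -> {'a': 1}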
def list_files(folder):
return [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]
def safe_create_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def split_directory_file(file):
d, f = os.path.split(file)
d += '/'
return d, f
def ensure_final_slash(path):
if path[-1] != '/':
path += '/'
return path
def ensure_initial_slash(path):
if path[0] != '/':
path = '/' + path
return path
def ensure_initial_and_final_slash(path):
path = ensure_initial_slash(path=path)
path = ensure_final_slash(path=path)
return path
def point_extension_wrapper(ext):
if ext[0] != '.':
ext = '.' + ext
return ext
def ensure_file_extension(*, file, ext):
ext = point_extension_wrapper(ext)
    if file[-len(ext):] != ext:  # compare the whole suffix, not a single character
idx_dot = file.find('.')
if idx_dot != -1:
file = file[:idx_dot]
file += ext
return file
def rel2abs_path(path, abs_directory):
# abs_directory = '/Hello/HowAre/You/'
# path = 'Hello/HowAre/You/good.txt'
# path = 'good.txt'
abs_directory = ensure_initial_slash(path=abs_directory)
abs_directory = os.path.normpath(path=abs_directory)
path = ensure_initial_slash(path=path)
path = os.path.normpath(path=path)
if abs_directory in path:
return path
else:
return os.path.normpath(abs_directory + path)
def __read_head_tail(*, file, n=1, squeeze=True, head_or_tail):
s = os.popen(f"{head_or_tail} -n {n} {file}").read()
s = s.split('\n')[:-1]
if squeeze and len(s) == 1:
s = s[0]
return s
def read_head(file, n=1, squeeze=True):
return __read_head_tail(file=file, n=n, squeeze=squeeze, head_or_tail='head')
def read_tail(file, n=1, squeeze=True):
return __read_head_tail(file=file, n=n, squeeze=squeeze, head_or_tail='tail')
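# Usage sketch (POSIX only, hypothetical path): both helpers shell out to head/tail
# via os.popen.  With squeeze=True (the default) a single line comes back as a
# string, otherwise as a list of strings.
#
#     first_line = read_head('/var/log/syslog')
#     last_lines = read_tail('/var/log/syslog', n=5)   # list of 5 strings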
def copy2clipboard(file):
"""
https://apple.stackexchange.com/questions/15318/using-terminal-to-copy-a-file-to-clipboard
-> works only for mac!
"""
subprocess.run(['osascript',
'-e',
'set the clipboard to POSIX file "{}"'.format(file)])
# shutil.move("path/to/current/file.foo", "path/to/new/destination/for/file.foo")
def split_files_into_dirs(file_list, bool_fun, dir_list, base_dir=None, mode='dry'):
if base_dir is not None:
base_dir = os.path.normpath(base_dir)
else:
base_dir = ''
if file_list is None and base_dir:
file_list = os.listdir(base_dir)
print(f'Get file_list from {base_dir}')
for i, d_i in enumerate(dir_list):
d_i = os.path.normpath(d_i)
print(f"->{d_i}")
j = 0
while j < len(file_list):
f_j = file_list[j]
if bool_fun(f_j, i):
f_j = os.path.normpath(f_j)
f_j_new = f"{d_i}/{os.path.split(f_j)[-1]}"
if mode == 'wet':
shutil.move(f"{base_dir}/{f_j}", f_j_new)
print(f_j)
file_list.pop(j)
else:
j += 1
if mode != 'wet':
print()
print("'dry' mode is activated by default, to apply the changes use mode='wet')")
| 23.632979 | 94 | 0.595093 |
79446d47400281bbc70de69b1315466087e8a500 | 125 | py | Python | Practice/Regex/Introduction/Matching_Start_End.py | alexanderbauer89/HackerRank | 0fb6face083b0183692c9251ffe4bb635591393f | [
"MIT"
] | 1 | 2021-11-17T02:47:11.000Z | 2021-11-17T02:47:11.000Z | Practice/Regex/Introduction/Matching_Start_End.py | alexanderbauer89/HackerRank | 0fb6face083b0183692c9251ffe4bb635591393f | [
"MIT"
] | null | null | null | Practice/Regex/Introduction/Matching_Start_End.py | alexanderbauer89/HackerRank | 0fb6face083b0183692c9251ffe4bb635591393f | [
"MIT"
] | null | null | null | Regex_Pattern = r"^\d\w{4}[.]$" # Do not delete 'r'.
import re
print(str(bool(re.search(Regex_Pattern, input()))).lower())
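# The pattern anchors both ends: one digit, exactly four word characters
# ([A-Za-z0-9_]), then a literal dot.  For example "1word." and "9_ab1." print
# "true", while "12345" (no dot) and "1abc." (only three word characters) print
# "false".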
| 20.833333 | 59 | 0.64 |
79446d498b339af896f2f0b52c2a3bd925aa38dc | 381 | py | Python | main/migrations/0007_banner_status.py | Dekatron322/IotexChart | 481e117278df8910facb17b0b9099921592e4a50 | [
"Apache-2.0"
] | 1 | 2021-12-22T23:02:59.000Z | 2021-12-22T23:02:59.000Z | main/migrations/0007_banner_status.py | IOTEXCHART/IotexChart | 593dddc4885daedc43af82ea4c388d1bf1938c70 | [
"Apache-2.0"
] | null | null | null | main/migrations/0007_banner_status.py | IOTEXCHART/IotexChart | 593dddc4885daedc43af82ea4c388d1bf1938c70 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.9 on 2021-12-04 23:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0006_banner_pub_date'),
]
operations = [
migrations.AddField(
model_name='banner',
name='status',
field=models.BooleanField(default=False),
),
]
| 20.052632 | 53 | 0.593176 |
79446df956eecceaee5069ab3455dba4b17baafd | 1,063 | py | Python | venv-howto/bin/pilfont.py | frknnlzts/howto | bba8b8080df480c2ee95a7f9913a7153cc02f65e | [
"Apache-2.0"
] | 2 | 2018-02-12T09:54:33.000Z | 2019-03-31T14:32:32.000Z | venv-howto/bin/pilfont.py | frknnlzts/howto | bba8b8080df480c2ee95a7f9913a7153cc02f65e | [
"Apache-2.0"
] | 8 | 2017-12-27T16:19:25.000Z | 2017-12-29T00:21:03.000Z | venv-howto/bin/pilfont.py | frknnlzts/howto | bba8b8080df480c2ee95a7f9913a7153cc02f65e | [
"Apache-2.0"
] | 1 | 2019-12-18T09:33:29.000Z | 2019-12-18T09:33:29.000Z | #!/home/cracker/Desktop/howto/venv-howto/bin/python3.5
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
import glob
import sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
VERSION = "0.4"
if len(sys.argv) <= 1:
print("PILFONT", VERSION, "-- PIL font compiler.")
print()
print("Usage: pilfont fontfiles...")
print()
print("Convert given font files to the PIL raster font format.")
print("This version of pilfont supports X BDF and PCF fonts.")
sys.exit(1)
files = []
for f in sys.argv[1:]:
files = files + glob.glob(f)
for f in files:
print(f + "...", end=' ')
try:
fp = open(f, "rb")
try:
p = PcfFontFile.PcfFontFile(fp)
except SyntaxError:
fp.seek(0)
p = BdfFontFile.BdfFontFile(fp)
p.save(f)
except (SyntaxError, IOError):
print("failed")
else:
print("OK")
| 18.327586 | 68 | 0.605833 |
79446e1c371f33713514e0bb6d2d58b57035acd6 | 592 | py | Python | CORE/Tools/Helpers/generateComponentImplementation.py | pike4/CORE-Dev-Build-1.0 | 08376bae09a8bb3598f4aa8edfacd1ca1c45eaa3 | [
"MIT"
] | 3 | 2016-10-06T22:42:50.000Z | 2016-10-14T16:04:46.000Z | CORE/Tools/Helpers/generateComponentImplementation.py | pike4/CORE-Dev-Build-1.0 | 08376bae09a8bb3598f4aa8edfacd1ca1c45eaa3 | [
"MIT"
] | null | null | null | CORE/Tools/Helpers/generateComponentImplementation.py | pike4/CORE-Dev-Build-1.0 | 08376bae09a8bb3598f4aa8edfacd1ca1c45eaa3 | [
"MIT"
] | null | null | null | def genCompImpl(className, sourcePath):
sourceFile = open(sourcePath + ".cpp", "w+")
sourceFile.write("#include \"" + className + ".h\"\n\n")
sourceFile.write(className + "::" + className + "()\n")
sourceFile.write("{\n\n}\n");
sourceFile.write("void " + className + "::handle(Event e)\n")
sourceFile.write("{\n\n}\n")
sourceFile.write("void " + className + "::getText(Node* def)\n")
sourceFile.write("{\n\n}\n")
sourceFile.write("void " + className + "::get_data(DataSource* source)\n")
sourceFile.write("{\n\n}\n")
sourceFile.close()
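# Usage sketch (hypothetical names): writes Position.cpp next to the given path,
# containing an empty constructor plus stub handle/getText/get_data bodies that
# include "Position.h".
#
#     genCompImpl("Position", "src/Components/Position")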
| 45.538462 | 79 | 0.596284 |
79446efeada386507db2a75ba5af6611eaedd443 | 381 | py | Python | gale/general/errors.py | adamrpah/GALE | 94ab2613c5d53ea471f664a75c7d780a2689302f | [
"WTFPL"
] | null | null | null | gale/general/errors.py | adamrpah/GALE | 94ab2613c5d53ea471f664a75c7d780a2689302f | [
"WTFPL"
] | null | null | null | gale/general/errors.py | adamrpah/GALE | 94ab2613c5d53ea471f664a75c7d780a2689302f | [
"WTFPL"
] | null | null | null | '''
File: errors.py
Author: Adam Pah
Description:
Error Handling
'''
from __future__ import division, print_function
def generic_error_handler(message=''):
'''
Generic handler for custom thrown exceptions.
input:
m - message to display
'''
import sys
print( 'ERROR: Stopped Execution due to Incorrect Usage' )
print( message )
sys.exit()
| 19.05 | 62 | 0.671916 |
79446f19034528a9b79c78a5be967c7918a9f912 | 41,627 | py | Python | pytoshop/image_resources.py | Aixile/pytoshop | 2cc090dff8ef00ce8cfb97bf7a5fdde34b7f4cd4 | [
"BSD-3-Clause"
] | 1 | 2020-04-15T23:14:39.000Z | 2020-04-15T23:14:39.000Z | pytoshop/image_resources.py | Aixile/pytoshop | 2cc090dff8ef00ce8cfb97bf7a5fdde34b7f4cd4 | [
"BSD-3-Clause"
] | null | null | null | pytoshop/image_resources.py | Aixile/pytoshop | 2cc090dff8ef00ce8cfb97bf7a5fdde34b7f4cd4 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The `ImageResources` section.
Image resource blocks are the basic building unit of several file
formats, including Photoshop's native file format, JPEG, and
TIFF. Image resources are used to store non-pixel data associated with
images, such as pen tool paths.
"""
from __future__ import unicode_literals, absolute_import
import six
import numpy as np
from . import docs
from . import enums
from . import util
from typing import Any, BinaryIO, Dict, List, Optional, Type, TYPE_CHECKING, Union # NOQA
if TYPE_CHECKING:
from . import core # NOQA
class _ImageResourceBlockMeta(type):
"""
A metaclass that builds a mapping of subclasses.
"""
mapping = {} # type: Dict[int, Type[ImageResourceBlock]]
def __new__(cls, name, parents, dct):
new_cls = type.__new__(cls, name, parents, dct)
if '_resource_id' in dct and isinstance(dct['_resource_id'], int):
resource_id = dct['_resource_id']
if resource_id in cls.mapping:
raise ValueError(
"Duplicate resource_id '{}'".format(
resource_id))
cls.mapping[resource_id] = new_cls
return new_cls
@six.add_metaclass(_ImageResourceBlockMeta)
class ImageResourceBlock(object):
"""
Stores a single image resource block.
``pytoshop`` currently doesn't deeply parse image resource
blocks. The raw data is merely retained for round-tripping.
"""
_resource_id = -1
@property
def name(self): # type: (...) -> unicode
"Name of image resource."
return self._name
@name.setter
def name(self, value): # type: (Union[bytes, unicode]) -> None
if isinstance(value, bytes):
value = value.decode('ascii')
if (not isinstance(value, six.text_type) or
len(value) > 255):
raise ValueError("name must be unicode string of length < 255")
self._name = value
@property
def resource_id(self): # type: (...) -> int
"Type of image resource."
return self._resource_id
def length(self, header): # type: (core.Header) -> int
data_length = self.data_length(header)
length = (
4 + 2 +
util.pascal_string_length(self.name, 2) +
4 + data_length
)
if data_length % 2 != 0:
length += 1
return length
length.__doc__ = docs.length # type: ignore
def total_length(self, header): # type: (core.Header) -> int
return self.length(header)
total_length.__doc__ = docs.total_length # type: ignore
def data_length(self, header): # type: (core.Header) -> int
raise NotImplementedError()
@classmethod
@util.trace_read
def read(cls, fd, header):
# type: (BinaryIO, core.Header) -> ImageResourceBlock
signature = fd.read(4)
if signature != b'8BIM':
raise ValueError('Invalid image resource block signature')
resource_id = util.read_value(fd, 'H')
name = util.read_pascal_string(fd, 2)
data_length = util.read_value(fd, 'I')
util.log(
"resource_id: {}, name: {}, data_length: {}",
resource_id, name, data_length
)
new_cls = _ImageResourceBlockMeta.mapping.get(
resource_id, GenericImageResourceBlock)
start = fd.tell()
result = new_cls.read_data(fd, resource_id, name, data_length, header)
end = fd.tell()
if end - start != data_length:
raise ValueError("{} read the wrong amount".format(new_cls))
if data_length % 2 != 0:
fd.read(1)
return result
read.__func__.__doc__ = docs.read # type: ignore
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
data_length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
raise NotImplementedError()
@util.trace_write
def write(self, fd, header):
# type: (BinaryIO, core.Header) -> None
fd.write(b'8BIM')
util.write_value(fd, 'H', self.resource_id)
util.write_pascal_string(fd, self.name, 2)
length = self.data_length(header)
util.write_value(fd, 'I', length)
start = fd.tell()
self.write_data(fd, header)
end = fd.tell()
if end - start != length:
raise ValueError(
"{} wrote the wrong amount".format(self.__class__))
if length % 2 != 0:
fd.write(b'\0')
    write.__doc__ = docs.write  # type: ignore
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
raise NotImplementedError()
class GenericImageResourceBlock(ImageResourceBlock):
def __init__(self, name='', resource_id=0, data=b''):
self.name = name
self.resource_id = resource_id
self.data = data
@property
def resource_id(self): # type: (...) -> int
"Type of image resource."
return self._resource_id
@resource_id.setter
def resource_id(self, value): # type: (int) -> None
if (not isinstance(value, int) or
value < 0 or value > (1 << 16)):
raise ValueError(
"resource_id must be a 16-bit positive integer"
)
self._resource_id = value
@property
def data(self): # type: (...) -> bytes
"Raw data of image resource."
return self._data
@data.setter
def data(self, value): # type: (bytes) -> None
if (not isinstance(value, bytes) or
len(value) > (1 << 32)):
raise ValueError("data must be a byte string")
self._data = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
data = fd.read(length)
return cls(resource_id=resource_id, name=name, data=data)
def data_length(self, header): # type: (core.Header) -> int
return len(self.data)
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
fd.write(self.data)
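# Usage sketch (hypothetical values): resource ids without a dedicated subclass
# round-trip through GenericImageResourceBlock, which simply keeps the raw bytes.
#
#     block = GenericImageResourceBlock(
#         name='my resource', resource_id=0x0bb7, data=b'\x00\x01')
#     # written out as part of the ImageResources section, e.g.
#     # block.write(fd, header)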
class ImageResourceUnicodeString(ImageResourceBlock):
def __init__(self,
name='', # type: unicode
value='' # type: unicode
): # type: (...) -> None
self.name = name
self.value = value
@property
def value(self): # type: (...) -> unicode
return self._value
@value.setter
def value(self, value): # type: (unicode) -> None
if (not isinstance(value, six.text_type) or
len(value) > (1 << 32)):
raise TypeError("value must be a unicode string")
self._value = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
data = fd.read(length)
value = util.decode_unicode_string(data)
return cls(
name=name, value=value
)
def data_length(self, header): # type: (core.Header) -> int
return util.unicode_string_length(self.value)
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
fd.write(util.encode_unicode_string(self.value))
class LayersGroupInfo(ImageResourceBlock):
"""
Layers group information.
Indicates which layers are locked together.
"""
def __init__(self,
name='', # type: unicode
group_ids=[] # type: List[int]
): # type: (...) -> None
self.name = name
self.group_ids = group_ids
_resource_id = enums.ImageResourceID.layers_group_info
@property
def group_ids(self): # type: (...) -> List[int]
return self._group_ids
@group_ids.setter
def group_ids(self, value): # type: (List[int]) -> None
util.assert_is_list_of(value, int, min=0, max=65535)
self._group_ids = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
data = fd.read(length)
group_ids = np.frombuffer(data, '>u2').tolist()
return cls(name=name, group_ids=group_ids)
def data_length(self, header): # type: (core.Header) -> int
return len(self.group_ids * 2)
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
data = np.array(self.group_ids, '>u2').tobytes()
fd.write(data)
class BorderInfo(ImageResourceBlock):
"""
Border information.
"""
def __init__(self,
name='', # type: unicode
border_width_num=0, # type: int
border_width_den=1, # type: int
unit=enums.Units.inches # type: int
): # type: (...) -> None
self.name = name
self.border_width_num = border_width_num
self.border_width_den = border_width_den
self.unit = unit
_resource_id = enums.ImageResourceID.border_info
@property
def border_width_num(self): # type: (...) -> int
"Border width numerator"
return self._border_width_num
@border_width_num.setter
def border_width_num(self, value): # type: (int) -> None
if (not isinstance(value, int) or
value < 0 or value > 65535):
raise ValueError(
"border_width_num must be integer in range 0-65535"
)
self._border_width_num = value
@property
def border_width_den(self): # type: (...) -> int
"Border width denominator"
return self._border_width_den
@border_width_den.setter
def border_width_den(self, value): # type: (int) -> None
if (not isinstance(value, int) or
value < 1 or value > 65535):
raise ValueError(
"border_width_den must be integer in range 1-65535"
)
self._border_width_den = value
@property
def unit(self): # type: (...) -> int
"Unit. See `enums.Units`."
return self._unit
@unit.setter
def unit(self, value): # type: (int) -> None
if value not in list(enums.Units): # type: ignore
raise ValueError("Invalid unit.")
self._unit = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
num, den, unit = util.read_value(fd, 'HHH')
return cls(
name=name, border_width_num=num, border_width_den=den,
unit=unit)
def data_length(self, header): # type: (core.Header) -> int
return 6
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(
fd, 'HHH', self.border_width_num,
self.border_width_den, self.unit
)
class BackgroundColor(ImageResourceBlock):
"""
Background color.
"""
def __init__(self,
name='', # type: unicode
color_space=enums.ColorSpace.rgb, # type: int
color=[] # type: List[int]
): # type: (...) -> None
self.name = name
self.color_space = color_space
self.color = color
_resource_id = enums.ImageResourceID.background_color
@property
def color_space(self): # type: (...) -> int
"The color space. See `enums.ColorSpace`"
return self._color_space
@color_space.setter
def color_space(self, value): # type: (int) -> None
if value not in list(enums.ColorSpace): # type: ignore
raise ValueError("Invalid color space.")
self._color_space = value
@property
def color(self): # type: (...) -> List[int]
"""
The color data. If the color data does not require 4 values,
the extra values are undefined and should be included as
zeros.
"""
return self._color
@color.setter
def color(self, value): # type: (List[int]) -> None
util.assert_is_list_of(value, int, -32767, 65536)
if len(value) < 1 or len(value) > 4:
raise ValueError("Color must be of length 1-4")
self._color = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
space_id, a, b, c, d = util.read_value(fd, 'HHHHH')
if space_id == enums.ColorSpace.lab:
b -= 32767
c -= 32767
return cls(
name=name, color_space=space_id, color=[a, b, c, d]
)
def data_length(self, header): # type: (core.Header) -> int
return 10
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
color = self.color[:]
color.extend([0] * (4 - len(color)))
a, b, c, d = color
if self.color_space == enums.ColorSpace.lab:
b += 32767
c += 32767
util.write_value(
fd, 'HHHHH', self.color_space, a, b, c, d
)
class PrintFlags(ImageResourceBlock):
"""
Print flags.
"""
def __init__(self,
name='', # type: unicode
labels=False, # type: bool
crop_marks=False, # type: bool
color_bars=False, # type: bool
registration_marks=False, # type: bool
negative=False, # type: bool
flip=False, # type: bool
interpolate=False, # type: bool
caption=False, # type: bool
print_flags=False # type: bool
): # type: (...) -> None
self.name = name
self.labels = labels
self.crop_marks = crop_marks
self.color_bars = color_bars
self.registration_marks = registration_marks
self.negative = negative
self.flip = flip
self.interpolate = interpolate
self.caption = caption
self.print_flags = print_flags
_resource_id = enums.ImageResourceID.print_flags
@property
def labels(self): # type: (...) -> bool
"labels"
return self._labels
@labels.setter
def labels(self, value): # type: (Any) -> None
self._labels = bool(value)
@property
def crop_marks(self): # type: (...) -> bool
"crop marks"
return self._crop_marks
@crop_marks.setter
def crop_marks(self, value): # type: (Any) -> None
self._crop_marks = bool(value)
@property
def color_bars(self): # type: (...) -> bool
"color bars"
return self._color_bars
@color_bars.setter
def color_bars(self, value): # type: (Any) -> None
self._color_bars = bool(value)
@property
def registration_marks(self): # type: (...) -> bool
"registration marks"
return self._registration_marks
@registration_marks.setter
def registration_marks(self, value): # type: (Any) -> None
self._registration_marks = bool(value)
@property
def negative(self): # type: (...) -> bool
"negative"
return self._negative
@negative.setter
def negative(self, value): # type: (Any) -> None
self._negative = bool(value)
@property
def flip(self): # type: (...) -> bool
"flip"
return self._flip
@flip.setter
def flip(self, value): # type: (Any) -> None
self._flip = bool(value)
@property
def interpolate(self): # type: (...) -> bool
"interpolate"
return self._interpolate
@interpolate.setter
def interpolate(self, value): # type: (Any) -> None
self._interpolate = bool(value)
@property
def caption(self): # type: (...) -> bool
"caption"
return self._caption
@caption.setter
def caption(self, value): # type: (Any) -> None
self._caption = bool(value)
@property
def print_flags(self): # type: (...) -> bool
"print flags"
return self._print_flags
@print_flags.setter
def print_flags(self, value): # type: (Any) -> None
self._print_flags = bool(value)
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
vals = util.read_value(fd, 'BBBBBBBBB')
vals = [bool(x) for x in vals]
return cls(
name=name, labels=vals[0], crop_marks=vals[1],
color_bars=vals[2], registration_marks=vals[3],
negative=vals[4], flip=vals[5], interpolate=vals[6],
caption=vals[7], print_flags=vals[8]
)
def data_length(self, header): # type: (core.Header) -> int
return 9
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
vals = [
self.labels, self.crop_marks, self.color_bars,
self.registration_marks, self.negative, self.flip,
self.interpolate, self.caption, self.print_flags
]
int_vals = [(x and 255 or 0) for x in vals]
util.write_value(fd, 'BBBBBBBBB', *int_vals)
class GuideResourceBlock(object):
def __init__(self,
location=0, # type: int
direction=enums.GuideDirection.vertical # type: int
): # type: (...) -> None
self.location = location
self.direction = direction
@property
def location(self): # type: (...) -> int
"Location of guide in document coordinates."
return self._location
@location.setter
def location(self, value): # type: (int) -> None
if (not isinstance(value, int) or
value < 0 or value > (1 << 32)):
raise ValueError("location must be a 32-bit unsigned int")
self._location = value
@property
def direction(self): # type: (...) -> int
"Guide direction. See `enums.GuideDirection`."
return self._direction
@direction.setter
def direction(self, value): # type: (int) -> None
if value not in list(enums.GuideDirection): # type: ignore
raise ValueError("Invalid guide direction")
self._direction = value
@classmethod
@util.trace_read
def read(cls, fd, header):
# type: (BinaryIO, core.Header) -> GuideResourceBlock
location, direction = util.read_value(fd, 'IB')
return cls(location=location, direction=direction)
@util.trace_write
def write(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'IB', self.location, self.direction)
def data_length(self, header): # type: (core.Header) -> int
return 5
class GridAndGuidesInfo(ImageResourceBlock):
"""
Grid and guides resource.
"""
def __init__(self,
name='', # type: unicode
grid_hori=0, # type: int
grid_vert=0, # type: int
guides=[] # type: List[GuideResourceBlock]
): # type: (...) -> None
self.name = name
self.grid_hori = grid_hori
self.grid_vert = grid_vert
self.guides = guides
_resource_id = enums.ImageResourceID.grid_and_guides_info
@property
def version(self): # type: (...) -> int
return 1
@property
def grid_hori(self): # type: (...) -> int
"Document-specific grid (horizontal). In 1/32 pt."
return self._grid_hori
@grid_hori.setter
def grid_hori(self, value): # type: (int) -> None
if (not isinstance(value, int) or
value < 0 or value > (1 << 32)):
raise ValueError("grid_hori must be a 32-bit unsigned int")
self._grid_hori = value
@property
def grid_vert(self): # type: (...) -> int
"Document-specific grid (vertical). In 1/32 pt."
return self._grid_vert
@grid_vert.setter
def grid_vert(self, value): # type: (int) -> None
if (not isinstance(value, int) or
value < 0 or value > (1 << 32)):
raise ValueError("grid_vert must be a 32-bit unsigned int")
self._grid_vert = value
@property
def guides(self): # type: (...) -> List[GuideResourceBlock]
"Guides. See `GuideResourceBlock`."
return self._guides
@guides.setter
def guides(self, value): # type: (List[GuideResourceBlock]) -> None
util.assert_is_list_of(value, GuideResourceBlock)
self._guides = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
version, grid_hori, grid_vert, nguides = util.read_value(
fd, 'IIII')
if version != 1:
raise ValueError(
"Unknown version {} in grid and guides info block.".format(
version))
guides = []
for i in range(nguides):
guides.append(GuideResourceBlock.read(fd, header))
return cls(
name=name, grid_hori=grid_hori, grid_vert=grid_vert,
guides=guides
)
def data_length(self, header): # type: (core.Header) -> int
return 16 + (5 * len(self.guides))
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(
fd, 'IIII', self.version,
self.grid_hori, self.grid_vert,
len(self.guides)
)
for guide in self.guides:
guide.write(fd, header)
class CopyrightFlag(ImageResourceBlock):
def __init__(self,
name='', # type: unicode
copyright=False # type: bool
): # type: (...) -> None
self.name = name
self.copyright = copyright
_resource_id = enums.ImageResourceID.copyright_flag
@property
def copyright(self): # type: (...) -> bool
"Is copyrighted?"
return self._copyright
@copyright.setter
def copyright(self, value): # type: (Any) -> None
self._copyright = bool(value)
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
copyright = bool(util.read_value(fd, 'B'))
return cls(
name=name, copyright=copyright
)
def data_length(self, header): # type: (core.Header) -> int
return 1
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'B', self.copyright and 255 or 0)
class Url(ImageResourceBlock):
def __init__(self,
name='', # type: unicode
url=b'' # type: bytes
): # type: (...) -> None
self.name = name
self.url = url
_resource_id = enums.ImageResourceID.url
@property
def url(self): # type: (...) -> bytes
"URL"
return self._url
@url.setter
def url(self, value): # type: (bytes) -> None
if not isinstance(value, bytes):
raise TypeError("url must be bytes string")
self._url = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
url = fd.read(length)
return cls(
name=name, url=url
)
def data_length(self, header): # type: (core.Header) -> int
return len(self.url)
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
fd.write(self.url)
class GlobalAngle(ImageResourceBlock):
def __init__(self,
name='', # type: unicode
angle=0 # type: int
): # type: (...) -> None
self.name = name
self.angle = angle
_resource_id = enums.ImageResourceID.global_angle
@property
def angle(self): # type: (...) -> int
"Global light angle for the effect layer"
return self._angle
@angle.setter
def angle(self, value): # type: (int) -> None
if (not isinstance(value, int) or
value < -360 or value > 360):
raise ValueError(
"angle must be an int in range -360 to 360"
)
self._angle = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
angle = util.read_value(fd, 'i')
return cls(
name=name, angle=angle
)
def data_length(self, header): # type: (core.Header) -> int
return 4
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'i', self.angle)
class EffectsVisible(ImageResourceBlock):
def __init__(self,
name='', # type: unicode
visible=False # type: bool
): # type: (...) -> None
self.name = name
self.visible = visible
_resource_id = enums.ImageResourceID.effects_visible
@property
def visible(self): # type: (...) -> bool
"Are effects visible?"
return self._visible
@visible.setter
def visible(self, value): # type: (Any) -> None
self._visible = bool(value)
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
visible = bool(util.read_value(fd, 'B'))
return cls(
name=name, visible=visible
)
def data_length(self, header): # type: (core.Header) -> int
return 1
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'B', self.visible and 255 or 0)
class DocumentSpecificIdsSeedNumber(ImageResourceBlock):
def __init__(self,
name='', # type: unicode
base_value=0 # type: int
): # type: (...) -> None
self.name = name
self.base_value = base_value
_resource_id = enums.ImageResourceID.document_specific_ids_seed_number
@property
def base_value(self): # type: (...) -> int
"Base value"
return self._base_value
@base_value.setter
def base_value(self, value): # type: (int) -> None
if (not isinstance(value, int) or
value < 0 or value > (1 << 32)):
raise ValueError("base_value must be a 32-bit integer")
self._base_value = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
        # the seed is a 32-bit integer; don't collapse it to a bool
        base_value = util.read_value(fd, 'I')
return cls(
name=name, base_value=base_value
)
def data_length(self, header): # type: (core.Header) -> int
return 4
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'I', self.base_value)
class UnicodeAlphaNames(ImageResourceUnicodeString):
_resource_id = enums.ImageResourceID.unicode_alpha_names
class GlobalAltitude(ImageResourceBlock):
def __init__(self,
name='', # type: unicode
altitude=0 # type: int
): # type: (...) -> None
self.name = name
self.altitude = altitude
_resource_id = enums.ImageResourceID.global_altitude
@property
def altitude(self): # type: (...) -> int
"Global altitude"
return self._altitude
@altitude.setter
def altitude(self, value): # type: (int) -> None
if (not isinstance(value, int) or
value < 0 or value > (1 << 32)):
raise ValueError("altitude must be a 32-bit integer")
self._altitude = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
altitude = util.read_value(fd, 'I')
return cls(
name=name, altitude=altitude
)
def data_length(self, header): # type: (core.Header) -> int
return 4
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'I', self.altitude)
class WorkflowUrl(ImageResourceUnicodeString):
_resource_id = enums.ImageResourceID.workflow_url
class AlphaIdentifiers(ImageResourceBlock):
def __init__(self,
name='', # type: unicode
identifiers=[] # type: List[int]
): # type: (...) -> None
self.name = name
self.identifiers = identifiers
_resource_id = enums.ImageResourceID.alpha_identifiers
@property
def identifiers(self): # type: (...) -> List[int]
"Alpha indentifiers"
return self._identifiers
@identifiers.setter
def identifiers(self, value): # type: (List[int]) -> None
util.assert_is_list_of(value, int, min=0, max=(1 << 32))
self._identifiers = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
        buf = fd.read(length)
        # values are big-endian, matching write_data; the first 4 bytes hold the
        # identifier count, so skip them
        identifiers = np.frombuffer(buf, '>u4')[1:].tolist()
return cls(
name=name, identifiers=identifiers
)
def data_length(self, header): # type: (core.Header) -> int
return 4 + (len(self.identifiers) * 4)
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'I', len(self.identifiers))
for identifier in self.identifiers:
util.write_value(fd, 'I', identifier)
class VersionInfo(ImageResourceBlock):
def __init__(self,
name='', # type: unicode
version=0, # type: int
has_real_merged_data=False, # type: bool
writer='', # type: unicode
reader='', # type: unicode
file_version=0 # type: int
): # type: (...) -> None
self.name = name
self.version = version
self.has_real_merged_data = has_real_merged_data
self.writer = writer
self.reader = reader
self.file_version = file_version
_resource_id = enums.ImageResourceID.version_info
@property
def version(self): # type: (...) -> int
"version"
return self._version
@version.setter
def version(self, value): # type: (int) -> None
if (not isinstance(value, int) or
value < 0 or value > (1 << 32)):
raise ValueError("version must be a 32-bit integer")
self._version = value
@property
def has_real_merged_data(self): # type: (...) -> bool
"has real merged data?"
return self._has_real_merged_data
@has_real_merged_data.setter
def has_real_merged_data(self, value): # type: (Any) -> None
self._has_real_merged_data = bool(value)
@property
def writer(self): # type: (...) -> unicode
"writer name"
return self._writer
@writer.setter
def writer(self, value): # type: (unicode) -> None
if not isinstance(value, six.text_type):
raise TypeError("writer must be a Unicode string")
self._writer = value
@property
def reader(self): # type: (...) -> unicode
"reader name"
return self._reader
@reader.setter
def reader(self, value): # type: (unicode) -> None
if not isinstance(value, six.text_type):
raise TypeError("reader must be a Unicode string")
self._reader = value
@property
def file_version(self): # type: (...) -> int
"file version"
return self._file_version
@file_version.setter
def file_version(self, value): # type: (int) -> None
if (not isinstance(value, int) or
value < 0 or value > (1 << 32)):
raise ValueError("file_version must be a 32-bit integer")
self._file_version = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
version, has_real_merged_data = util.read_value(fd, 'IB')
has_real_merged_data = bool(has_real_merged_data)
writer = util.read_unicode_string(fd)
reader = util.read_unicode_string(fd)
file_version = util.read_value(fd, 'I')
return cls(
name=name, version=version,
has_real_merged_data=has_real_merged_data, writer=writer,
reader=reader, file_version=file_version
)
def data_length(self, header): # type: (core.Header) -> int
return (
4 + 1 +
util.unicode_string_length(self.writer) +
util.unicode_string_length(self.reader) +
4)
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'IB', self.version, self.has_real_merged_data)
util.write_unicode_string(fd, self.writer)
util.write_unicode_string(fd, self.reader)
util.write_value(fd, 'I', self.file_version)
class PrintScale(ImageResourceBlock):
def __init__(self,
name='', # type: unicode
style=enums.PrintScaleStyle.centered, # type: int
x=0.0, # type: float
y=0.0, # type: float
scale=0.0 # type: float
): # type: (...) -> None
self.name = name
self.style = style
self.x = x
self.y = y
self.scale = scale
_resource_id = enums.ImageResourceID.print_scale
@property
def style(self): # type: (...) -> int
"Style. See `enums.PrintScaleStyle`."
return self._style
@style.setter
def style(self, value): # type: (int) -> None
if value not in list(enums.PrintScaleStyle): # type: ignore
raise ValueError("Invalid print scale style")
self._style = value
@property
def x(self): # type: (...) -> float
"x location"
return self._x
@x.setter
def x(self, value): # type: (float) -> None
if not isinstance(value, float):
raise TypeError("x must be a float")
self._x = value
@property
def y(self): # type: (...) -> float
"y location"
return self._y
@y.setter
def y(self, value): # type: (float) -> None
if not isinstance(value, float):
raise TypeError("y must be a float")
self._y = value
@property
def scale(self): # type: (...) -> float
"scale"
return self._scale
@scale.setter
def scale(self, value): # type: (float) -> None
if not isinstance(value, float):
raise TypeError("scale must be a float")
self._scale = value
@classmethod
def read_data(cls,
fd, # type: BinaryIO
resource_id, # type: int
name, # type: unicode
length, # type: int
header # type: core.Header
): # type: (...) -> ImageResourceBlock
style, x, y, scale = util.read_value(fd, 'Hfff')
return cls(
name=name, style=style, x=x, y=y, scale=scale
)
def data_length(self, header): # type: (core.Header) -> int
return (2 + 4 + 4 + 4)
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'Hfff', self.style, self.x, self.y, self.scale)
class ImageResources(object):
"""
The image resource block section.
"""
def __init__(self,
blocks=[] # type: List[ImageResourceBlock]
): # type: (...) -> None
self.blocks = blocks
@property
def blocks(self): # type: (...) -> List[ImageResourceBlock]
"List of all `ImageResourceBlock` items."
return self._blocks
@blocks.setter
def blocks(self, value): # type: (List[ImageResourceBlock]) -> None
util.assert_is_list_of(value, ImageResourceBlock)
self._blocks = value
def length(self, header): # type: (core.Header) -> int
return sum(block.total_length(header) for block in self.blocks)
length.__doc__ = docs.length # type: ignore
def total_length(self, header): # type: (core.Header) -> int
return 4 + self.length(header)
total_length.__doc__ = docs.total_length # type: ignore
def get_block(self, resource_id):
# type: (int) -> Optional[ImageResourceBlock]
"""
Get the first block with the given resource id.
"""
for block in self.blocks:
if block.resource_id == resource_id:
return block
return None
@classmethod
@util.trace_read
def read(cls, fd, header):
# type: (BinaryIO, core.Header) -> ImageResources
length = util.read_value(fd, 'I')
end = fd.tell() + length
util.log("length: {}, end: {}", length, end)
blocks = []
while fd.tell() < end:
blocks.append(ImageResourceBlock.read(fd, header))
if fd.tell() != end:
raise ValueError(
"read the wrong amount reading image resource blocks")
return cls(blocks=blocks)
read.__func__.__doc__ = docs.read
@util.trace_write
def write(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'I', self.length(header))
for block in self.blocks:
block.write(fd, header)
write.__doc__ = docs.write
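

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how a caller might
# assemble an image-resources section from the classes above and look a block
# up again by its resource id. Header/stream handling is left to the
# surrounding library; only classes defined in this file are used.
def _example_build_image_resources():
    # A section holding a single global-altitude block.
    resources = ImageResources(blocks=[GlobalAltitude(altitude=100)])
    # Blocks are retrieved by resource id.
    block = resources.get_block(enums.ImageResourceID.global_altitude)
    assert block is not None and block.altitude == 100
    return resources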
| 32.169243 | 90 | 0.53732 |
79446f22b90f95605d8b4f1cbcfefc63f70098db | 5,497 | py | Python | pysit/gallery/marmousi2.py | zfang-slim/PysitForPython3 | dc60537b26018e28d92b7a956a2cf96775f0bdf9 | [
"BSD-3-Clause"
] | null | null | null | pysit/gallery/marmousi2.py | zfang-slim/PysitForPython3 | dc60537b26018e28d92b7a956a2cf96775f0bdf9 | [
"BSD-3-Clause"
] | null | null | null | pysit/gallery/marmousi2.py | zfang-slim/PysitForPython3 | dc60537b26018e28d92b7a956a2cf96775f0bdf9 | [
"BSD-3-Clause"
] | 1 | 2020-06-13T07:13:07.000Z | 2020-06-13T07:13:07.000Z | import numpy as np
from pysit import *
from pysit.gallery.gallery_base import PrecomputedGalleryModel
__all__ = ['MarmousiModel2', 'marmousi2']
class MarmousiModel2(PrecomputedGalleryModel):
""" Marmousi II community velocity model.
"""
# Names
model_name = "Marmousi2"
# A sanitized name for filesystem work
fs_full_name = "marmousi2"
fs_short_name = "marm2"
# Available data
supported_physics = ['acoustic', 'variable-density-acoustic', 'elastic']
supported_physical_parameters = ['density', 'vp', 'vs']
# Descriptive data
valid_dimensions = (2,)
@property #read only
def dimension(self):
return 2
# File information
_local_parameter_filenames = { 'vp' : 'vp_marmousi-ii.segy.gz',
'vs' : 'vs_marmousi-ii.segy.gz',
'density' : 'density_marmousi-ii.segy.gz'}
_parameter_scale_factor = { 'vp' : 1000.0,
'vs' : 1000.0,
'density' : 1.0}
_vp_file_sources = ['http://www.agl.uh.edu/downloads/vp_marmousi-ii.segy.gz']
_vs_file_sources = ['http://www.agl.uh.edu/downloads/vs_marmousi-ii.segy.gz']
_density_file_sources = ['http://www.agl.uh.edu/downloads/density_marmousi-ii.segy.gz']
_remote_file_sources = {'vp' : _vp_file_sources,
'vs' : _vs_file_sources,
'density' : _density_file_sources}
_model_transposed = False
# Model specification
base_physical_origin = np.array([0.0, 0.0])
base_physical_size = np.array([17000.0, 3500.0])
base_physical_dimensions_units = ('m', 'm')
base_pixels = np.array([13601, 2801])
base_pixel_scale = np.array([1.25, 1.25])
base_pixel_units = ('m', 'm')
# Water properties specify the way the water masking is handled
# (None, ) indicates no water
# ('depth', <depth: float>) specifies that a preset depth from the base_physical_origin is to be used
# ('mask', ) indicates that the specified mask name should be used
    water_properties = ('depth', 473.0)  # m, see "Marmousi2: an elastic upgrade for Marmousi"
_initial_configs = {'smooth_width': {'sigma':1000.0},
'smooth_low_pass': {'freq':1./1000.},
'constant': {'velocity': 3000.0},
'gradient': {'min':1500.0, 'max':3000}}
_scale_map = {'full': np.array([ 1.25, 1.25]),
'large': np.array([ 2.50, 2.50]),
'medium': np.array([ 5.00, 5.00]),
'small': np.array([10.00, 10.00]),
'mini': np.array([20.00, 20.00]),}
# old marmousi mini-square had dx=24m
patches = { 'original' : {'origin': np.array([3950, 440.0]),
'size': np.array([9200.0,3000.0])},
'mini-square' : {'origin': np.array([3950+195*24.0, 440.0]),
'size': np.array([128*24.0, 128*24.0])},
'left-layers-square' : {'origin': np.array([1500.0, 0.0]),
'size': np.array([3000.0, 3000.0])},
'right-layers-square' : {'origin': np.array([11630.0, 0.0]),
'size': np.array([3000.0, 3000.0])},
'fracture-square' : {'origin': np.array([8630.0, 0.0]),
'size': np.array([3000.0, 3000.0])},
}
def download(parameter='all'):
if parameter == 'all':
for p in MarmousiModel2.supported_physical_parameters:
MarmousiModel2._download_and_prepare(p)
for p in MarmousiModel2.supported_masks:
MarmousiModel2._download_and_prepare(p)
else:
MarmousiModel2._download_and_prepare(parameter)
def marmousi2(patch=None, compact=False, **kwargs):
model_config = dict(physics='acoustic',
origin=None,
size=None,
pixel_scale='mini',
pixels=None,
initial_model_style='smooth_low_pass',
initial_config={},
fix_water_layer=True,)
# Make any changes
model_config.update(kwargs)
if patch in MarmousiModel2.patches:
model_config.update(MarmousiModel2.patches[patch])
C, C0, m, d = MarmousiModel2(**model_config).get_setup()
if compact:
# we have to specify in the PML that we want to use the compact version of Helmholtz
# operator (without auxiliary fields)
m.x.lbc.domain_bc.compact = True
return C, C0, m, d
if __name__ == '__main__':
import matplotlib.pyplot as plt
M = MarmousiModel2(initial_model_style='smooth_low_pass', pixel_scale='mini')
C, C0, m, d = M.get_setup()
# C, C0, m, d = marmousi2(patch='mini-square')
fig = plt.figure()
fig.add_subplot(2,1,1)
vis.plot(C, m)
fig.add_subplot(2,1,2)
vis.plot(C0, m)
plt.show()
M = MarmousiModel2(initial_model_style='smooth_low_pass', pixel_scale='medium')
C, C0, m, d = M.get_setup()
# C, C0, m, d = marmousi2(patch='mini-square')
fig = plt.figure()
fig.add_subplot(2,1,1)
vis.plot(C, m)
fig.add_subplot(2,1,2)
vis.plot(C0, m)
plt.show() | 35.464516 | 106 | 0.549209 |
79446f497ddab2e1251a1da96413ab0052521544 | 51 | py | Python | tests/test.py | sudaning/Floga | d0662daf826275d58c857259e2e6a83cc777adcf | [
"MIT"
] | 2 | 2017-03-15T03:58:26.000Z | 2020-06-27T08:11:51.000Z | tests/test.py | sudaning/Freg | f142af88f79abef6b6954f41264d233a946737aa | [
"MIT"
] | null | null | null | tests/test.py | sudaning/Freg | f142af88f79abef6b6954f41264d233a946737aa | [
"MIT"
] | 3 | 2017-05-21T13:57:26.000Z | 2021-12-29T07:51:08.000Z | import os
if __name__ == "__main__":
print("OK")
| 10.2 | 26 | 0.647059 |
79446fc266b9f3161b36e3ea0da66d0d0ddc281a | 13,076 | py | Python | script/relativeFootPositionQuasiFlat.py | ori-drs/solo-rbprm | 2c8ade18f36a691b9e432302ae6af0724fa2d0f2 | [
"BSD-2-Clause"
] | null | null | null | script/relativeFootPositionQuasiFlat.py | ori-drs/solo-rbprm | 2c8ade18f36a691b9e432302ae6af0724fa2d0f2 | [
"BSD-2-Clause"
] | null | null | null | script/relativeFootPositionQuasiFlat.py | ori-drs/solo-rbprm | 2c8ade18f36a691b9e432302ae6af0724fa2d0f2 | [
"BSD-2-Clause"
] | 2 | 2021-06-01T09:17:36.000Z | 2021-12-13T11:29:02.000Z | from hpp.gepetto import Viewer
from hpp.gepetto import ViewerFactory
from numpy import array, zeros, ones
from numpy.linalg import norm
from solo_rbprm.solo import Robot
from hpp.corbaserver.rbprm.tools.display_tools import *
#from plot_polytopes import *
from pinocchio import Quaternion
NUM_SAMPLES = 6000
IT_DISPLAY_PROGRESS = NUM_SAMPLES / 10
MIN_DIST_BETWEEN_FEET_Y = 0.02
MIN_DIST_BETWEEN_FEET_X = 0.02
MAX_DIST_BETWEEN_FEET_X = 0.2
MAX_DIST_BETWEEN_FEET_Z = 0.2
MIN_HEIGHT_COM = 0.13
# margin used to constrain the com y position : if it's on the left of the left foot or on the right of the right foot
# for more than this margin, we reject this sample:
MARGIN_FEET_SIDE = 0.01
fullBody = Robot ()
fullBody.setConstrainedJointsBounds()
fullBody.setJointBounds ("root_joint", [-20,20, -20, 20, -20, 20])
dict_heuristic = {fullBody.rLegId:"static", fullBody.lLegId:"static", fullBody.rArmId:"fixedStep04", fullBody.lArmId:"fixedStep04"}
fullBody.loadAllLimbs(dict_heuristic,"ReferenceConfiguration",nbSamples=12)
#~ from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
from hpp.corbaserver import ProblemSolver
nbSamples = 1
ps = ProblemSolver( fullBody )
vf = ViewerFactory (ps)
v = vf.createViewer()
rootName = 'root_joint'
zero = [0.,0.,0.]
rLegId = fullBody.rLegId
rLeg = fullBody.rleg
rfoot = fullBody.rfoot
rLegOffset = fullBody.offset[:]
lLegOffset = fullBody.offset[:]
rArmOffset = fullBody.offset[:]
lArmOffset = fullBody.offset[:]
lLegId = fullBody.lLegId
lLeg = fullBody.lleg
lfoot = fullBody.lfoot
#make sure this is 0
q_0 = fullBody.getCurrentConfig ()
zeroConf = [0,0,0, 0, 0, 0, 1.]
q_0[0:7] = zeroConf
fullBody.setCurrentConfig (q_0)
effectors = [fullBody.rfoot, fullBody.lfoot, fullBody.rhand, fullBody.lhand,]
limbIds = [fullBody.rLegId, fullBody.lLegId, fullBody.rArmId, fullBody.lArmId]
offsets = [array(rLegOffset), array(lLegOffset), array(rArmOffset), array(lArmOffset)]
import numpy as np
compoints = [[] for _ in effectors]
#~ compoints = [[[0.012471792486262121, 0.0015769611415203033, 0.8127583093263778]],[[0.012471792486262121, 0.0015769611415203033, 0.8127583093263778]]]
points = [ {} for _ in effectors]
for i, eff in enumerate(effectors):
for j, otherEff in enumerate(effectors):
if i != j:
points[i][otherEff] = []
success = 0
fails = 0
from hpp.corbaserver.rbprm.rbprmstate import State, StateHelper
from scipy.spatial import ConvexHull
from constants_and_tools import hull_to_obj
from scipy.optimize import linprog
# static equilibrium: the CoM, projected onto the ground plane, must be a convex combination of the contact positions
def staticEq(positions, com):
sizeX = len(positions)
E = zeros((3,sizeX))
for i, pos in enumerate(positions):
E[:2,i] = pos[:2]
e = array([com[0], com[1], 1.])
E[2,:] = ones(sizeX)
try:
res = linprog(ones(sizeX), A_ub=None, b_ub=None, A_eq=E, b_eq=e, bounds=[(0.,1.) for _ in range(sizeX)], method='interior-point', callback=None, options={'presolve': True})
return res['success']
except:
return False
# returns True if one of the points is inside the convex hull of the others. We do not want that
def pointInsideHull(positions):
for i, pos in enumerate(positions):
others = positions[:i] + positions[i+1:]
if staticEq(others, pos):
return True
return False
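

# Quick sanity check (illustrative, hypothetical numbers): with four contacts at
# the corners of a unit square, a CoM above the centre is in static equilibrium,
# while a CoM far outside the support polygon is not.
def exampleStaticEqCheck():
    contacts = [[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]]
    assert staticEq(contacts, [0.5, 0.5, 0.4])
    assert not staticEq(contacts, [2.0, 2.0, 0.4])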
def genFlat(init = False):
q = fullBody.shootRandomConfig()
if init:
q = fullBody.referenceConfig[::]
q[0:7] = zeroConf
fullBody.setCurrentConfig(q)
#~ v(q)
positions = [fullBody.getJointPosition(foot)[:3] for foot in effectors]
s = State(fullBody, q = q, limbsIncontact = limbIds)
succ = True
for effId, pos in zip(limbIds,positions):
s, succ = StateHelper.addNewContact(s, effId, pos, [0.,0.,1.], num_max_sample = 0)
if not succ:
break
# ~ posrf = fullBody.getJointPosition(rfoot)[:3]
# ~ poslf = fullBody.getJointPosition(lfoot)[:3]
# ~ print ("limbsIds ", limbIds)
# ~ s = State(fullBody, q = q, limbsIncontact = limbIds)
# ~ s, succ = StateHelper.addNewContact(s, rLegId, posrf, [0.,0.,1.], num_max_sample = 0)
# ~ if succ:
# ~ s, succ = StateHelper.addNewContact(s, lLegId, poslf, [0.,0.,1.], num_max_sample = 0)
if succ:
# ~ succ = fullBody.isConfigValid(q)[0] and norm (array(posrf[:2]) - array(poslf[:2]) ) >= 0.3
succ = fullBody.isConfigValid(q)[0]
#assert that in static equilibrium
if succ:
fullBody.setCurrentConfig(q)
succ = staticEq(positions, fullBody.getCenterOfMass())
if not succ:
v(q)
if succ:
succ = not pointInsideHull(positions)
if not succ:
print ("************* contacts crossing", not succ)
v(q)
#~ if succ and norm (array(posrf[:2]) - array(poslf[:2]) ) <= 0.1:
# ~ if succ and norm (array(posrf) - array(poslf) ) <= 0.1:
v(s.q())
return s.q(), succ, s, positions
def printFootPositionRelativeToOther(nbConfigs):
for i in range(0, nbConfigs):
if i > 0 and not i % IT_DISPLAY_PROGRESS:
print(int((i * 100) / nbConfigs), " % done")
q, succ, s, pos = genFlat(i==0)
if succ:
global success
success += 1
addCom = True
for j, effectorName in enumerate(effectors):
for otheridx, (oeffectorName, limbId) in enumerate(zip(effectors,limbIds)):
if otheridx != j:
fullBody.setCurrentConfig(q)
pos_other = fullBody.getJointPosition(oeffectorName)
pos = fullBody.getJointPosition(effectorName)
                        p = (array(pos_other[:3]) - array(pos[:3])).tolist()
# ~ qtr = q[:]
# ~ qtr[:3] = [qtr[0] - pos_other[0], qtr[1] - pos_other[1], qtr[2] - pos_other[2]]
# ~ fullBody.setCurrentConfig(qtr)
# ~ qEffector = fullBody.getJointPosition(effectorName)
# check current joint pos is now zero
# ~ q0 = Quaternion(qEffector[6], qEffector[3], qEffector[4], qEffector[5])
# ~ rot = q0.matrix() # compute rotation matrix world -> local
# ~ p = qEffector[0:3] # (0,0,0) coordinate expressed in effector fram
# ~ rm = np.zeros((4, 4))
# ~ for k in range(0, 3):
# ~ for l in range(0, 3):
# ~ rm[k, l] = rot[k, l]
# ~ for m in range(0, 3):
# ~ rm[m, 3] = qEffector[m]
# ~ rm[3, 3] = 1
# ~ invrm = np.linalg.inv(rm)
# ~ p = invrm.dot([0, 0, 0., 1])
if (MAX_DIST_BETWEEN_FEET_Z > abs(p[2])):
if (MIN_DIST_BETWEEN_FEET_Y <= abs(p[1])):
if (MIN_DIST_BETWEEN_FEET_X <= abs(p[0])): #this is not what we want to do in theory but it works well in fact
points[j][oeffectorName].append(p[:3])
else:
addCom = False
else:
addCom = False
else:
print ('rejecting ',effectorName, ' ', oeffectorName , p, abs(p[2]))
# ~ print ('pos_other', pos_other)
# ~ print ('old_pos', old_pos)
addCom = False
v(q)
# ~ if (j == 0 and p[1] > MIN_DIST_BETWEEN_FEET_Y and abs(p[0]) < MAX_DIST_BETWEEN_FEET_X):
# ~ points[j].append(p[:3])
# ~ elif (j == 1 and p[1] < -MIN_DIST_BETWEEN_FEET_Y and abs(p[0]) < MAX_DIST_BETWEEN_FEET_X):
# ~ points[j].append(p[:3])
# ~ else:
# ~ addCom =
# now compute coms
fullBody.setCurrentConfig(q)
com = array(fullBody.getCenterOfMass())
print ('com ', com)
# ~ for x in range(0, 3):
# ~ q[x] = -com[x]
for j, effectorName in enumerate(effectors):
pos = fullBody.getJointPosition(effectorName)
                rp = (array(com) - array(pos[:3])).tolist()
# ~ qEffector = fullBody.getJointPosition(effectorName)
# ~ q0 = Quaternion(qEffector[6], qEffector[3], qEffector[4], qEffector[5])
# ~ rot = q0.matrix() # compute rotation matrix world -> local
# ~ p = qEffector[0:3] # (0,0,0) coordinate expressed in effector fram
# ~ rm = np.zeros((4, 4))
# ~ for k in range(0, 3):
# ~ for l in range(0, 3):
# ~ rm[k, l] = rot[k, l]
# ~ for m in range(0, 3):
# ~ rm[m, 3] = qEffector[m]
# ~ rm[3, 3] = 1
# ~ invrm = np.linalg.inv(rm)
# ~ p = invrm.dot([0, 0, 0, 1])
# ~ # add offset
# ~ rp = array(p[:3] - offsets[j]).tolist()
if (rp[2] < MIN_HEIGHT_COM):
addCom = False
print ("reject min heught")
if addCom:
compoints[j].append(rp)
# ~ if j == 1:
# ~ if rp[1] < MARGIN_FEET_SIDE:
# ~ compoints[j].append(rp)
# ~ else:
# ~ if rp[1] > -MARGIN_FEET_SIDE:
# ~ compoints[j].append(rp)
else:
global fails
fails += 1
# print(fullBody.isConfigValid(q)[1])
# for j in range(0,len(limbIds)):
# f1=open('./'+str(limbIds[j])+'_com.erom', 'w+')
# for p in points[j]:
# f1.write(str(p[0]) + "," + str(p[1]) + "," + str(p[2]) + "\n")
# f1.close()
s = State(fullBody, q = fullBody.referenceConfig, limbsIncontact = [fullBody.limbs_names[0]])
v(s.q())
#~ printRootPosition(rLegId, rfoot, nbSamples)
#~ printRootPosition(lLegId, lfoot, nbSamples)
#~ printRootPosition(rarmId, rHand, nbSamples)
#~ printRootPosition(larmId, lHand, nbSamples)
printFootPositionRelativeToOther(6000)
print ("successes ", success )
print ("fails ", fails )
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from plot_polytopes import plot_hull
# ~ for effector, comData, pointsData in zip(effectors, compoints, points):
# ~ for effector, limbId, comData, pointsData in zip(effectors[:1],limbIds[1:], compoints[:1], points[:1]):
for effector, limbId, comData, pointsData in zip(effectors,limbIds, compoints, points):
#comData_array = np.array(comData)
#print("com data shape : ", comData_array.shape)
hcom = ConvexHull(comData)
hull_to_obj(hcom,comData,"COM_constraints_in_"+str(limbId)+"_effector_frame_quasi_static.obj")
fig = plt.figure()
fig.suptitle("COM_constraints_in_"+str(limbId)+"_effector_frame_quasi_static.obj", fontsize=16)
plot_hull(hcom, comData, array(comData), color = "r", plot = False, fig = fig, ax = None)
fig = plt.figure()
fig.suptitle(str(limbId), fontsize=16)
# ~ axes = [221,222,223,224]
ax = None
# ~ for (oEffector, pts), axId in zip(pointsData.items(), axes):
for (oEffector, pts) in pointsData.items():
# ~ ax = fig.add_subplot(axId, projection="3d")
hpts = ConvexHull(pts)
hull_to_obj(hpts,pts,str(oEffector)+"_constraints_in_" +str(limbId)+".obj")
print ("ax ", ax)
ax = plot_hull(hpts, pts, array(pts), color = "b", plot = False, fig = fig, ax = ax)
print("effector ", limbId, )
print("oEffector ", oEffector, )
plt.show(block = False)
# ~ hcomRF = ConvexHull(compoints[0])
# ~ hcomLF = ConvexHull(compoints[1])
# ~ hull_to_obj(hcomRF,compoints[0],"anymal_COM_constraints_in_RF_effector_frame.obj")
# ~ hull_to_obj(hcomLF,compoints[1],"anymal_COM_constraints_in_LF_effector_frame.obj")
# ~ hptsRF = ConvexHull(points[0])
# ~ hptsLF = ConvexHull(points[1])
# ~ hull_to_obj(hptsRF,points[0],"anymal_LF_constraints_in_RF.obj")
# ~ hull_to_obj(hptsLF,points[1],"anymal_RF_constraints_in_LF.obj")
# ~ for k in range(2):
# ~ hcom = ConvexHull(compoints[k])
# ~ plot_hull(hcom, compoints[k], array(compoints[k]))
# ~ hpts = ConvexHull(points[k])
# ~ plot_hull(hpts, points[k], array(points[k]), color = "b", plot = k == 1 and True)
| 41.643312 | 180 | 0.556057 |
79446fdf94802002f40fe1a1d80b8e061d04c39b | 6,714 | py | Python | binding/python/porcupine.py | shivasiddharth/porcupine | f637350a1e7c1a4d060f596144a15ab8a6960edb | [
"Apache-2.0"
] | null | null | null | binding/python/porcupine.py | shivasiddharth/porcupine | f637350a1e7c1a4d060f596144a15ab8a6960edb | [
"Apache-2.0"
] | null | null | null | binding/python/porcupine.py | shivasiddharth/porcupine | f637350a1e7c1a4d060f596144a15ab8a6960edb | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018-2021 Picovoice Inc.
#
# You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
# file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import os
from ctypes import *
from enum import Enum
class Porcupine(object):
"""
Python binding for Porcupine wake word engine. It detects utterances of given keywords within an incoming stream of
audio in real-time. It processes incoming audio in consecutive frames and for each frame emits the detection result.
The number of samples per frame can be attained by calling `.frame_length`. The incoming audio needs to have a
sample rate equal to `.sample_rate` and be 16-bit linearly-encoded. Porcupine operates on single-channel audio.
"""
class PicovoiceStatuses(Enum):
SUCCESS = 0
OUT_OF_MEMORY = 1
IO_ERROR = 2
INVALID_ARGUMENT = 3
STOP_ITERATION = 4
KEY_ERROR = 5
INVALID_STATE = 6
RUNTIME_ERROR = 7
ACTIVATION_ERROR = 8
ACTIVATION_LIMIT_REACHED = 9
ACTIVATION_THROTTLED = 10
ACTIVATION_REFUSED = 11
_PICOVOICE_STATUS_TO_EXCEPTION = {
PicovoiceStatuses.OUT_OF_MEMORY: MemoryError,
PicovoiceStatuses.IO_ERROR: IOError,
PicovoiceStatuses.INVALID_ARGUMENT: ValueError,
PicovoiceStatuses.STOP_ITERATION: StopIteration,
PicovoiceStatuses.KEY_ERROR: KeyError,
PicovoiceStatuses.INVALID_STATE: ValueError,
PicovoiceStatuses.RUNTIME_ERROR: RuntimeError,
PicovoiceStatuses.ACTIVATION_ERROR: RuntimeError,
PicovoiceStatuses.ACTIVATION_LIMIT_REACHED: PermissionError,
PicovoiceStatuses.ACTIVATION_THROTTLED: PermissionError,
PicovoiceStatuses.ACTIVATION_REFUSED: PermissionError
}
class CPorcupine(Structure):
pass
def __init__(self, access_key, library_path, model_path, keyword_paths, sensitivities):
"""
Constructor.
:param access_key: AccessKey obtained from Picovoice Console.
:param library_path: Absolute path to Porcupine's dynamic library.
:param model_path: Absolute path to the file containing model parameters.
:param keyword_paths: Absolute paths to keyword model files.
:param sensitivities: Sensitivities for detecting keywords. Each value should be a number within [0, 1]. A
higher sensitivity results in fewer misses at the cost of increasing the false alarm rate.
"""
if not access_key:
raise ValueError("access_key should be a non-empty string.")
if not os.path.exists(library_path):
raise IOError("Couldn't find Porcupine's dynamic library at '%s'." % library_path)
library = cdll.LoadLibrary(library_path)
if not os.path.exists(model_path):
raise IOError("Couldn't find model file at '%s'." % model_path)
if len(keyword_paths) != len(sensitivities):
raise ValueError("Number of keywords does not match the number of sensitivities.")
for x in keyword_paths:
if not os.path.exists(os.path.expanduser(x)):
raise IOError("Couldn't find keyword file at '%s'." % x)
for x in sensitivities:
if not (0 <= x <= 1):
raise ValueError('A sensitivity value should be within [0, 1].')
init_func = library.pv_porcupine_init
init_func.argtypes = [
c_char_p,
c_char_p,
c_int,
POINTER(c_char_p),
POINTER(c_float),
POINTER(POINTER(self.CPorcupine))]
init_func.restype = self.PicovoiceStatuses
self._handle = POINTER(self.CPorcupine)()
status = init_func(
access_key.encode('utf-8'),
model_path.encode('utf-8'),
len(keyword_paths),
(c_char_p * len(keyword_paths))(*[os.path.expanduser(x).encode('utf-8') for x in keyword_paths]),
(c_float * len(keyword_paths))(*sensitivities),
byref(self._handle))
if status is not self.PicovoiceStatuses.SUCCESS:
raise self._PICOVOICE_STATUS_TO_EXCEPTION[status]()
self._delete_func = library.pv_porcupine_delete
self._delete_func.argtypes = [POINTER(self.CPorcupine)]
self._delete_func.restype = None
self.process_func = library.pv_porcupine_process
self.process_func.argtypes = [POINTER(self.CPorcupine), POINTER(c_short), POINTER(c_int)]
self.process_func.restype = self.PicovoiceStatuses
version_func = library.pv_porcupine_version
version_func.argtypes = []
version_func.restype = c_char_p
self._version = version_func().decode('utf-8')
self._frame_length = library.pv_porcupine_frame_length()
self._sample_rate = library.pv_sample_rate()
def delete(self):
"""Releases resources acquired by Porcupine."""
self._delete_func(self._handle)
def process(self, pcm):
"""
Processes a frame of the incoming audio stream and emits the detection result.
:param pcm: A frame of audio samples. The number of samples per frame can be attained by calling
`.frame_length`. The incoming audio needs to have a sample rate equal to `.sample_rate` and be 16-bit
linearly-encoded. Porcupine operates on single-channel audio.
:return: Index of observed keyword at the end of the current frame. Indexing is 0-based and matches the ordering
of keyword models provided to the constructor. If no keyword is detected then it returns -1.
"""
if len(pcm) != self.frame_length:
raise ValueError("Invalid frame length. expected %d but received %d" % (self.frame_length, len(pcm)))
result = c_int()
status = self.process_func(self._handle, (c_short * len(pcm))(*pcm), byref(result))
if status is not self.PicovoiceStatuses.SUCCESS:
raise self._PICOVOICE_STATUS_TO_EXCEPTION[status]()
return result.value
@property
def version(self):
"""Version"""
return self._version
@property
def frame_length(self):
"""Number of audio samples per frame."""
return self._frame_length
@property
def sample_rate(self):
"""Audio sample rate accepted by Picovoice."""
return self._sample_rate
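

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the binding): a typical detection
# loop. The access key, library/model/keyword paths and the `next_audio_frame`
# callable are hypothetical placeholders supplied by the caller.
def _example_detection_loop(access_key, library_path, model_path, keyword_paths, next_audio_frame):
    porcupine = Porcupine(
        access_key=access_key,
        library_path=library_path,
        model_path=model_path,
        keyword_paths=keyword_paths,
        sensitivities=[0.5] * len(keyword_paths))
    try:
        while True:
            # `next_audio_frame` must return `frame_length` 16-bit samples, or None to stop.
            pcm = next_audio_frame(porcupine.frame_length)
            if pcm is None:
                break
            keyword_index = porcupine.process(pcm)
            if keyword_index >= 0:
                print("detected keyword #%d" % keyword_index)
    finally:
        porcupine.delete()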
| 39.034884 | 120 | 0.674561 |
79446ffddb6e950971f67866a17fda2813924fd4 | 779 | py | Python | ABC073/ABC073d.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | ABC073/ABC073d.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | ABC073/ABC073d.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | # ABC073d
import sys
import itertools
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
n, m, _ = map(int, input().split())
d = [[float("inf") for i in range(n)] for i in range(n)]
for i in range(n):
    d[i][i] = 0  # cost of going from a vertex to itself is 0
r = list(map(int, input().split()))
def warshall_floyd(d):
    # d[i][j]: shortest distance from i to j
for k in range(n):
for i in range(n):
for j in range(n):
d[i][j] = min(d[i][j], d[i][k] + d[k][j])
return d
for _ in range(m):
a, b, c = map(int, input().split())
d[a-1][b-1] = c
d[b-1][a-1] = c
warshall_floyd(d)
ans = 10**9
for i in itertools.permutations(r, len(r)):
t = 0
for j in range(1, len(i)):
t += d[i[j]-1][i[j-1]-1]
#print(i, t)
ans = min(ans, t)
print(ans)
| 21.054054 | 57 | 0.532734 |
79447126d15ed901a59fc8799acbc5f8d99bb736 | 31,731 | py | Python | lib/datasets/imdb.py | fwtan/who_where | 8bb7081862d49bb288c70c0245ad2bf95b9f3732 | [
"Unlicense",
"MIT"
] | 13 | 2018-04-16T05:00:38.000Z | 2022-03-25T03:16:33.000Z | lib/datasets/imdb.py | fwtan/who_where | 8bb7081862d49bb288c70c0245ad2bf95b9f3732 | [
"Unlicense",
"MIT"
] | 4 | 2018-07-30T02:38:19.000Z | 2022-03-30T23:51:08.000Z | lib/datasets/imdb.py | fwtan/who_where | 8bb7081862d49bb288c70c0245ad2bf95b9f3732 | [
"Unlicense",
"MIT"
] | 7 | 2018-03-27T14:09:04.000Z | 2021-04-19T11:51:59.000Z | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# Modified by Fuwen Tan @ U.Va (2017)
# --------------------------------------------------------
import os, sys, math
import cv2, cPickle, copy
import os.path as osp
import numpy as np
import scipy.sparse
from config import cfg
import matplotlib.pyplot as plt
import datasets.ds_utils as ds_utils
# `pad_sequences` is not defined in this file; it is assumed to be the Keras
# sequence-padding helper used by get_rnn_minibatch below.
from keras.preprocessing.sequence import pad_sequences
class imdb(object):
"""Image database."""
def __init__(self, name):
self._name = name
self._num_classes = 0
self._classes = []
self._image_index = []
self._roidb = None
self._objdb = None
self.config = {}
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@classes.setter
def classes(self, val):
self._classes = val
@property
def image_index(self):
return self._image_index
@property
def roidb(self):
return self._roidb
@property
def objdb(self):
return self._objdb
@roidb.setter
def roidb(self, val):
self._roidb = val
@objdb.setter
def objdb(self, val):
self._objdb = val
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def num_images(self):
return len(self.image_index)
def image_path_at(self, i):
raise NotImplementedError
def append_flipped_images(self):
num_samples = len(self.roidb)
for i in range(num_samples):
entry = copy.deepcopy(self.roidb[i])
boxes = entry['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = entry['width'] - oldx2 - 1
boxes[:, 2] = entry['width'] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
entry['boxes'] = boxes
entry['flipped'] = True
# if entry.get('all_boxes', None):
# all_boxes = entry['all_boxes'].copy()
# oldx1 = all_boxes[:, 0].copy()
# oldx2 = all_boxes[:, 2].copy()
# all_boxes[:, 0] = entry['width'] - oldx2 - 1
# all_boxes[:, 2] = entry['width'] - oldx1 - 1
# assert (all_boxes[:, 2] >= all_boxes[:, 0]).all()
# entry['all_boxes'] = all_boxes
# all_boxes = entry['all_boxes'].copy()
# oldx1 = all_boxes[:, 0].copy()
# oldx2 = all_boxes[:, 2].copy()
# all_boxes[:, 0] = entry['width'] - oldx2 - 1
# all_boxes[:, 2] = entry['width'] - oldx1 - 1
# assert (all_boxes[:, 2] >= all_boxes[:, 0]).all()
# entry['all_boxes'] = all_boxes
self.roidb.append(entry)
#self._image_index = self._image_index * 2
def permute_roidb_indices(self):
self.roidb_perm = np.random.permutation(range(len(self.roidb)))
self.roidb_cur = 0
def permute_objdb_indices(self):
self.objdb_perm = np.random.permutation(range(len(self.objdb)))
self.objdb_cur = 0
def get_max_sequence_length(self):
self.maxSequenceLength = np.amax(np.array([r['boxes'].shape[0] for r in self.roidb]))
return self.maxSequenceLength
####################################################################
# Visualization
def draw_roidb_bboxes(self, output_dir, roidb=None):
ds_utils.maybe_create(output_dir)
ds_utils.maybe_create(osp.join(output_dir, 'roidb_boxes'))
if roidb is None:
roidb = self._roidb
for i in xrange(len(roidb)):
roi = roidb[i]
im_path = roi['image']
bboxes = roi['boxes'].copy()
clses = roi['clses']
# image data, flip if necessary
img = cv2.imread(im_path, cv2.IMREAD_COLOR)
if roi['flipped']:
# print('flipped %d'%i)
img = cv2.flip(img, 1)
img, offset_x, offset_y = \
ds_utils.create_squared_image(img, cfg.PIXEL_MEANS)
bboxes[:, 0] += offset_x; bboxes[:, 1] += offset_y
bboxes[:, 2] += offset_x; bboxes[:, 3] += offset_y
fontScale = 0.0007 * math.sqrt(2 * img.shape[0] * img.shape[0])
for j in xrange(bboxes.shape[0]):
bb = bboxes[j, :].astype(np.int16)
cls = self.classes[clses[j]]
cv2.rectangle(img, (bb[0], bb[1]), (bb[2], bb[3]),
(0, 255, 0), 1)
cv2.putText(img, '{:}_{:}'.format(j, cls),
(bb[0], bb[1] - 2),
cv2.FONT_HERSHEY_SIMPLEX,
fontScale, (0, 0, 255), 1)
            output_path = osp.join(output_dir, 'roidb_boxes', osp.basename(im_path))
cv2.imwrite(output_path, img)
print i
def draw_objdb_bboxes(self, output_dir, objdb=None):
ds_utils.maybe_create(output_dir)
ds_utils.maybe_create(osp.join(output_dir, 'objdb_boxes'))
if objdb is None:
objdb = self._objdb
for i in xrange(len(objdb)):
obj = objdb[i]
im_path = obj['image']
img = cv2.imread(im_path, cv2.IMREAD_COLOR)
box = obj['box']
cls = obj['cls']
aid = obj['obj_id']
if obj['flipped']:
# print('flipped %d'%i)
img = cv2.flip(img, 1)
img, offset_x, offset_y = \
ds_utils.create_squared_image(img, cfg.PIXEL_MEANS)
box[0] += offset_x; box[1] += offset_y
box[2] += offset_x; box[3] += offset_y
bb = box.astype(np.int)
fontScale = 0.0007 * math.sqrt(2 * img.shape[0] * img.shape[0])
cv2.rectangle(img, (bb[0], bb[1]), (bb[2], bb[3]),
(0, 255, 0), 1)
            cv2.putText(img, '{:}_{:}'.format(aid, cls),
(bb[0], bb[1] - 2), cv2.FONT_HERSHEY_SIMPLEX,
fontScale, (0, 0, 255), 1)
im_name, im_ext = osp.splitext(osp.basename(im_path))
output_path = osp.join(output_dir, 'objdb_boxes', im_name+'_'+str(aid).zfill(12)+im_ext)
cv2.imwrite(output_path, img)
print i
def draw_images(self, output_dir, roidb=None):
ds_utils.maybe_create(output_dir)
output_dir = osp.join(output_dir, 'images')
ds_utils.maybe_create(output_dir)
if roidb is None:
roidb = self._roidb
for i in xrange(len(roidb)):
roi = roidb[i]
im_path = roi['image']
# image data, flip if necessary
img = cv2.imread(im_path, cv2.IMREAD_COLOR)
# img, offset_x, offset_y = \
# ds_utils.create_squared_image(img, cfg.PIXEL_MEANS)
# img = cv2.resize(img, (512, 512))
output_path = osp.join(output_dir, osp.basename(im_path))
cv2.imwrite(output_path, img)
print i, osp.basename(im_path)
####################################################################
# Minibatch
def get_rnn_minibatch(self, max_seq_len, square=True, vis=False):
#######################################################################
# rename the config parameters to make the codes look clear
batch_size = cfg.TRAIN.BATCH_SIZE
resolution = cfg.RESOLUTION
grid_shape = cfg.GRID_SHAPE
#######################################################################
# indices of the minibatch
if self.roidb_cur + batch_size >= len(self.roidb):
self.permute_roidb_indices()
db_inds = self.roidb_perm[self.roidb_cur : self.roidb_cur + batch_size]
self.roidb_cur += batch_size
#######################################################################
#######################################################################
# to be returned
objects = []; centers = []; ratios = []; masks = []
# normalized xywh representation
bboxes = np.zeros((batch_size, max_seq_len, 4), dtype=np.float32)
# grid box offset
deltas = np.zeros((batch_size, max_seq_len, 4), dtype=np.float32)
images = np.zeros((batch_size, resolution[0], \
resolution[1], resolution[2]), dtype=np.float32)
#######################################################################
for i in xrange(batch_size):
rois = self.roidb[db_inds[i]]
im_path = rois['image']
width = rois['width']
height = rois['height']
gt_boxes = rois['boxes'].copy()
gt_cats = rois['clses'].copy()
areas = rois['seg_areas']
# number of instances should not exceed max_seq_len
num_instances = min(gt_boxes.shape[0], max_seq_len)
# image data, flip if necessary
img = cv2.imread(im_path, cv2.IMREAD_COLOR)
if rois['flipped']:
# print('flipped %d'%i)
img = cv2.flip(img, 1)
# sort the objects in the sequence based on their areas
order = np.argsort(areas)[::-1]
gt_boxes = gt_boxes[order, :]
gt_cats = gt_cats[order]
areas = areas[order]
# print areas
# [x1, y1, x2, y2] to [x, y, w, h]
gt_boxes = ds_utils.xyxy_to_xywh(gt_boxes)
# if we need square images
if square:
img, offset_x, offset_y = \
ds_utils.create_squared_image(img, cfg.PIXEL_MEANS)
gt_boxes[:,0] += offset_x
gt_boxes[:,1] += offset_y
width = height = img.shape[0]
# normalize
gt_boxes = ds_utils.normalize_xywh(gt_boxes, width, height)
# truncate the sequences
gt_boxes = gt_boxes[:num_instances, :]
# discreted output positions
grid_indices = ds_utils.xywh_to_index(gt_boxes, \
grid_shape[1], grid_shape[0])
# deltas between grid boxes and ground truth boxes
grid_boxes = ds_utils.index_to_xywh(grid_indices, \
grid_shape[1], grid_shape[0])
grid_deltas = ds_utils.bbox_transform(grid_boxes, gt_boxes)
# images of the same shape
images[i, :, :, :] = cv2.resize(img, (resolution[1], resolution[0]))
# use the last 'num_instances' objects
bboxes[i, :num_instances, :] = np.expand_dims(gt_boxes, axis=0)
# grid offsets
deltas[i, :num_instances, :] = np.expand_dims(grid_deltas, axis=0)
# object indicators
objects.append(gt_cats[:num_instances].tolist())
# masks for loss function
masks.append(np.ones((num_instances, )).tolist())
# grid centers and sizes
centers.append(grid_indices[:, 0].tolist())
ratios.append(grid_indices[:, 1].tolist())
# padding
objects = pad_sequences(objects, maxlen=max_seq_len,
padding='post', truncating='post', value=0.)
centers = pad_sequences(centers, maxlen=max_seq_len,
padding='post', truncating='post', value=0.)
ratios = pad_sequences(ratios, maxlen=max_seq_len,
padding='post', truncating='post', value=0.)
masks = pad_sequences(masks, maxlen=max_seq_len,
padding='post', truncating='post', value=0.)
if vis:
output_dir = osp.abspath(osp.join(cfg.ROOT_DIR, 'output', \
cfg.EXP_DIR, self.name, \
'rnn_minibatch'))
if not osp.exists(output_dir):
os.makedirs(output_dir)
for i in xrange(batch_size):
rois = self.roidb[db_inds[i]]
im_name, im_ext = osp.splitext(osp.basename(rois['image']))
msk = masks[i, :]
# ground truth boxes
ibb = bboxes[i, :, :].copy()
iid = objects[i, :].copy()
iim = images[i, :, :, :].copy()
# grid bboxes
grid_indices = np.vstack((centers[i,:], ratios[i,:])).transpose()
gbb = ds_utils.index_to_xywh(grid_indices, grid_shape[1], grid_shape[0])
# regressed bboxes
rbb = ds_utils.bbox_transform_inv(gbb, deltas[i,:,:])
# Denormalize
ibb = ds_utils.denormalize_xywh(ibb, resolution[1], resolution[0])
gbb = ds_utils.denormalize_xywh(gbb, resolution[1], resolution[0])
rbb = ds_utils.denormalize_xywh(rbb, resolution[1], resolution[0])
ibb = ds_utils.xywh_to_xyxy(ibb, resolution[1], resolution[0])
gbb = ds_utils.xywh_to_xyxy(gbb, resolution[1], resolution[0])
rbb = ds_utils.xywh_to_xyxy(rbb, resolution[1], resolution[0])
# fontScale = 0.0007 * math.sqrt(float(\
# resolution[0]*resolution[0]+resolution[1]*resolution[1]))
for j in xrange(ibb.shape[0]):
if msk[j] == 0:
break
id = iid[j]
cls = self.classes[id]
# ground truth boxes
bb = ibb[j, :].astype(np.int16)
cv2.rectangle(iim, (bb[0], bb[1]), (bb[2], bb[3]), \
(0, 255, 0), 2)
# grid boxes
bb = gbb[j, :].astype(np.int16)
cv2.rectangle(iim, (bb[0], bb[1]), (bb[2], bb[3]), \
(255, 0, 0), 1)
# regressed boxes
bb = rbb[j, :].astype(np.int16)
cv2.rectangle(iim, (bb[0], bb[1]), (bb[2], bb[3]), \
(0, 0, 255), 1)
# cv2.putText(iim, '{:}_{:}'.format(j, cls), \
# (bb[0], bb[1] - 2), \
# cv2.FONT_HERSHEY_SIMPLEX, \
# fontScale, (0, 0, 255), 1)
output_path = osp.join(output_dir, '%06d_'%i+im_name+'.jpg')
cv2.imwrite(output_path, iim)
return images, objects, bboxes, deltas, centers, ratios, masks
def get_obj_minibatch(self):
#######################################################################
# rename the config parameters to make the codes look clear
batch_size = cfg.TRAIN.BATCH_SIZE
#######################################################################
# indices of the minibatch
if self.objdb_cur + batch_size >= len(self.objdb):
self.permute_objdb_indices()
db_inds = self.objdb_perm[self.objdb_cur : self.objdb_cur + batch_size]
self.objdb_cur += batch_size
#######################################################################
minibatch = [self.objdb[x] for x in db_inds]
return minibatch
def get_minibatch(self, square=True):
# outputs: resized images, normalized xywhs, grids
batch_size = cfg.TRAIN.BATCH_SIZE
grid_shape = cfg.GRID_SHAPE
resolution = cfg.RESOLUTION
#######################################################################
# indices of the minibatch
if self.objdb_cur + batch_size >= len(self.objdb):
self.permute_objdb_indices()
db_inds = self.objdb_perm[self.objdb_cur : self.objdb_cur + batch_size]
self.objdb_cur += batch_size
#######################################################################
images = np.zeros((batch_size, resolution[0], \
resolution[1], resolution[2]), dtype=np.float32)
grids = np.zeros((batch_size, 2))
boxes = np.zeros((batch_size, 4))
for i in range(batch_size):
obj = self.objdb[db_inds[i]]
im_path = obj['background']
width = obj['width']
height = obj['height']
box = obj['box'].copy()
# image data, flip if necessary
img = cv2.imread(im_path, cv2.IMREAD_COLOR)
if obj['flipped']:
# print('flipped %d'%i)
img = cv2.flip(img, 1)
xywh = ds_utils.xyxy_to_xywh(box.reshape((1,4))).squeeze()
# if we need square images
if square:
img, offset_x, offset_y = \
ds_utils.create_squared_image(img, cfg.PIXEL_MEANS)
xywh[0] += offset_x
xywh[1] += offset_y
width = height = img.shape[0]
nxywh = ds_utils.normalize_xywh(xywh.reshape((1,4)), width, height).squeeze()
# discreted output positions
grid = ds_utils.boxes_to_indices(nxywh.reshape((1,4)), grid_shape).squeeze()
# images of the same shape
images[i, :, :, :] = cv2.resize(img, (resolution[1], resolution[0]))
grids[i, :] = grid
boxes[i, :] = nxywh
return images, boxes, grids
def get_background_minibatch(self, square=True):
# outputs: resized images, layouts, normalized xywhs, grids
batch_size = cfg.TRAIN.BATCH_SIZE
resolution = cfg.PREDICT_RESOLUTION
grid_shape = cfg.GRID_SHAPE
#######################################################################
# indices of the minibatch
if self.objdb_cur + batch_size >= len(self.objdb):
self.permute_objdb_indices()
db_inds = self.objdb_perm[self.objdb_cur : self.objdb_cur + batch_size]
self.objdb_cur += batch_size
#######################################################################
images = np.zeros((batch_size, resolution[0], \
resolution[1], resolution[2]), dtype=np.float32)
layouts = np.zeros((batch_size, resolution[0], \
resolution[1], resolution[2]), dtype=np.float32)
grids = np.zeros((batch_size, 2))
boxes = np.zeros((batch_size, 4))
for i in range(batch_size):
obj = self.objdb[db_inds[i]]
im_path = obj['bg_image']
lo_path = obj['bg_layout']
width = obj['width']
height = obj['height']
box = obj['box'].copy()
# image data, flip if necessary
im = cv2.imread(im_path, cv2.IMREAD_COLOR)
lo = cv2.imread(lo_path, cv2.IMREAD_COLOR)
if obj['flipped']:
# print('flipped %d'%i)
im = cv2.flip(im, 1)
lo = cv2.flip(lo, 1)
xywh = ds_utils.xyxy_to_xywh(box.reshape((1,4))).squeeze()
# if we need square images
if square:
im, ox, oy = \
ds_utils.create_squared_image(im, cfg.PIXEL_MEANS)
xywh[0] += ox
xywh[1] += oy
width = height = im.shape[0]
lo, ox, oy = \
ds_utils.create_squared_image(lo, cfg.PIXEL_MEANS)
nxywh = ds_utils.normalize_xywh(xywh.reshape((1,4)), width, height).squeeze()
# discreted output positions
grid = ds_utils.boxes_to_indices(nxywh.reshape((1,4)), grid_shape).squeeze()
# images of the same shape
images[i] = cv2.resize(im, (resolution[1], resolution[0]))
layouts[i] = cv2.resize(lo, (resolution[1], resolution[0]))
grids[i, :] = grid
boxes[i, :] = nxywh
# print im_path, grid
return images, layouts, boxes, grids
def get_scene_minibatch(self, square=True):
# outputs: resized images, layouts, segmentations, normalized xywhs, grids
batch_size = cfg.TRAIN.BATCH_SIZE
resolution = cfg.PREDICT_RESOLUTION
grid_shape = cfg.GRID_SHAPE
num_clses = self.num_classes-1
#######################################################################
# indices of the minibatch
if self.objdb_cur + batch_size >= len(self.objdb):
self.permute_objdb_indices()
db_inds = self.objdb_perm[self.objdb_cur : self.objdb_cur + batch_size]
self.objdb_cur += batch_size
#######################################################################
images = np.zeros((batch_size, resolution[0], \
resolution[1], resolution[2]), dtype=np.float32)
scenes = np.zeros((batch_size, resolution[0], \
resolution[1], num_clses), dtype=np.float32)
segs = np.zeros((batch_size, resolution[0], \
resolution[1], resolution[2]), dtype=np.float32)
grids = np.zeros((batch_size, 2))
boxes = np.zeros((batch_size, 4))
for i in range(batch_size):
obj = self.objdb[db_inds[i]]
im_path = obj['background']
seg_path = obj['out_seg']
width = obj['width']
height = obj['height']
box = obj['box'].copy()
all_boxes = obj['all_boxes'].copy().reshape((-1,4)).astype(np.int)
all_clses = obj['all_clses'].copy().flatten()
# image data, flip if necessary
img = cv2.imread(im_path, cv2.IMREAD_COLOR)
seg = cv2.imread(seg_path, cv2.IMREAD_COLOR)
if obj['flipped']:
# print('flipped %d'%i)
img = cv2.flip(img, 1)
seg = cv2.flip(seg, 1)
xywh = ds_utils.xyxy_to_xywh(box.reshape((1,4))).squeeze()
ex_box = box.copy().flatten().astype(np.int)
# if we need square images
if square:
img, offset_x, offset_y = \
ds_utils.create_squared_image(img, cfg.PIXEL_MEANS)
xywh[0] += offset_x
xywh[1] += offset_y
ex_box[0] += offset_x
ex_box[1] += offset_y
ex_box[2] += offset_x
ex_box[3] += offset_y
all_boxes[:, 0] += offset_x
all_boxes[:, 1] += offset_y
all_boxes[:, 2] += offset_x
all_boxes[:, 3] += offset_y
width = height = img.shape[0]
seg, offset_x, offset_y = \
ds_utils.create_squared_image(seg, cfg.PIXEL_MEANS)
nxywh = ds_utils.normalize_xywh(xywh.reshape((1,4)), width, height).squeeze()
# discreted output positions
grid = ds_utils.boxes_to_indices(nxywh.reshape((1,4)), grid_shape).squeeze()
# images of the same shape
images[i] = cv2.resize(img, (resolution[1], resolution[0]))
segs[i] = cv2.resize(seg, (resolution[1], resolution[0]))
factor = float(resolution[0])/width
all_boxes = (factor * all_boxes).astype(np.int)
ex_box = (factor * ex_box).astype(np.int)
scenes[i] = ds_utils.create_scenes(resolution[1], resolution[0], all_boxes, all_clses, ex_box=ex_box, n_cls=num_clses)
grids[i, :] = grid
boxes[i, :] = nxywh
return images, scenes, segs, boxes, grids
####################################################################
# Statistic
def draw_binary_correlation_stat_graph(self, output_dir, roidb=None):
# Create the output directory if necessary
if not osp.exists(output_dir):
os.makedirs(output_dir)
if not osp.exists(osp.join(output_dir, 'images')):
os.makedirs(osp.join(output_dir, 'images'))
# Cache files
present_cache_file = osp.join(self.cache_path, self.name + '_present_stats.pkl')
correlation_cache_file = osp.join(self.cache_path, self.name + '_correlation_stats.pkl')
# Load cache files if they exist
if osp.exists(present_cache_file) and osp.exists(correlation_cache_file):
with open(present_cache_file, 'rb') as fid:
present_stats = cPickle.load(fid)
print '{} present stats loaded from {}'.format(self.name, present_cache_file)
with open(correlation_cache_file, 'rb') as fid:
correlation_stats = cPickle.load(fid)
print '{} correlation stats loaded from {}'.format(self.name, correlation_cache_file)
# Otherwise, create them
else:
if roidb == None:
roidb = self.roidb
num_rois = len(roidb)
# present_stats: the number of pairs
present_stats = np.zeros((self.num_classes, self.num_classes))
correlation_stats = [[ np.zeros((6, 0)) for j in xrange(self.num_classes) ] \
for i in xrange(self.num_classes) ]
for i in xrange(num_rois):
rois = roidb[i]
im_width = float(rois['width'])
im_height = float(rois['height'])
bboxes = rois['boxes'].copy()
classes = rois['clses']
# At least 2 objects
if bboxes.shape[0] < 2:
continue
# Assume squared images
max_dim = np.maximum(im_width, im_height)
nfactor = np.array([max_dim, max_dim, \
max_dim, max_dim]).reshape((1,4))
# Change representations from xyxy to xywh
bboxes = ds_utils.xyxy_to_xywh(bboxes)
# Normalize
bboxes = np.divide(bboxes, nfactor)
# Area
areas = np.multiply(bboxes[:, 2], bboxes[:, 3]).squeeze()
# Aspect ratio
ratios = np.divide(bboxes[:, 2], bboxes[:, 3]).squeeze()
for j in xrange(bboxes.shape[0] - 1):
cls1 = classes[j]
bbox1 = bboxes[j, :].squeeze()
for k in xrange(j + 1, bboxes.shape[0]):
cls2 = classes[k]
bbox2 = bboxes[k, :].squeeze()
offset = bbox2[:2] - bbox1[:2]
correlation21 = np.array([offset[0], offset[1],
areas[j], areas[k],
ratios[j], ratios[k]]).reshape((6,1))
correlation12 = np.array([-offset[0], -offset[1],
areas[k], areas[j],
ratios[k], ratios[j]]).reshape((6,1))
correlation_stats[cls1][cls2] = \
np.hstack((correlation_stats[cls1][cls2], correlation21))
correlation_stats[cls2][cls1] = \
np.hstack((correlation_stats[cls2][cls1], correlation12))
present_stats[cls1, cls2] += 1
present_stats[cls2, cls1] += 1
print i
with open(present_cache_file, 'wb') as fid:
cPickle.dump(present_stats, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote present stats to {}'.format(present_cache_file)
with open(correlation_cache_file, 'wb') as fid:
cPickle.dump(correlation_stats , fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote correlation stats to {}'.format(correlation_cache_file)
plt.switch_backend('agg')
for i in xrange(1, self.num_classes):
for j in xrange(1, self.num_classes):
correlation = correlation_stats[i][j]
fig = plt.figure()
plt.hist2d(correlation[0, :], correlation[1, :], 20, range=[[-1.0, 1.0], [-1.0, 1.0]])
plt.colorbar()
plt.xlim([-1.0, 1.0])
plt.ylim([-1.0, 1.0])
plt.title('offset: %s vs %s'%(self.classes[i], self.classes[j]))
plt.grid(True)
fig.savefig(os.path.join(output_dir, 'images/offset_%02d_%02d.jpg' % (i, j)), bbox_inches='tight')
plt.close(fig)
fig = plt.figure()
plt.hist2d(correlation[2, :], correlation[3, :], 20, range=[[0, 0.05], [0, 0.05]])
plt.colorbar()
plt.xlim([0, 0.05])
plt.ylim([0, 0.05])
plt.title('area: %s vs %s'%(self.classes[i], self.classes[j]))
plt.grid(True)
fig.savefig(osp.join(output_dir, 'images/area_%02d_%02d.jpg' % (i, j)), bbox_inches='tight')
plt.close(fig)
fig = plt.figure()
plt.hist2d(correlation[4, :], correlation[5, :], 20, range=[[0, 4.0], [0, 4.0]])
plt.colorbar()
plt.xlim([0, 4.0])
plt.ylim([0, 4.0])
plt.title('aspect ratio: %s vs %s'%(self.classes[i], self.classes[j]))
plt.grid(True)
fig.savefig(osp.join(output_dir, 'images/ratio_%02d_%02d.jpg' % (i, j)), bbox_inches='tight')
plt.close(fig)
im1 = cv2.resize(cv2.imread(osp.join(output_dir, 'images/offset_%02d_%02d.jpg' % (i, j))), (648, 545))
im2 = cv2.resize(cv2.imread(osp.join(output_dir, 'images/area_%02d_%02d.jpg' % (i, j))), (648, 545))
im3 = cv2.resize(cv2.imread(osp.join(output_dir, 'images/ratio_%02d_%02d.jpg' % (i, j))), (648, 545))
im = np.zeros((545, 648 * 3, 3), dtype=np.int16)
im[:, : 648, :] = im1
im[:, 648:2*648, :] = im2
im[:, 2*648:3*648, :] = im3
cv2.imwrite(osp.join(output_dir, 'images/%02d_%02d.jpg' % (i, j)), im)
print i,j
def create_binary_correlation_stat_html(self, output_dir, roidb=None):
from html import HTML
# Create the directory if necessary
if not osp.exists(output_dir):
os.makedirs(output_dir)
present_cache_file = osp.join(self.cache_path, self.name + '_present_stats.pkl')
assert os.path.exists(present_cache_file)
with open(present_cache_file, 'rb') as fid:
present_stats = cPickle.load(fid)
print '{} present stats loaded from {}'.format(self.name, present_cache_file)
config_html = HTML()
config_table = config_html.table(border='1')
for i in xrange(self.num_classes):
r = config_table.tr
if i == 0:
r.th('---')
else:
r.th('%s'%self.classes[i])
for j in xrange(1, self.num_classes):
c = r.td
if i == 0:
c.a('%s'%self.classes[j])
else:
c.a('%d'%int(present_stats[i, j]), href='images/%02d_%02d.jpg'%(i,j))
html_file = open(osp.join(output_dir, 'coco_offsets_table.html'), 'w')
print >> html_file, config_table
html_file.close()
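

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the intended epoch-style
# iteration over the object database. `dataset` stands for any concrete imdb
# subclass whose objdb has been populated; the permute/cursor pair used by
# get_obj_minibatch reshuffles automatically when an epoch is exhausted.
def _example_objdb_iteration(dataset, num_iterations=100):
    dataset.permute_objdb_indices()
    for step in xrange(num_iterations):
        minibatch = dataset.get_obj_minibatch()
        # each entry is one object annotation dict (image path, box, class, ...)
        print step, len(minibatch)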
| 39.077586 | 130 | 0.498598 |
794471b8acacaf3358bf22c39633226af4c04b33 | 4,304 | py | Python | Views/Application.py | Jack-Dane/clipboard-copy | 2853ce2540298a26a51bc0f627d9e965562dd86d | [
"MIT"
] | null | null | null | Views/Application.py | Jack-Dane/clipboard-copy | 2853ce2540298a26a51bc0f627d9e965562dd86d | [
"MIT"
] | 1 | 2021-10-04T20:54:46.000Z | 2021-10-05T21:34:18.000Z | Views/Application.py | Jack-Dane/clipboard-copy | 2853ce2540298a26a51bc0f627d9e965562dd86d | [
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter import ttk
class StaticViewCreator():
@staticmethod
def initialiseView(model, clipboardQueue):
"""
Intialises the Application View
:param clipboardStack: a list of clipboard items
:param controller: the controller which is calling the view
"""
root = tk.Tk()
view = Application(model, clipboardQueue, master=root)
view.mainloop()
class Application(tk.Frame, StaticViewCreator):
def __init__(self, model, clipboardQueue, master=None):
super().__init__(master)
self.master = master
self.master.configure(background="#2b2b2b")
self.master.title("Clipboard")
self.master.resizable(width=False, height=False)
self.master.iconphoto(False, tk.PhotoImage(file="Assets/icon.png"))
self.model = model
self.clipboardStack = self.model.getData()
self.configureStyles()
self.pack()
self.configureTreeWidget()
self.configureButtons()
self.clipboardQueue = clipboardQueue
self.master.after(100, self.checkForUpdated)
def configureStyles(self):
"""
Configure style to be used
"""
self.style = ttk.Style()
self.style.configure("DarkTheme.TButton", foreground="#e3e3e3", background="#2b2b2b")
self.style.map("DarkTheme.TButton", background=[("active", "#007804")])
self.style.configure("DarkTheme.TFrame", foreground="#e3e3e3", background="#2b2b2b")
def configureButtons(self):
"""
Create and configure buttons: confirm & cancel
"""
# button button frame
self.buttonFrame = ttk.Frame(self.master, style="DarkTheme.TFrame")
self.buttonFrame.pack(padx=5, pady=5)
# confirm button
self.confirmButton = ttk.Button(self.buttonFrame, command=self.confirmClick, text="Confirm",
style="DarkTheme.TButton")
self.confirmButton.grid(row=0, column=0, padx=5)
# cancel button
self.cancelButton = ttk.Button(self.buttonFrame, command=self.cancelClick, text="Cancel",
style="DarkTheme.TButton")
self.cancelButton.grid(row=0, column=1, padx=5)
def configureTreeWidget(self):
"""
Create the TreeWidget which stores all the clipboard values
"""
treeViewColumns = ("#1",)
self.treeView = ttk.Treeview(self.master, columns=treeViewColumns, show="headings",
takefocus=False)
self.treeView.heading("#1", text="Clipboard Text")
self.attachTreeWidget()
self.treeView.pack(padx=5, pady=5)
def attachTreeWidget(self):
"""
Attach the tree widget to the clipboard list in the model
"""
self.clipboardStack = self.model.clipboardStack
for clipboard in self.model.getData():
self.treeView.insert("", tk.END, values=(clipboard,))
def confirmClick(self):
"""
Create the Copy and Close Buttons
"""
selectionValue = self.treeView.item(self.treeView.selection()[0])["values"][0]
self.model.newClipboardValue(selectionValue)
self.master.destroy()
def cancelClick(self):
"""
Close the window
"""
self.master.destroy()
def clearTreeWidget(self):
"""
Remove all the items from the treeWidget
"""
for item in self.treeView.get_children():
self.treeView.delete(item)
def addAnotherClipboard(self, copyItem):
"""
Add another clipboard item to the application while live running
"""
self.treeView.insert("", tk.END, values=(copyItem, ))
def checkForUpdated(self):
"""
If new item has been added to the clipboard queue, update the list items
"""
if not self.clipboardQueue.empty():
self.update()
self.clipboardQueue.clear()
self.master.after(100, self.checkForUpdated)
def update(self):
"""
        Called when the model is updated, instead of updating in case of other CRUD methods
"""
self.clearTreeWidget()
self.attachTreeWidget()
| 32.360902 | 100 | 0.608271 |
794471c8af8a47bfb9a6d8f60b13eb24ac782737 | 536 | py | Python | pseudo_labels/voc12/extend_data.py | saramsv/CCT | 27b4fd838a174a3c0fca582aa163e5bd426b055a | [
"MIT"
] | null | null | null | pseudo_labels/voc12/extend_data.py | saramsv/CCT | 27b4fd838a174a3c0fca582aa163e5bd426b055a | [
"MIT"
] | null | null | null | pseudo_labels/voc12/extend_data.py | saramsv/CCT | 27b4fd838a174a3c0fca582aa163e5bd426b055a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
file_ = sys.argv[1]
data = open(file_, 'r').readlines()
for line in data:
    if line.split(":")[1].strip() == 'fullbody':
        print(line.split(":")[0].strip() + ":" + "legs")
        print(line.split(":")[0].strip() + ":" + "foot")
        print(line.split(":")[0].strip() + ":" + "arm")
        print(line.split(":")[0].strip() + ":" + "hand")
        print(line.split(":")[0].strip() + ":" + "torso")
        print(line.split(":")[0].strip() + ":" + "head")
    else:
        print(line.strip())
| 31.529412 | 57 | 0.483209 |
79447270bc6683ef0e148e3e264c2f583e11b9df | 6,103 | py | Python | openmdao_extensions/salib_doe_driver.py | OneraHub/openmdao_extensions | 4d67315db71a572c1807c2e3fb24e176c3dab845 | [
"Apache-2.0"
] | 7 | 2019-03-04T19:34:21.000Z | 2021-11-18T13:32:51.000Z | openmdao_extensions/salib_doe_driver.py | OneraHub/openmdao_extensions | 4d67315db71a572c1807c2e3fb24e176c3dab845 | [
"Apache-2.0"
] | null | null | null | openmdao_extensions/salib_doe_driver.py | OneraHub/openmdao_extensions | 4d67315db71a572c1807c2e3fb24e176c3dab845 | [
"Apache-2.0"
] | 1 | 2019-10-18T13:06:25.000Z | 2019-10-18T13:06:25.000Z | """
Driver for running a model on design-of-experiments cases using SALib sampling methods
"""
import numpy as np
from openmdao.api import DOEDriver, OptionsDictionary
from openmdao.drivers.doe_generators import DOEGenerator
SALIB_NOT_INSTALLED = False
try:
from SALib.sample import morris as ms
from SALib.sample import saltelli
except ImportError:
SALIB_NOT_INSTALLED = True
class SalibDOEGenerator(DOEGenerator):
def __init__(self):
if SALIB_NOT_INSTALLED:
raise RuntimeError(
"SALib library is not installed. \
cf. https://salib.readthedocs.io/en/latest/getting-started.html"
)
self._cases = np.array([])
self._pb = None
self.called = False
def __call__(self, design_vars, model=None):
bounds = []
names = []
for name, meta in design_vars.items():
size = meta["size"]
meta_low = meta["lower"]
meta_high = meta["upper"]
for j in range(size):
if isinstance(meta_low, np.ndarray):
p_low = meta_low[j]
else:
p_low = meta_low
if isinstance(meta_high, np.ndarray):
p_high = meta_high[j]
else:
p_high = meta_high
display_name = name.split(".")[-1]
if size > 1:
display_name += str(j)
names.append(display_name)
bounds.append((p_low, p_high))
self._pb = {
"num_vars": len(names),
"names": names,
"bounds": bounds,
"groups": None,
}
self._compute_cases()
self.called = True
sample = []
for i in range(self._cases.shape[0]):
j = 0
for name, meta in design_vars.items():
size = meta["size"]
sample.append((name, self._cases[i, j : j + size]))
j += size
yield sample
def _compute_cases(self):
raise RuntimeError("Have to be implemented in subclass.")
def get_cases(self):
if not self.called:
raise RuntimeError("Have to run the driver before getting cases")
return self._cases
def get_salib_problem(self):
if not self.called:
raise RuntimeError(
"Have to run the driver before getting the SALib problem"
)
return self._pb
class SalibMorrisDOEGenerator(SalibDOEGenerator):
def __init__(self, n_trajs=2, n_levels=4):
super(SalibMorrisDOEGenerator, self).__init__()
# number of trajectories to apply morris method
self.n_trajs = n_trajs
# number of grid levels
self.n_levels = n_levels
def _compute_cases(self):
self._cases = ms.sample(self._pb, self.n_trajs, self.n_levels)
class SalibSobolDOEGenerator(SalibDOEGenerator):
def __init__(self, n_samples=1000, calc_second_order=True):
super(SalibSobolDOEGenerator, self).__init__()
# number of samples to generate
self.n_samples = n_samples
        # whether to compute second-order indices
self.calc_second_order = calc_second_order
def _compute_cases(self):
self._cases = saltelli.sample(self._pb, self.n_samples, self.calc_second_order)
class SalibDOEDriver(DOEDriver):
"""
Baseclass for SALib design-of-experiments Drivers
"""
def __init__(self, **kwargs):
super(SalibDOEDriver, self).__init__()
if SALIB_NOT_INSTALLED:
raise RuntimeError(
"SALib library is not installed. \
cf. https://salib.readthedocs.io/en/latest/getting-started.html"
)
self.options.declare(
"sa_method_name",
default="Morris",
values=["Morris", "Sobol"],
desc="either Morris or Sobol",
)
self.options.declare(
"sa_doe_options",
types=dict,
default={},
desc="options for given SMT sensitivity analysis method",
)
self.options.update(kwargs)
self.sa_settings = OptionsDictionary()
if self.options["sa_method_name"] == "Morris":
self.sa_settings.declare(
"n_trajs",
types=int,
default=2,
desc="number of trajectories to apply morris method",
)
self.sa_settings.declare(
"n_levels", types=int, default=4, desc="number of grid levels"
)
self.sa_settings.update(self.options["sa_doe_options"])
n_trajs = self.sa_settings["n_trajs"]
n_levels = self.sa_settings["n_levels"]
self.options["generator"] = SalibMorrisDOEGenerator(n_trajs, n_levels)
elif self.options["sa_method_name"] == "Sobol":
self.sa_settings.declare(
"n_samples",
types=int,
default=500,
desc="number of samples to generate",
)
self.sa_settings.declare(
"calc_second_order",
types=bool,
default=True,
desc="calculate second-order sensitivities ",
)
self.sa_settings.update(self.options["sa_doe_options"])
n_samples = self.sa_settings["n_samples"]
calc_snd = self.sa_settings["calc_second_order"]
self.options["generator"] = SalibSobolDOEGenerator(n_samples, calc_snd)
else:
raise RuntimeError(
"Bad sensitivity analysis method name '{}'".format(
self.options["sa_method_name"]
)
)
def _set_name(self):
self._name = "SALib_DOE_" + self.options["sa_method_name"]
def get_cases(self):
return self.options["generator"].get_cases()
def get_salib_problem(self):
return self.options["generator"].get_salib_problem()
| 33.168478 | 96 | 0.566607 |
794472b4bcf6e123d790dd82c85ad04c11539684 | 391 | py | Python | ex69.py | FernandaMakiHirose/programas-jupyter | 40ebfc820fefceb14293715104641ef184acfff4 | [
"MIT"
] | null | null | null | ex69.py | FernandaMakiHirose/programas-jupyter | 40ebfc820fefceb14293715104641ef184acfff4 | [
"MIT"
] | null | null | null | ex69.py | FernandaMakiHirose/programas-jupyter | 40ebfc820fefceb14293715104641ef184acfff4 | [
"MIT"
] | 1 | 2021-06-09T22:33:11.000Z | 2021-06-09T22:33:11.000Z | # Write a program that reads a person's sex but only accepts the values 'M' or 'F'. If the input is invalid, ask again until a valid value is entered.
sexo = str(input('Enter your sex: [M/F] ')).strip().upper()[0]
while sexo not in 'MmFf':
    sexo = str(input('Invalid input. Enter your sex again: [M/F] ')).strip().upper()[0]
print(f'Sex {sexo} registered successfully')
| 55.857143 | 159 | 0.695652 |
79447515fe1becbcb2771438aab29e2357a75f43 | 1,516 | py | Python | piston/web.py | sebwebdesigner/piston-local-node | 85d6f3a74b0f2b822e15099d6626a74764840018 | [
"MIT"
] | null | null | null | piston/web.py | sebwebdesigner/piston-local-node | 85d6f3a74b0f2b822e15099d6626a74764840018 | [
"MIT"
] | null | null | null | piston/web.py | sebwebdesigner/piston-local-node | 85d6f3a74b0f2b822e15099d6626a74764840018 | [
"MIT"
] | null | null | null | import re
from .utils import strfdelta, strfage
from .storage import configStorage as configStore
from .web_app import app, socketio
from .web_steem import WebSteem
from . import web_views, web_assets, web_socketio
import logging
log = logging.getLogger(__name__)
steem = WebSteem().getSteem()
@app.template_filter('age')
def _jinja2_filter_age(date, fmt=None):
""" Format a datatime as age
"""
return strfage(date, fmt)
@app.template_filter('excert')
def _jinja2_filter_datetime(data):
""" Extract an excert of a post
"""
words = data.split(" ")
return " ".join(words[:100])
@app.template_filter('parseBody')
def _jinja2_filter_parseBody(body):
""" Pre-process the body of a post before
showing in the UI
"""
body = re.sub(
r"^(https?:.*/(.*\.(jpg|png|gif))\??.*)",
r"\n\n",
body, flags=re.MULTILINE)
return body
@app.template_filter('currency')
def _jinja2_filter_currency(value):
""" Format the crypto tokens properly
:param float value: The amount to format as string
"""
return "{:,.3f}".format(value)
def run(port, host):
""" Run the Webserver/SocketIO and app
"""
socketio.run(app,
debug=configStore.get("web:debug"),
host=host,
port=port)
# FIXME: Don't use .run()
# from gevent.wsgi import WSGIServer
# from yourapplication import app
# http_server = WSGIServer(('', 5000), app)
# http_server.serve_forever()
| 24.852459 | 58 | 0.639842 |
7944756609635070c072cea0bd71ef2c74a1cb52 | 3,717 | py | Python | app.py | UsernameForGerman/worker_cms_flask | 9503a46ca7253411b72a371f28f5f14cb0653e75 | [
"Apache-2.0"
] | null | null | null | app.py | UsernameForGerman/worker_cms_flask | 9503a46ca7253411b72a371f28f5f14cb0653e75 | [
"Apache-2.0"
] | null | null | null | app.py | UsernameForGerman/worker_cms_flask | 9503a46ca7253411b72a371f28f5f14cb0653e75 | [
"Apache-2.0"
] | null | null | null | import sqlite3
from flask import Flask
from flask_bootstrap import Bootstrap
from werkzeug import exceptions
app = Flask(__name__)
app.url_map.strict_slashes = False
Bootstrap(app)
DBNAME = 'main.db'
def prepare_db():
connection = sqlite3.connect(DBNAME)
cursor = connection.cursor()
statements = [
"""
CREATE TABLE IF NOT EXISTS author(
id INTEGER UNIQUE NOT NULL PRIMARY KEY,
name TEXT NOT NULL,
birth_date TEXT NOT NULL
);
""",
"""
CREATE TABLE IF NOT EXISTS death(
id INTEGER UNIQUE NOT NULL PRIMARY KEY,
death TEXT NOT NULL,
FOREIGN KEY (id) REFERENCES author(id) ON DELETE CASCADE
);
""",
"""
CREATE TABLE IF NOT EXISTS country(
id INTEGER UNIQUE NOT NULL PRIMARY KEY,
name TEXT NOT NULL
);
""",
"""
CREATE TABLE IF NOT EXISTS type(
id INTEGER UNIQUE NOT NULL PRIMARY KEY,
name TEXT NOT NULL
);
""",
"""
CREATE TABLE IF NOT EXISTS work(
id INTEGER UNIQUE NOT NULL PRIMARY KEY,
release_date TEXT NOT NULL,
title TEXT NOT NULL,
type_id INTEGER NOT NULL,
FOREIGN KEY (type_id) REFERENCES type(id) ON DELETE CASCADE
);
""",
"""
CREATE TABLE IF NOT EXISTS country_to_author(
country_id INTEGER NOT NULL,
author_id INTEGER NOT NULL,
FOREIGN KEY (country_id) REFERENCES country(id) ON DELETE CASCADE,
FOREIGN KEY (author_id) REFERENCES author(id) ON DELETE CASCADE
);
""",
"""
CREATE TABLE IF NOT EXISTS country_to_work(
country_id INTEGER NOT NULL,
work_id INTEGER NOT NULL,
FOREIGN KEY (country_id) REFERENCES country(id) ON DELETE CASCADE,
FOREIGN KEY (work_id) REFERENCES work(id) ON DELETE CASCADE
);
""",
"""
CREATE TABLE IF NOT EXISTS author_to_work(
author_id INTEGER NOT NULL,
work_id INTEGER NOT NULL,
FOREIGN KEY (author_id) REFERENCES author(id) ON DELETE CASCADE,
FOREIGN KEY (work_id) REFERENCES work(id) ON DELETE CASCADE
);
""",
"""
CREATE INDEX IF NOT EXISTS country_to_author_country_id_idx
ON country_to_author(country_id);
""",
"""
CREATE INDEX IF NOT EXISTS country_to_author_author_id_idx
ON country_to_author(author_id);
""",
"""
CREATE INDEX IF NOT EXISTS country_to_work_country_id_idx
ON country_to_work(country_id);
""",
"""
CREATE INDEX IF NOT EXISTS country_to_work_work_id_idx
ON country_to_work(work_id);
""",
"""
CREATE INDEX IF NOT EXISTS author_to_work_author_id_idx
ON author_to_work(author_id);
""",
"""
CREATE INDEX IF NOT EXISTS author_to_work_work_id_idx
ON author_to_work(work_id);
""",
"""
CREATE INDEX IF NOT EXISTS work_type_id_idx
ON work(type_id);
"""
]
for statement in statements:
cursor.execute(statement)
if __name__ == "__main__":
# Need to make sure Flask knows about its views before we run
# the app, so we import them. We could do it earlier, but there's
# a risk that we may run into circular dependencies, so I do it at the
# last minute here.
from views import *
prepare_db()
    app.app.run(debug=True, host='0.0.0.0', port=5030)  # 0.0.0.0 means "all IPv4 addresses on the local machine".
| 30.719008 | 117 | 0.583804 |
7944759f1401425a4c13765a4eeed6a428f7ab7f | 2,074 | py | Python | models/tfidf_knn.py | jianguoz/DNNC-few-shot-intent | ea6f39a225d62f87c05f3628206b06dcaeb56f01 | [
"MIT"
] | 30 | 2020-11-13T22:21:51.000Z | 2022-03-02T03:46:46.000Z | models/tfidf_knn.py | jianguoz/DNNC-few-shot-intent | ea6f39a225d62f87c05f3628206b06dcaeb56f01 | [
"MIT"
] | 5 | 2021-04-14T04:48:59.000Z | 2021-08-04T11:22:43.000Z | models/tfidf_knn.py | jianguoz/DNNC-few-shot-intent | ea6f39a225d62f87c05f3628206b06dcaeb56f01 | [
"MIT"
] | 7 | 2020-11-17T08:56:40.000Z | 2022-03-30T06:22:48.000Z | # Copyright 2020, Salesforce.com, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import torch
class TfidfKnn:
def __init__(self, example_sentences):
self.cache(example_sentences)
def cache(self, example_sentences):
if example_sentences is None:
return
self.tfidf = TfidfVectorizer(strip_accents="unicode",
stop_words="english")
self.cached_features = self.tfidf.fit_transform(example_sentences)
def predict(self, text):
distances = None
for t in text:
text_features = self.tfidf.transform([t])
dists = cosine_similarity(text_features, self.cached_features, "cosine").ravel()
dists = torch.FloatTensor(dists).unsqueeze(0)
if distances is None:
distances = dists
else:
distances = torch.cat((distances, dists), dim = 0)
return distances
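# Example usage (illustrative sketch, not part of the original module; the sentences are
# placeholders):
if __name__ == "__main__":
    examples = ["book a flight to boston",
                "what is my checking account balance",
                "play some jazz music"]
    knn = TfidfKnn(examples)
    # predict() returns a (num_queries, num_examples) tensor of cosine similarities.
    similarities = knn.predict(["please book me a flight"])
    print(similarities, similarities.argmax(dim=1))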
| 51.85 | 462 | 0.712633 |
7944775738543f79c2b838dea606a85448c88251 | 4,149 | py | Python | t2t_bert/task_module/global_batch_norm.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 34 | 2018-12-19T01:00:57.000Z | 2021-03-26T09:36:37.000Z | t2t_bert/task_module/global_batch_norm.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 11 | 2018-12-25T03:37:59.000Z | 2021-08-25T14:43:58.000Z | t2t_bert/task_module/global_batch_norm.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 9 | 2018-12-27T08:00:44.000Z | 2020-06-08T03:05:14.000Z | # import tensorflow.compat.v1 as tf
import tensorflow as tf
from tensorflow.python.tpu import tpu_function # pylint: disable=g-direct-tensorflow-import
BATCH_NORM_EPSILON = 1e-5
class BatchNormalization(tf.layers.BatchNormalization):
"""Batch Normalization layer that supports cross replica computation on TPU.
This class extends the keras.BatchNormalization implementation by supporting
cross replica means and variances. The base class implementation only computes
moments based on mini-batch per replica (TPU core).
For detailed information of arguments and implementation, refer to:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization
"""
def __init__(self, fused=False, **kwargs):
"""Builds the batch normalization layer.
Arguments:
fused: If `False`, use the system recommended implementation. Only support
`False` in the current implementation.
      **kwargs: input arguments that are forwarded to
tf.layers.BatchNormalization.
"""
if fused in (True, None):
raise ValueError('The TPU version of BatchNormalization does not support '
'fused=True.')
super(BatchNormalization, self).__init__(fused=fused, **kwargs)
def _cross_replica_average(self, t):
"""Calculates the average value of input tensor across TPU replicas."""
num_shards = tpu_function.get_tpu_context().number_of_shards
return tf.tpu.cross_replica_sum(t) / tf.cast(num_shards, t.dtype)
def _moments(self, inputs, reduction_axes, keep_dims):
"""Compute the mean and variance: it overrides the original _moments."""
shard_mean, shard_variance = super(BatchNormalization, self)._moments(
inputs, reduction_axes, keep_dims=keep_dims)
num_shards = tpu_function.get_tpu_context().number_of_shards
if num_shards and num_shards > 1:
# Each group has multiple replicas: here we compute group mean/variance by
# aggregating per-replica mean/variance.
group_mean = self._cross_replica_average(shard_mean)
group_variance = self._cross_replica_average(shard_variance)
# Group variance needs to also include the difference between shard_mean
# and group_mean.
mean_distance = tf.square(group_mean - shard_mean)
group_variance += self._cross_replica_average(mean_distance)
return (group_mean, group_variance)
else:
return (shard_mean, shard_variance)
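  # Note on the cross-replica moments above: assuming equal per-core batch sizes, the
  # group statistics follow the law of total variance,
  #   var_group = mean_i(var_shard_i) + mean_i((mean_shard_i - mean_group)^2),
  # which is exactly what the two _cross_replica_average() calls compute.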
def batch_norm_relu(FLAGS, inputs, is_training, relu=True, init_zero=False,
center=True, scale=True, data_format='channels_last'):
"""Performs a batch normalization followed by a ReLU.
Args:
inputs: `Tensor` of shape `[batch, channels, ...]`.
is_training: `bool` for whether the model is training.
relu: `bool` if False, omits the ReLU operation.
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0 instead of 1 (default).
center: `bool` whether to add learnable bias factor.
scale: `bool` whether to add learnable scaling factor.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
A normalized `Tensor` with the same `data_format`.
"""
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
if data_format == 'channels_first':
axis = 1
else:
axis = -1
if FLAGS.global_bn:
bn_foo = BatchNormalization(
axis=axis,
momentum=FLAGS.batch_norm_decay,
epsilon=BATCH_NORM_EPSILON,
center=center,
scale=scale,
fused=False,
gamma_initializer=gamma_initializer)
inputs = bn_foo(inputs, training=is_training)
else:
inputs = tf.layers.batch_normalization(
inputs=inputs,
axis=axis,
momentum=FLAGS.batch_norm_decay,
epsilon=BATCH_NORM_EPSILON,
center=center,
scale=scale,
training=is_training,
fused=True,
gamma_initializer=gamma_initializer)
if relu:
inputs = tf.nn.relu(inputs)
return inputs | 39.141509 | 92 | 0.712461 |
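# Example call of batch_norm_relu (illustrative sketch, not part of the original module;
# FLAGS only needs the two attributes read above):
#
#   from types import SimpleNamespace
#   FLAGS = SimpleNamespace(global_bn=False, batch_norm_decay=0.997)
#   net = batch_norm_relu(FLAGS, net, is_training=True, data_format='channels_last')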
7944777745d4fd49c685fa4730c9b18e10829960 | 1,329 | py | Python | health_predict/setup.py | rockey2016/MachineLearning | 9b0a57ea66f4ec7a233b81f54a456604235b3679 | [
"Apache-2.0"
] | null | null | null | health_predict/setup.py | rockey2016/MachineLearning | 9b0a57ea66f4ec7a233b81f54a456604235b3679 | [
"Apache-2.0"
] | null | null | null | health_predict/setup.py | rockey2016/MachineLearning | 9b0a57ea66f4ec7a233b81f54a456604235b3679 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import io
from setuptools import setup
#with io.open('ReadMe.md','rt',encoding='utf8') as f:
# readme = f.read()
setup(
    name='healthpredict',  # name on PyPI, used when installing with pip or easy_install
version='0.1.2',
#url='https://www.palletsprojects.com/p/flask/',
#project_urls=OrderedDict((
#('Documentation', 'http://flask.pocoo.org/docs/'),
#('Code', 'https://github.com/pallets/flask'),)),
#license='BSD',
author='sxx',
author_email='[email protected]',
maintainer='sxx',
maintainer_email='[email protected]',
description='A simple tool for predicting health score.',
# long_description=readme,
    packages=['health_predict'],  # package name used in import statements
package_data = {
'health_predict':['data/*.csv','data/*.hdf5','models/*.h5']},
include_package_data=True,
zip_safe=False,
platforms='any',
    install_requires=[  # dependencies that need to be installed
'numpy>=1.9.1',
'pandas>=0.22.0',
'tensorflow>=1.6.0',
'keras>=2.1.5',
],
    classifiers=[  # classifier list describing the package
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules'],
entry_points={
'console_scripts': [
'test = health.test:main',
],
},
) | 30.906977 | 73 | 0.595937 |
794477acc88cb0a82a868a4df8bcdd46f500f85b | 3,110 | py | Python | test/unit/test_schema_validation.py | KTH/aspen | 3be9b55d21dfd950d1a82b2cf4f464cd1f1e9757 | [
"MIT"
] | null | null | null | test/unit/test_schema_validation.py | KTH/aspen | 3be9b55d21dfd950d1a82b2cf4f464cd1f1e9757 | [
"MIT"
] | 8 | 2019-10-10T08:03:02.000Z | 2022-01-11T11:28:58.000Z | test/unit/test_schema_validation.py | KTH/aspen | 3be9b55d21dfd950d1a82b2cf4f464cd1f1e9757 | [
"MIT"
] | null | null | null | __author__ = '[email protected]'
import unittest
import traceback
import mock
import requests
from test import mock_test_data
from modules.steps.report_success import ReportSuccess
from modules.util import data_defs, reporter_service, environment
class TestSchemaValidation(unittest.TestCase):
@unittest.skipIf(environment.get_env(environment.SKIP_VALIDATION_TESTS),
'SKIP_VALIDATION_TESTS set')
def test_validate_deployment(self):
validation_url = environment.get_with_default_string(
environment.VALIDATE_DEPLOYMENT_URL,
'https://app.kth.se/jsonschema/dizin/deployment'
)
step = ReportSuccess()
pipeline_data = mock_test_data.get_pipeline_data()
deployment_json = step.create_deployment_json(pipeline_data)
result = requests.post(validation_url, json=deployment_json, allow_redirects=False)
self.assertEqual(result.json(), {})
self.assertEqual(result.status_code, 200)
@unittest.skipIf(environment.get_env(environment.SKIP_VALIDATION_TESTS),
'SKIP_VALIDATION_TESTS set')
def test_validate_recommendation(self):
validation_url = environment.get_with_default_string(
environment.VALIDATE_RECOMMENDATION_URL,
'https://app.kth.se/jsonschema/dizin/recommendation'
)
rec_obj = reporter_service.create_recommedation_object(
'kth-azure-app',
'The recommendation',
'#channel1,#channel2'
)
result = requests.post(validation_url, json=rec_obj, allow_redirects=False)
self.assertEqual(result.json(), {})
self.assertEqual(result.status_code, 200)
@unittest.skipIf(environment.get_env(environment.SKIP_VALIDATION_TESTS),
'SKIP_VALIDATION_TESTS set')
def test_validate_error(self):
validation_url = environment.get_with_default_string(
environment.VALIDATE_ERROR_URL,
'https://app.kth.se/jsonschema/dizin/error'
)
pipeline_data = {
data_defs.STACK_FILE_PARSED_CONTENT:
mock_test_data.get_parsed_stack_content()
}
combined_labels = reporter_service.get_combined_service_labels(pipeline_data)
# Expected error (with slack channels)
error = mock_test_data.get_mock_deployment_error()
error_object = reporter_service.create_error_object(error, combined_labels, False)
result = requests.post(validation_url, json=error_object, allow_redirects=False)
self.assertEqual(result.json(), {})
self.assertEqual(result.status_code, 200)
# Unexpected error (with stack trace)
traceback.format_exc = mock.Mock(return_value='Stack\ntrace')
error = mock_test_data.get_mock_deployment_error(expected=False)
error_object = reporter_service.create_error_object(error, combined_labels, False)
result = requests.post(validation_url, json=error_object, allow_redirects=False)
self.assertEqual(result.json(), {})
self.assertEqual(result.status_code, 200)
| 45.735294 | 91 | 0.705145 |
7944785da4256f8a03a6df515e04745be5fc43a0 | 3,162 | py | Python | src/testbench/gmmset.py | juliia5m/knu_voice | 1f5d150ded23af4c152b8d20f1ab4ecec77b40e1 | [
"Apache-2.0"
] | 717 | 2015-01-03T15:25:46.000Z | 2022-03-30T12:45:45.000Z | src/testbench/gmmset.py | juliia5m/knu_voice | 1f5d150ded23af4c152b8d20f1ab4ecec77b40e1 | [
"Apache-2.0"
] | 91 | 2015-03-19T09:25:23.000Z | 2021-05-19T08:51:26.000Z | src/testbench/gmmset.py | juliia5m/knu_voice | 1f5d150ded23af4c152b8d20f1ab4ecec77b40e1 | [
"Apache-2.0"
] | 315 | 2015-01-21T00:06:00.000Z | 2022-03-29T08:13:36.000Z | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# $File: gmmset.py
# $Date: Sun Feb 22 20:17:14 2015 +0800
# $Author: Xinyu Zhou <zxytim[at]gmail[dot]com>
import operator
import multiprocessing
from collections import defaultdict
import numpy as np
from gmm.python.pygmm import GMM
class GMMSet(object):
def __init__(self, gmm_order=32, ubm=None,
reject_threshold=10,
**kwargs):
self.kwargs = kwargs
self.gmms = []
self.ubm = ubm
self.reject_threshold = reject_threshold
if ubm is not None:
self.gmm_order = ubm.get_nr_mixtures()
else:
self.gmm_order = gmm_order
self.y = []
def fit_new(self, x, label):
self.y.append(label)
gmm = GMM(self.gmm_order, **self.kwargs)
gmm.fit(x, self.ubm)
self.gmms.append(gmm)
def cluster_by_label(self, X, y):
Xtmp = defaultdict(list)
for ind, x in enumerate(X):
label = y[ind]
Xtmp[label].extend(x)
yp, Xp = zip(*Xtmp.iteritems())
return Xp, yp
def auto_tune_parameter(self, X, y):
if self.ubm is None:
return
# TODO
def fit(self, X, y):
X, y = self.cluster_by_label(X, y)
for ind, x in enumerate(X):
self.fit_new(x, y[ind])
self.auto_tune_parameter(X, y)
def gmm_score(self, gmm, x):
return np.sum(gmm.score(x))
def predict_one_scores(self, x):
return [self.gmm_score(gmm, x) for gmm in self.gmms]
def predict_one(self, x):
scores = self.predict_one_scores(x)
return self.y[max(enumerate(scores), key=operator.itemgetter(1))[0]]
def predict(self, X):
return map(self.predict_one, X)
def predict_one_with_rejection(self, x):
assert self.ubm is not None, \
"UBM must be given prior to conduct reject prediction."
scores = self.predict_one_scores(x)
x_len = len(x) # normalize score
scores = map(lambda v: v / x_len, scores)
max_tup = max(enumerate(scores), key=operator.itemgetter(1))
ubm_score = self.gmm_score(self.ubm, x) / x_len
#print scores, ubm_score
if max_tup[1] - ubm_score < self.reject_threshold:
#print max_tup[1], ubm_score, max_tup[1] - ubm_score
return None
return self.y[max_tup[0]]
def predict_with_reject(self, X):
return map(self.predict_one_with_rejection, X)
def load_gmm(self, label, fname):
self.y.append(label)
gmm = GMM.load(fname)
for key, val in self.kwargs.iteritems():
exec("gmm.{0} = val".format(key))
self.gmms.append(gmm)
class GMMSetPyGMM(GMMSet):
def predict_one(self, x):
scores = [gmm.score_all(x) / len(x) for gmm in self.gmms]
p = sorted(scores)
#print scores, p[-1] - p[-2]
return self.y[max(enumerate(scores), key=operator.itemgetter(1))[0]]
def before_pickle(self):
self.gmms = [x.dumps() for x in self.gmms]
def after_pickle(self):
self.gmms = [GMM.loads(x) for x in self.gmms]
# vim: foldmethod=marker
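# Example usage (illustrative sketch, not part of the original module; `features_a` and
# `features_b` stand for 2-D frame-by-coefficient feature arrays, one per speaker):
#
#   gmmset = GMMSet(gmm_order=32)
#   gmmset.fit([features_a, features_b], ['speaker_a', 'speaker_b'])
#   label = gmmset.predict_one(features_a)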
| 28.745455 | 76 | 0.59709 |
794479052d43892ec769936798b1aebf02addd33 | 1,339 | py | Python | examples/IRT/EM/IRT.py | zelo2/EduCDM | d725dc50ec677dfe409d88a3ffea6dce8effad62 | [
"Apache-2.0"
] | 36 | 2021-04-28T03:22:03.000Z | 2022-03-30T16:54:44.000Z | examples/IRT/EM/IRT.py | zelo2/EduCDM | d725dc50ec677dfe409d88a3ffea6dce8effad62 | [
"Apache-2.0"
] | 21 | 2021-03-18T14:10:11.000Z | 2022-01-29T14:12:45.000Z | examples/IRT/EM/IRT.py | zelo2/EduCDM | d725dc50ec677dfe409d88a3ffea6dce8effad62 | [
"Apache-2.0"
] | 36 | 2021-03-17T14:43:18.000Z | 2022-03-29T07:52:26.000Z | # coding: utf-8
# 2021/5/2 @ liujiayu
import logging
import numpy as np
import pandas as pd
from EduCDM import EMIRT
train_data = pd.read_csv("../../../data/a0910/train.csv")
valid_data = pd.read_csv("../../../data/a0910/valid.csv")
test_data = pd.read_csv("../../../data/a0910/test.csv")
stu_num = max(max(train_data['user_id']), max(test_data['user_id']))
prob_num = max(max(train_data['item_id']), max(test_data['item_id']))
R = -1 * np.ones(shape=(stu_num, prob_num))
R[train_data['user_id']-1, train_data['item_id']-1] = train_data['score']
test_set = []
for i in range(len(test_data)):
row = test_data.iloc[i]
test_set.append({'user_id':int(row['user_id'])-1, 'item_id':int(row['item_id'])-1, 'score':row['score']})
logging.getLogger().setLevel(logging.INFO)
cdm = EMIRT(R, stu_num, prob_num, dim=1, skip_value=-1) # IRT, dim > 1 is MIRT
cdm.train(lr=1e-3, epoch=2)
cdm.save("irt.params")
cdm.load("irt.params")
rmse, mae = cdm.eval(test_set)
print("RMSE, MAE are %.6f, %.6f" % (rmse, mae))
# ---incremental training
new_data = [{'user_id': 0, 'item_id': 2, 'score': 0.0}, {'user_id': 1, 'item_id': 1, 'score': 1.0}]
cdm.inc_train(new_data, lr=1e-3, epoch=2)
# ---evaluate user's state
stu_rec = np.random.randint(-1, 2, size=prob_num)
dia_state = cdm.transform(stu_rec)
print("user's state is " + str(dia_state))
| 31.880952 | 109 | 0.668409 |
7944790fe856ead5a0030ba84b8e20f6e719599a | 1,150 | py | Python | tests/core/admin.py | narunbabu/django-import-export | a8506db680bce21feb8f143592e53cea1c46e337 | [
"BSD-2-Clause"
] | null | null | null | tests/core/admin.py | narunbabu/django-import-export | a8506db680bce21feb8f143592e53cea1c46e337 | [
"BSD-2-Clause"
] | null | null | null | tests/core/admin.py | narunbabu/django-import-export | a8506db680bce21feb8f143592e53cea1c46e337 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import unicode_literals
from django.contrib import admin
from import_export.resources import ModelResource
from import_export.admin import ImportExportMixin, ImportMixin, ExportActionModelAdmin
from .models import Book, Category, Author, Child, Census, CensusRest
class CensusAdmin(ImportExportMixin, admin.ModelAdmin):
pass
class CensusRestAdmin(ImportExportMixin, admin.ModelAdmin):
pass
class ChildAdmin(ImportMixin, admin.ModelAdmin):
pass
class BookResource(ModelResource):
class Meta:
model = Book
def for_delete(self, row, instance):
return self.fields['name'].clean(row) == ''
class BookAdmin(ImportExportMixin, admin.ModelAdmin):
list_filter = ['categories', 'author']
resource_class = BookResource
class CategoryAdmin(ExportActionModelAdmin):
pass
class AuthorAdmin(ImportMixin, admin.ModelAdmin):
pass
admin.site.register(Book, BookAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Child, ChildAdmin)
admin.site.register(Census, CensusAdmin)
admin.site.register(CensusRest, CensusRestAdmin) | 25 | 86 | 0.78087 |
79447a3d6ad6f59b89b837c63e9b291ce6d64f13 | 2,096 | py | Python | tests/unit/bigtop_harness.py | buggtb/layer-apache-bigtop-base | 65cbc24f1803423da2128d43907d02f094550c64 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2018-09-19T21:01:46.000Z | 2018-09-19T21:01:46.000Z | tests/unit/bigtop_harness.py | buggtb/layer-apache-bigtop-base | 65cbc24f1803423da2128d43907d02f094550c64 | [
"ECL-2.0",
"Apache-2.0"
] | 51 | 2016-04-15T21:33:22.000Z | 2018-08-07T22:04:11.000Z | tests/unit/bigtop_harness.py | buggtb/layer-apache-bigtop-base | 65cbc24f1803423da2128d43907d02f094550c64 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2016-04-14T23:23:07.000Z | 2018-10-02T21:50:12.000Z | #
# bigtop_harness.py -- Harness for our unit tests
#
import logging
import mock
import os
import unittest
from path import Path
LOGGER = logging.getLogger()
LOGGER.addHandler(logging.StreamHandler())
LOGGER.setLevel(logging.DEBUG)
class Harness(unittest.TestCase):
'''
Harness for our unit tests. Automatically patches out options, and
sets up hooks to make patching out hookenv.status_get easier.
'''
#
# Status related stuff
#
@property
def statuses(self):
if not hasattr(self, '_statuses'):
self._statuses = []
return self._statuses
@property
def last_status(self):
'''
Helper for mocked out status list.
Returns the last status set, or a (None, None) tuple if no
status has been set.
'''
if not self.statuses:
return (None, None)
return self.statuses[-1]
def status_set(self, status, message):
'''Set our mock status.'''
self.statuses.append((status, message))
#
# Misc Helpers
#
def log(self, msg):
'''
Print a given message to STDOUT.
'''
self._log.debug(msg)
#
# init, setUp, tearDown
#
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self._log = LOGGER
def setUp(self):
'''
Give each test access to a clean set of mocks.
'''
self._patchers = []
# Patch out options
self._patchers.append(mock.patch(
'apache_bigtop_base.layer.options', create=True))
self._patchers.append(mock.patch(
'charms.layer.apache_bigtop_base.layer.options', create=True))
for patcher in self._patchers:
patcher.start()
def tearDown(self):
'''Clean up our mocks.'''
for patcher in self._patchers:
patcher.stop()
# Clean up state db
cwd = os.getcwd()
state_db = Path(cwd) / '.unit-state.db'
if state_db.exists():
state_db.remove()
| 21.387755 | 74 | 0.583969 |
79447b07a68ac398e5951b0ecaea40c825b6dba5 | 15,623 | py | Python | syft/nn.py | morgangiraud/PySyft | 79e0505a6e9ec2d78287d8c19b27bbac28455780 | [
"Apache-2.0"
] | 1 | 2018-03-19T01:40:01.000Z | 2018-03-19T01:40:01.000Z | syft/nn.py | morgangiraud/PySyft | 79e0505a6e9ec2d78287d8c19b27bbac28455780 | [
"Apache-2.0"
] | null | null | null | syft/nn.py | morgangiraud/PySyft | 79e0505a6e9ec2d78287d8c19b27bbac28455780 | [
"Apache-2.0"
] | null | null | null | import syft.controller as controller
from syft.utils import Progress
from syft import FloatTensor
import sys, time
import numpy as np
import syft.metrics
class Model():
def __init__(self, id=None):
self.sc = controller
self.params = False
self.type = None
self._layer_type = None
self.id = id
self.type = "model"
self.output_shape = "(dynamic)"
def init(self,layer_type,params=[]):
self.type = "model"
self._layer_type = layer_type
self.sc = controller
self.id = -1
self.id = int(self.sc.send_json(self.cmd("create",[self._layer_type] + params)))
def discover(self):
self._layer_type = self.layer_type()
if(self._layer_type == 'linear'):
return Linear(id = self.id)
elif(self._layer_type == 'sigmoid'):
return Sigmoid(id = self.id)
elif(self._layer_type == 'crossentropyloss'):
return CrossEntropyLoss(id = self.id)
elif(self._layer_type == 'tanh'):
return Tanh(id = self.id)
elif(self._layer_type == 'dropout'):
return Dropout(id = self.id)
elif(self._layer_type == 'softmax'):
return Softmax(id = self.id)
elif(self._layer_type == 'logsoftmax'):
return LogSoftmax(id = self.id)
elif(self._layer_type == 'relu'):
return ReLU(id = self.id)
elif(self._layer_type == 'log'):
return Log(id = self.id)
elif(self._layer_type == 'policy'):
return Policy(id = self.id)
elif(self._layer_type == 'sequential'):
return Sequential(id = self.id)
else:
sys.stderr.write("Attempted to discover the type - but it wasn't supported. Has the layer type '"
+ self._layer_type + "' been added to the discover() method in nn.py?")
def __call__(self,*args):
if(len(args) == 1):
return self.forward(args[0])
elif(len(args) == 2):
return self.forward(args[0],args[1])
elif(len(args) == 3):
return self.forward(args[0],args[1], args[2])
def parameters(self):
return self.sc.no_params_func(self.cmd, "params",return_type='FloatTensor_list', delete_after_use=False)
def num_parameters(self):
return self.sc.no_params_func(self.cmd,"param_count",return_type='int')
def models(self):
return self.sc.no_params_func(self.cmd, "models",return_type='Model_list')
def set_id(self,new_id):
self.sc.params_func(self.cmd,"set_id",[new_id], return_type='string')
self.id = new_id
return self
def fit(self, input, target, criterion, optim, batch_size, iters=15, log_interval=200, metrics=[], verbose=True):
if(type(input) == list):
input = np.array(input).astype('float')
if(type(input) == np.array or type(input) == np.ndarray):
input = FloatTensor(input,autograd=True, delete_after_use=True)
if(type(target) == list):
target = np.array(target).astype('float')
if(type(target) == np.array or type(target) == np.ndarray):
target = FloatTensor(target,autograd=True, delete_after_use=True)
num_batches = self.sc.params_func(self.cmd,"prepare_to_fit",[input.id, target.id, criterion.id, optim.id, batch_size], return_type='int')
print("Number of Batches:" + str(num_batches))
if(verbose):
progress_bars = list()
progress_bars.append(Progress(0,iters-1))
start = time.time()
loss = 100000
for iter in range(iters):
if(verbose):
progress_bars.append(Progress(0,num_batches))
iter_start = time.time()
for log_i in range(0,num_batches,log_interval):
prev_loss = float(loss)
_loss = self.sc.params_func(self.cmd,"fit",[log_i, min(log_i+log_interval,num_batches),1], return_type='float')
if(_loss != '0'):
loss = _loss
if(loss == 'NaN' or prev_loss == 'NaN'):
if(verbose):
progress_bars[0].danger()
progress_bars[-1].danger()
break
elif(float(loss) > prev_loss):
if(verbose):
progress_bars[0].info()
progress_bars[-1].info()
else:
if(verbose):
progress_bars[0].normal()
progress_bars[-1].normal()
elapsed = time.time() - iter_start
pace = elapsed / (log_i+1)
remaining = int((num_batches - log_i - 1) * pace)
if(remaining > 60):
remaining = str(int(remaining/60)) + "m" + str(remaining%60) + "s"
else:
remaining = str(remaining) + "s"
if(verbose):
progress_bars[-1].update(log_i+1,[('',remaining),('loss',str(loss)),("batch",str(log_i)+"-"+str(min(log_i+log_interval,num_batches)))])
if(verbose):
progress_bars[-1].success()
progress_bars[-1].update(num_batches,[('',str(time.time() - iter_start)),('loss',str(loss)),("batch",str(log_i)+"-"+str(min(log_i+log_interval,num_batches)))])
elapsed = time.time() - start
pace = elapsed / (iter+1)
remaining = int((iters - iter - 1) * pace)
if(remaining > 60):
remaining = str(int(remaining/60)) + "m" + str(remaining%60) + "s"
else:
remaining = str(remaining) + "s"
if(verbose):
progress_bars[0].update(iter,[('',remaining),('loss',loss)])
if(loss == 'NaN'):
break
if(verbose):
progress_bars[0].success()
return loss
def summary(self, verbose=True, return_instead_of_print = False):
layer_type = self.layer_type() + "_" + str(self.id) + " (" + str(type(self)).split("'")[1].split(".")[-1] + ")"
if(type(self.output_shape) == int):
output_shape = str((None,self.output_shape))
else:
output_shape = str(self.output_shape)
n_param = str(self.num_parameters())
output = layer_type + " "*(29-len(layer_type)) + output_shape + " "*(26-len(output_shape)) + n_param + "\n"
if(verbose):
single = "_________________________________________________________________\n"
header = "Layer (type) Output Shape Param # \n"
double = "=================================================================\n"
total_params = "Total params: " + "{:,}".format(self.num_parameters()) + "\n"
trainable_params = "Trainable params: " + "{:,}".format(self.num_parameters()) + "\n"
non_trainable_params = "Non-trainable params: 0" + "\n"
output = single + header + double + output + double + total_params + trainable_params + non_trainable_params + single
if(return_instead_of_print):
return output
print(output)
def __len__(self):
return len(self.models())
def __getitem__(self,idx):
return self.parameters()[idx]
def activation(self):
return self.sc.no_params_func(self.cmd, "activation",return_type='FloatTensor', delete_after_use=False)
def layer_type(self):
return self.sc.no_params_func(self.cmd,"model_type",return_type='string')
def cmd(self,function_call, params = []):
cmd = {
'functionCall': function_call,
'objectType': self.type,
'objectIndex': self.id,
'tensorIndexParams': params}
return cmd
def forward(self, input):
return self.sc.params_func(self.cmd, "forward", [input.id], return_type='FloatTensor', delete_after_use=False)
def evaluate(self, test_input, test_target, criterion, batch_size=128, metrics=[], verbose=True):
if(type(test_input) == list):
test_input = np.array(test_input).astype('float')
if(type(test_input) == np.array or type(test_input) == np.ndarray):
test_input = FloatTensor(test_input, autograd=True, delete_after_use=True)
if(type(test_target) == list):
test_target = np.array(test_target).astype('float')
if(type(test_target) == np.array or type(test_target) == np.ndarray):
test_target = FloatTensor(test_target, autograd=True, delete_after_use=True)
test_size = test_target.shape()[0] - (test_target.shape()[0] % batch_size) # Discard test examples that don't fit in the batches
y_true = test_target.to_numpy()[:test_size]
loss, predictions = self.sc.params_func(self.cmd, "evaluate", [test_input.id, test_target.id, criterion.id, batch_size], return_type='FloatTensor_list')
loss = loss.to_numpy()[0]
if verbose:
print("Test loss = {}".format(loss))
y_pred = predictions.to_numpy()[:test_size]
metrics_dict = {}
for metric in metrics:
if (callable(metric)):
metrics_dict[metric.__name__] = metric
else:
metrics_dict[metric] = syft.metrics.get(metric)
metrics_result = {}
for metric_name, metric_fn in metrics_dict.items():
result = metric_fn(y_true, y_pred)
metrics_result[metric_name] = result
if verbose:
print("{} = {}".format(metric_name, result))
return loss, metrics_result
def to_json(self):
return self.sc.no_params_func(self.cmd, "config", return_type='string')
def __repr__(self,verbose=True):
if(verbose):
output = ""
output += self.__repr__(False) + "\n"
for p in self.parameters():
output += "\t W:" + p.__repr__(verbose=False)
activation = self.activation()
if(activation is not None):
output += "\t A:" + activation.__repr__(verbose=False) + "\n"
return output
else:
return "<syft.nn."+self._layer_type+" at " + str(self.id) + ">"
class Policy(Model):
#super(Policy, self).__init__()
def __init__(self, model, optimizer, state_type='discrete'):
self.init("policy",[model.id, optimizer.id])
self.model = model
self.state_type = state_type
self.optimizer = optimizer
def sample(self, input):
return self.sc.params_func(self.cmd,"sample",[input.id],return_type='IntTensor')
def parameters(self):
return self.model.parameters()
def __call__(self,*args):
if(self.state_type == 'discrete'):
if(len(args) == 1):
return self.sample(args[0])
elif(len(args) == 2):
return self.sample(args[0],args[1])
elif(len(args) == 3):
return self.sample(args[0],args[1], args[2])
elif(self.state_type == 'continuous'):
if(len(args) == 1):
return self.forward(args[0])
elif(len(args) == 2):
return self.forward(args[0],args[1])
elif(len(args) == 3):
return self.forward(args[0],args[1], args[2])
else:
print("Error: State type " + self.state_type + " unknown")
def history(self):
raw_history = self.sc.params_func(self.cmd,"get_history",[],return_type="string")
history_idx = list(map(lambda x:list(map(lambda y:int(y),x.split(","))),raw_history[2:-1].split("],[")))
losses = list()
rewards = list()
for loss,reward in history_idx:
if(loss != -1):
losses.append(self.sc.get_tensor(loss))
else:
losses.append(None)
if(reward != -1):
rewards.append(self.sc.get_tensor(reward))
else:
rewards.append(None)
return losses,rewards
class Sequential(Model):
def __init__(self, layers=None, id=None):
super(Sequential, self).__init__()
if (id is not None):
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "sequential"
else:
self.init("sequential")
if(layers is not None):
for layer in layers:
self.add(layer)
def add(self, model):
self.sc.params_func(self.cmd,"add",[model.id], delete_after_use=False)
def summary(self):
single = "_________________________________________________________________\n"
header = "Layer (type) Output Shape Param # \n"
double = "=================================================================\n"
total_params = "Total params: " + "{:,}".format(self.num_parameters()) + "\n"
trainable_params = "Trainable params: " + "{:,}".format(self.num_parameters()) + "\n"
non_trainable_params = "Non-trainable params: 0" + "\n"
output = single + header + double
mods = self.models()
for m in mods[:-1]:
output += m.summary(verbose=False, return_instead_of_print=True)
output += single
output += mods[-1].summary(verbose=False, return_instead_of_print=True)
output += double
output += total_params + trainable_params + non_trainable_params + single
print(output)
def __repr__(self):
output = ""
for m in self.models():
output += m.__repr__()
return output
def __getitem__(self,idx):
return self.models()[idx]
class Linear(Model):
def __init__(self, input_dim=0, output_dim=0, id=None, initializer="Xavier"):
super(Linear, self).__init__()
if(id is None):
self.init("linear",[input_dim, output_dim, initializer])
else:
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "linear"
params = self.parameters()
self.output_shape = int(params[0].shape()[-1])
self.input_shape = int(params[0].shape()[0])
class ReLU(Model):
def __init__(self, id=None):
super(ReLU, self).__init__()
if(id is None):
self.init("relu")
else:
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "relu"
class Dropout(Model):
def __init__(self, rate=0.5, id=None):
super(Dropout, self).__init__()
if(id is None):
self.init("dropout",params=[rate])
else:
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "dropout"
class Sigmoid(Model):
def __init__(self, id=None):
super(Sigmoid, self).__init__()
if(id is None):
self.init("sigmoid")
else:
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "sigmoid"
class Softmax(Model):
def __init__(self, dim=1, id=None):
super(Softmax, self).__init__()
if(id is None):
self.init("softmax",params=[dim])
else:
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "softmax"
class LogSoftmax(Model):
def __init__(self, dim=1, id=None):
super(LogSoftmax, self).__init__()
if(id is None):
self.init("logsoftmax",params=[dim])
else:
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "logsoftmax"
class Log(Model):
def __init__(self, id=None):
super(Log, self).__init__()
if(id is None):
self.init("log")
else:
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "log"
class Tanh(Model):
def __init__(self, id=None):
super(Tanh, self).__init__()
if(id is None):
self.init("tanh")
else:
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "tanh"
class MSELoss(Model):
def __init__(self, id=None):
super(MSELoss, self).__init__()
if (id is None):
self.init("mseloss")
else:
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "mseloss"
def forward(self, input, target):
return self.sc.params_func(self.cmd, "forward", [input.id, target.id], return_type='FloatTensor', delete_after_use=False)
class CategoricalCrossEntropy(Model):
def __init__(self, id=None):
super(CategoricalCrossEntropy, self).__init__()
if (id is None):
self.init("categorical_crossentropy")
else:
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "categorical_crossentropy"
def forward(self, input, target):
return self.sc.params_func(self.cmd, "forward", [input.id, target.id], return_type='FloatTensor', delete_after_use=False)
class NLLLoss(Model):
def __init__(self, id=None):
super(NLLLoss, self).__init__()
if (id is None):
self.init("nllloss")
else:
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "nllloss"
def forward(self, input, target):
return self.sc.params_func(self.cmd, "forward", [input.id, target.id], return_type='FloatTensor', delete_after_use=False)
class CrossEntropyLoss(Model):
# TODO backward() to be implemented: grad = target - prediction
# TODO backward(): until IntegerTensor is available assume a one-hot vector is passed in.
def __init__(self, dim=1, id=None):
super(CrossEntropyLoss, self).__init__()
if(id is None):
self.init("crossentropyloss",params=[dim])
else:
self.id = id
self.sc = controller
self.type = "model"
self._layer_type = "crossentropyloss"
def forward(self, input, target):
return self.sc.params_func(self.cmd, "forward", [input.id, target.id], return_type='FloatTensor', delete_after_use=False) | 29.871893 | 163 | 0.664661 |