max_stars_repo_path stringlengths 3–269 | max_stars_repo_name stringlengths 4–119 | max_stars_count int64 0–191k | id stringlengths 1–7 | content stringlengths 6–1.05M | score float64 0.23–5.13 | int_score int64 0–5 |
---|---|---|---|---|---|---|
scivision_treecrown_plugin/__init__.py | ots22/scivision-treecrown-plugin | 0 | 12789351 | <gh_stars>0
from .model import DeepForest, DetectreeRGB
| 1.023438 | 1 |
Python/write_Points.py | joaonizer/sLLGS | 0 | 12789352 | <filename>Python/write_Points.py
#!/usr/bin/env python3
import numpy as np
def write_Points(w , l , dx , d_or , cortes_y , i):
    # Builds the vectors 'px', 'py' and 'd_or' from the inputs:
    # w        - width of particle i
    # l        - length of particle i
    # dx       - distance between particle i and particle i-1
    # d_or     - vector with the particle centers
    # cortes_y - cuts applied to the y coordinates
    # i        - index of the current particle.
    ##
    # Describes the points in X
    x1 = -w[i]/2
    x2 = w[i]/2
    x3 = x2
    x4 = x1
    # Describes the points in Y, without cuts.
    y1 = l[i]/2
    y2 = l[i]/2
    y3 = -l[i]/2
    y4 = -l[i]/2
    # Concatenate the values into 'px' and 'py' (arrays, so the cut offsets broadcast)
    px = np.array([x1 , x2 , x3 , x4])
    py = np.array([y1 , y2 , y3 , y4]) - cortes_y
    if i==1:
        d_or[i,:]=[0 , 0 , 0] # first particle at the origin
    else:
        d_or[i,:]=[w[i-1]/2 + w[i]/2 + d_or[i-1,0] + dx[i] , 0 , 0]
        # sum of w/2 of the previous particle
        # with w/2 of the current particle,
        # plus the accumulated x distance from the origin,
        # plus the distance 'dx' between the two particles
        # NOTE: currently only along X
    return px , py , d_or
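# Minimal usage sketch; the array shapes and values below are assumptions chosen only to
# illustrate the 1-based indexing convention used by the function.
if __name__ == '__main__':
    w = np.array([0.0, 50.0, 50.0])      # assumed particle widths
    l = np.array([0.0, 100.0, 100.0])    # assumed particle lengths
    dx = np.array([0.0, 0.0, 20.0])      # assumed gaps between neighbouring particles
    d_or = np.zeros((3, 3))              # particle centers, filled in particle by particle
    cortes_y = np.zeros(4)               # no cuts in this example
    for i in range(1, 3):
        px, py, d_or = write_Points(w, l, dx, d_or, cortes_y, i)
        print(i, px, py, d_or[i])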
| 3.078125 | 3 |
whack_a_bug_api/__init__.py | timothyakinyelu/whack-a-bug-api | 0 | 12789353 | <gh_stars>0
from flask import Flask
from whack_a_bug_api.helpers.load_config import loadConfig
from flask_login import LoginManager
login_manager = LoginManager()
def createApp():
app = Flask(__name__, instance_relative_config=True)
mode = app.env
Config = loadConfig(mode)
app.config.from_object(Config)
from whack_a_bug_api.db import db
db.init_app(app)
login_manager.init_app(app)
from whack_a_bug_api.models import bugs, projects
with app.app_context():
#add route blueprints
from whack_a_bug_api.routes.auth_routes import auth_routes
app.register_blueprint(auth_routes.auth)
db.create_all()
return app | 1.945313 | 2 |
setup.py | anonmyous-author/anonymous-code | 0 | 12789354 | from setuptools import find_packages, setup
with open("README.md") as readme_file:
readme = readme_file.read()
with open("requirements.txt") as reqs_file:
requirements = reqs_file.read().split("\n")
setup(
name="braincode",
version="0.1.0",
description="an investigation of computer program representations.",
long_description=readme,
author="anonymous1 anonymous1",
author_email="<EMAIL>",
license="MIT",
packages=find_packages(where="braincode"),
install_requires=requirements,
python_requires=">=3.7",
)
| 1.578125 | 2 |
doc/LectureNotes/_build/jupyter_execute/chapter7.py | Shield94/Physics321 | 0 | 12789355 | #!/usr/bin/env python
# coding: utf-8
# # Non-inertial Frames, Translation and Rotating Coordinate Systems
#
#
# Let us quickly remind ourselves about the definition of a so-called **inertial frame of reference**.
#
# An inertial frame of reference in classical physics (and in special
# relativity as well) possesses the property that in this frame of reference a
# body with zero net force acting upon it does not accelerate; that is,
# such a body is at rest or moving at a constant velocity. If we recall the definition of Newton's first law, this is essentially its description.
#
# An inertial frame of reference can be defined in analytical terms as a
# frame of reference that describes time and space homogeneously,
# isotropically, and in a time-independent manner.
#
# Conceptually, the physics of a system in an inertial frame has no
# causes external to the system. An inertial frame of reference may also
# be called an inertial reference frame, inertial frame, Galilean
# reference frame, or inertial space.
#
# All inertial frames are in a state of constant, rectilinear motion
# with respect to one another; an accelerometer moving with any of them
# would detect zero acceleration. Measurements in one inertial frame can
# be converted to measurements in another by a simple transformation
# (the Galilean transformation in Newtonian physics and the Lorentz
# transformation in special relativity). In general relativity, in any
# region small enough for the curvature of spacetime and tidal forces to
# be negligible, one can find a set of inertial frames that
# approximately describe that region.
#
# In a non-inertial reference frame in classical physics and special
# relativity, the physics of a system vary depending on the acceleration
# of that frame with respect to an inertial frame, and the usual
# physical forces must be supplemented by fictitious forces. In
# contrast, systems in general relativity don't have external causes,
# because of the principle of geodesic motion.
#
# In classical physics, for example, a ball dropped towards the ground
# does not go exactly straight down because the Earth is rotating, which
# means the frame of reference of an observer on Earth is not
# inertial. The physics must account for the Coriolis effect—in this
# case thought of as a force—to predict the horizontal motion. Another
# example of such a fictitious force associated with rotating reference
# frames is the centrifugal effect, or centrifugal force. We will here,
# in addition to the abovementioned example of the Coriolis effect, study
# a classic case in classical mechanics, namely Foucault's pendulum.
#
#
# ## Galilean Transformations
#
# In many of the examples studied till now we have restricted our
# attention to the motion of a particle (or a system of particles) as
# seen from an inertial frame of reference. An inertial reference frame
# moves at constant velocity with respect to other reference frames. We
# can formalize this relationship as a coordinate transformation (known
# as a Galilean transformation) between say two given frames, one
# labeled $S_0$ and the other one labeled $S$. In our discussions here we will refer to the frame $S_0$ as the reference frame.
#
# We could consider for example an object in a car, where the car moves with a
# constant velocity with respect to the system $S_0$. We then throw this
# object up in the air and study its motion with respect to the two chosen frames.
# We denote the position of this object in the
# car relative to the car's frame as $\boldsymbol{r}_S(t)$. We have included an explicit
# time dependence here. The position of the car relative to the
# reference frame $S_0$ is $\boldsymbol{R}(t)$ and the position of the object in
# the car relative to $S_0$ is $\boldsymbol{r}_{S_0}(t)$.
#
# The following relations between the positions
# link the various variables that describe the object in the two frames
# $$
# \boldsymbol{r}_{S_0}(t) = \boldsymbol{r}_{S}(t) + \boldsymbol{R}(t).
# $$
# We will stay with Newtonian mechanics, meaning that we do not consider
# relativistic effects. This means also that the time we measure in $S_0$
# is the same as the time we measure in $S$. This approximation is
# reasonable as long as the two frames do not move very fast relative to
# each other.
#
# We can then compute the time derivatives and obtain the corresponding velocities
# $$
# \dot{\boldsymbol{r}}_{S_0}(t) = \boldsymbol{v}_{S_0}=\dot{\boldsymbol{r}}_{S} + \dot{\boldsymbol{R}},
# $$
# or
# $$
# \boldsymbol{v}_{S_0}=\boldsymbol{v}_{S} + \boldsymbol{u},
# $$
# with $\boldsymbol{u}=\dot{\boldsymbol{R}}$.
#
# If our system $S$ moves at constant velocity, we have that the
# accelerations in the two systems equal each other since
# $\ddot{\boldsymbol{R}}=0$ and we have
# $$
# \boldsymbol{a}_{S_0}=\boldsymbol{a}_{S}.
# $$
# The above equations are examples of what we call a homogeneous
# Galilean transformation. In an inertial frame, an object moves with
# constant velocity (i.e., has zero acceleration) if there are no forces
# acting on it. When we are not in an inertial frame, there will be
# spurious (or fictitious) accelerations arising from the acceleration
# of the reference frame. These effects can be seen in simple every-day
# situations such as sitting in a vehicle that is accelerating or
# rounding a corner. Or think of yourself sitting in a seat of an
# aircraft that accelerates rapidly during takeoff. You feel a force
# which pushes you back in the seat. Similarly, if you stand in a bus
# which suddenly brakes (negative acceleration), you feel a force which
# may make you fall forward unless you hold yourself. In these
# situations, loose objects will appear to accelerate relative to the
# observer or vehicle.
#
#
# Our next step is thus to study an accelerating frame. Thereafter we
# will study reference frames that are rotating. This will introduce
# forces like the Coriolis force and the well-known centrifugal
# force. We will use these to study again an object which falls towards
# the Earth (the Earth rotates around its axis). This will lead to a
# correction to the object's acceleration towards the Earth.
#
# Finally, we bring together acceleration and rotation and end the
# discussion here with a classic in classical mechanics, namely
# Foucault's pendulum.
#
#
# ## Accelerating Frames (No Rotation)
#
# We consider first the effect of uniformly accelerating reference
# frames. We will hereafter label this frame with a subscript
# $S$. Assume now that this reference system accelerates with an
# acceleration $\boldsymbol{a}_{S_0}$ relative to an inertial reference frame,
# which we will label with a subscript $S_0$. The accelerating frame
# has a velocity $\boldsymbol{v}_{S_0}$ with respect to the inertial frame.
#
# The figure here
# <!-- dom:FIGURE: [figslides/rotation.png, width=600 frac=0.6] -->
# <!-- begin figure -->
# <img src="figslides/rotation.png" width=600><p style="font-size: 0.9em"><i>Figure 1: </i></p><!-- end figure -->
#
#
# shows the relation between the two
# frames. The position of an object in frame $S$ relative to $S_0$ is
# labeled as $\boldsymbol{r}_{S_0}$. Seen from this inertial frame, an object in
# the accelerating frame obeys Newton's second law
# <!-- Equation labels as ordinary links -->
# <div id="_auto1"></div>
#
# $$
# \begin{equation}
# m\frac{d^2\boldsymbol{r}_{S_0}}{dt^2}=\boldsymbol{F}.
# \label{_auto1} \tag{1}
# \end{equation}
# $$
# Here $\boldsymbol{F}$ is the net force on an object in the accelerating frame
# seen from the inertial frame.
#
# If we on the other hand wish to study the motion of this object (say a
# ball in an accelerating car) relative to the accelerating frame, we
# need to define its position relative to this frame. We label this
# position as $\boldsymbol{r}_{S}$.
#
# Using the definition of velocity as the time derivative of position
# and the standard vector addition of velocities, we can define the
# velocity relative to $S_0$ as
# $$
# \dot{\boldsymbol{r}}_{S_0}=\dot{\boldsymbol{r}}_{S}+\boldsymbol{v}_{S_0}.
# $$
# The left hand side in the last equation defines the object's velocity
# relative to the inertial frame. The right hand side says this is the
# object's velocity relative to the accelerating frame plus the velocity
# of the accelerating frame with respect to the inertial frame. If we
# now take the second derivative of the above equation,
# we have the corresponding accelerations
# $$
# \ddot{\boldsymbol{r}}_{S_0}=\ddot{\boldsymbol{r}}_{S}+\boldsymbol{a}_{S_0}.
# $$
# Multiplying with the mass of a given object, we can rewrite Newton's
# law in the accelerating frame as
# $$
# m\ddot{\boldsymbol{r}}_{S}=\boldsymbol{F}-m\boldsymbol{a}_{S_0}.
# $$
# We see that we have again Newton's second law except that we added a
# correction which defines an effective acceleration compared to the
# equation seen in the inertial frame. We can thus continue to use
# Newton's law in the accelerating frame provided we correct the
# equation of motion with what is often called a fictitious force. This
# is often also called an inertial force or an effective force.
#
# **Add example about pendulum in train car**
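#
# A minimal numerical sketch of such an example (the values of $a$ and $L$ below are
# assumptions chosen only for illustration): a pendulum of length $L$ hangs from the ceiling
# of a car that accelerates with constant acceleration $a$. In the accelerating frame the
# fictitious force $-m\boldsymbol{a}_{S_0}$ tilts the equilibrium away from the vertical,
# opposite to the acceleration, by an angle $\arctan(a/g)$. We check this by minimizing the
# effective potential on a grid.

import numpy as np

g = 9.81   # gravitational acceleration, m/s^2
a = 2.0    # assumed constant acceleration of the car, m/s^2
L = 1.0    # assumed pendulum length, m

# Effective potential per unit mass in the accelerating frame; phi is measured from the
# vertical and is positive in the direction of the car's acceleration.
phi = np.linspace(-np.pi/2, np.pi/2, 100001)
V_eff = -g*L*np.cos(phi) + a*L*np.sin(phi)

phi_eq = phi[np.argmin(V_eff)]
print(f"numerical equilibrium angle: {phi_eq:.4f} rad")
print(f"analytical -arctan(a/g)    : {-np.arctan(a/g):.4f} rad")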
#
# ## Rotating Frames
#
#
# If you are on Earth's surface and if your reference frame is fixed
# with the surface, this is an example of an accelerating frame, where
# the acceleration, as we will show below, is $\Omega^2 r$, where
# $r\equiv\sqrt{x^2+y^2}$, and $\Omega$ is the angular velocity
# of Earth's rotation. The acceleration is inward toward the axis of
# rotation, so the additional contribution to the apparent acceleration
# of gravity is outward in the $x-y$ plane. In contrast the usual
# acceleration $\boldsymbol{g}$ is radially inward pointing toward the origin.
#
# We will now deal with motion in a rotating frame and relate this to an
# inertial frame. The outcome of our derivations will be effective
# forces (or inertial forces) like the abovementioned acceleration (from
# the centrifugal force) and the Coriolis force term.
#
# For a reference frame that rotates with respect to an inertial frame,
# **Euler's theorem** is central here. It states that the most general
# displacement (motion) of a rigid body with one point fixed (we
# normally approximate a rigid body with a mass center) is a rotation
# about some fixed axis. In different words, the most general motion of
# any body relative to a fixed point $O$ is a rotation about some axis
# through the same point $O$. This means that for a specific rotation
# about a given point $O$ we only need to specify the direction of the axis
# about which the rotation occurs with the corresponding angle of
# rotation. As we will see below, the direction of the axis of rotation
# can be specified by a unit vector $\boldsymbol{e}$ in the rotating frame and
# the rate of rotation per unit time. The latter defines the angular
# velocity $\Omega$. We will define these quantities more rigorously below.
# At the end of this section we will also prove Euler's theorem.
#
# What we will show here is that Newton's second law for an object in the rotating frame is given by
# $$
# m\ddot{\boldsymbol{r}}_{S}=\boldsymbol{F}+m\boldsymbol{r}\times\dot{\boldsymbol{\Omega}}+2m\boldsymbol{v}_S\times\boldsymbol{\Omega}+m\left(\boldsymbol{\Omega}\times\boldsymbol{r}\right)\times\boldsymbol{\Omega}.
# $$
# The first term to the right is the force we defined in the inertial
# system, that is $m\ddot{\boldsymbol{r}}_{S_0}=\boldsymbol{F}$. The second term is the
# angular acceleration of the rotating reference frame, a quantity which
# in many cases is set to zero since we assume that the angular velocity
# is constant as a function of time. The third term is the Coriolis force, that
# is
# $$
# \boldsymbol{F}_{\mathrm{Coriolis}}=2m\boldsymbol{v}_S\times\boldsymbol{\Omega},
# $$
# while the last term is going to give us the standard centrifugal force
# $$
# \boldsymbol{F}_{\mathrm{Centrifugal}}=m\left(\boldsymbol{\Omega}\times\boldsymbol{r}\right)\times\boldsymbol{\Omega}.
# $$
# Let us derive these terms, following much of the same procedure as we
# did for an accelerating reference frame. The figure here (to come)
# shows the two reference systems $S$ and $S_0$.
#
#
# We define a general vector $\boldsymbol{A}$. It could represent the position,
# a given force, the velocity and other quantities of interest for
# studies of the equations of motion.
#
# We let this vector be defined by three orthogonal (we assume motion
# in three dimensions) unit vectors $\boldsymbol{e}_i$, that is we have
# $$
# \boldsymbol{A}=A_1\boldsymbol{e}_1+A_2\boldsymbol{e}_2+A_3\boldsymbol{e}_3=\sum_iA_i\boldsymbol{e}_i.
# $$
# These unit vectors are fixed in the rotating frame, that is their time
# derivatives are zero. However, for an observer in the inertial frame
# $S_0$, these unit vectors are rotating and may thus have an
# explicit time dependence.
#
# Since we want to find an expression for the equations of motion in the
# inertial frame and the rotating frame, we need expressions for the
# time derivative of a vector $\boldsymbol{A}$ in these two frames. Since the
# unit vectors are assumed to be fixed in $S$, we have
# $$
# \dot{\boldsymbol{A}}_S=\sum_i\frac{dA_i}{dt}\boldsymbol{e}_i=\sum_i\dot{A}_i\boldsymbol{e}_i.
# $$
# In the inertial frame $S_0$ we have
# $$
# \dot{\boldsymbol{A}}_{S_0}=\sum_i\dot{A}_i\boldsymbol{e}_i+\sum_i A_i\left(\dot{\boldsymbol{e}}_i\right)_{S_0}.
# $$
# We will show below that
# $$
# \left(\dot{\boldsymbol{e}}_i\right)_{S_0}=\Omega\times\boldsymbol{e}_i,
# $$
# where $\Omega$ is the angular velocity (to be derived below). This
# means we can write the derivative of an arbitrary vector $\boldsymbol{A}$ in
# the inertial frame $S_0$ as (the vector is defined in the rotating
# frame),
# $$
# \dot{\boldsymbol{A}}_{S_0}=\sum_i\dot{A}_i\boldsymbol{e}_i+\sum_i A_i(\boldsymbol{\Omega}\times\boldsymbol{e}_i)=\dot{\boldsymbol{A}}_S+\boldsymbol{\Omega}\times\boldsymbol{A}.
# $$
# This is a very useful relation which relates the derivative of any
# vector $\boldsymbol{A}$ measured in the inertial frame $S_0$ to the
# corresponding derivative in a rotating frame $S$.
#
# If we now let $\boldsymbol{A}$ be the position and the velocity vectors, we
# can derive the equations of motion in the rotating frame in terms of
# the same equations of motion in the inertial frame $S_0$.
#
# Let us start with the position $\boldsymbol{r}$.
#
# We have
# $$
# \dot{\boldsymbol{r}}_{S_0}=\dot{\boldsymbol{r}}_S+\boldsymbol{\Omega}\times\boldsymbol{r}.
# $$
# If we define the velocities in the two frames as
# $$
# \boldsymbol{v}_{S_0}=\dot{\boldsymbol{r}}_{S_0},
# $$
# and
# $$
# \boldsymbol{v}_{S}=\dot{\boldsymbol{r}}_{S},
# $$
# we have then
# $$
# \dot{\boldsymbol{r}}_{S_0}=\boldsymbol{v}_{S_0}=\boldsymbol{v}_{S}+\boldsymbol{\Omega}\times\boldsymbol{r}.
# $$
# In order to find the equations of motion, we need the acceleration and
# thereby the time derivative of the last equation. The derivative of
# the angular velocity $\Omega$ will come in handy in these derivations
# (repeated applications of the chain rule again).
# The latter derivative is
# $$
# \dot{\boldsymbol{\Omega}}_{S_0}=\dot{\boldsymbol{\Omega}}_S+\boldsymbol{\Omega}\times\boldsymbol{\Omega},
# $$
# which leads to (an expected result, why?)
# $$
# \dot{\boldsymbol{\Omega}}_{S_0}=\dot{\boldsymbol{\Omega}}_S,
# $$
# since $\boldsymbol{\Omega}\times\boldsymbol{\Omega}=0$.
#
# Let us now take the second derivative with respect to time.
#
# Using
# $$
# \left[\frac{d^2\boldsymbol{r}}{dt^2}\right]_{S_0}=\ddot{\boldsymbol{r}}_{S_0}=\left[\frac{d}{dt}\right]_{S_0}\left[\frac{d\boldsymbol{r}}{dt}\right]_{S_0},
# $$
# we have
# $$
# \ddot{\boldsymbol{r}}_{S_0}=\left[\frac{d}{dt}\right]_{S_0}\left[\boldsymbol{v}_{S}+\boldsymbol{\Omega}\times\boldsymbol{r}\right]=\left[\frac{d}{dt}\right]_{S_0}\boldsymbol{v}_{S_0},
# $$
# which gives
# $$
# \ddot{\boldsymbol{r}}_{S_0}=\left[\frac{d\boldsymbol{v}_S}{dt}\right]_{S}+\dot{\boldsymbol{\Omega}}\times \boldsymbol{r}+2\boldsymbol{\Omega}\times\boldsymbol{v}_S+\boldsymbol{\Omega}\times(\boldsymbol{\Omega}\times\boldsymbol{r}).
# $$
# Defining the accelerations $\boldsymbol{a}_{S_0}=\ddot{\boldsymbol{r}}_{S_0}=\dot{\boldsymbol{v}}_{S_0}$ and $\boldsymbol{a}_{S}=\dot{\boldsymbol{v}}_{S}$, we have
# $$
# \boldsymbol{a}_{S_0}=\boldsymbol{a}_{S}+\dot{\boldsymbol{\Omega}}\times \boldsymbol{r}+2\boldsymbol{\Omega}\times\boldsymbol{v}_S+\boldsymbol{\Omega}\times(\boldsymbol{\Omega}\times\boldsymbol{r}).
# $$
# If we now use Newton's law in the inertial frame $\boldsymbol{F}=m\boldsymbol{a}_{S_0}$, we get the effective force in the rotating frame (multiplying by the mass $m$)
# $$
# m\boldsymbol{a}_{S}=\boldsymbol{F}+m\boldsymbol{r}\times\dot{\boldsymbol{\Omega}}+2m\boldsymbol{v}_S\times\boldsymbol{\Omega}+m(\boldsymbol{\Omega}\times\boldsymbol{r})\times\boldsymbol{\Omega},
# $$
# which is what we wanted to demonstrate. We have the Coriolis force
# $$
# \boldsymbol{F}_{\mathrm{Coriolis}}=2m\boldsymbol{v}_S\times\boldsymbol{\Omega},
# $$
# while the last term is the standard centrifugal force
# $$
# \boldsymbol{F}_{\mathrm{Centrifugal}}=m\left(\boldsymbol{\Omega}\times\boldsymbol{r}\right)\times\boldsymbol{\Omega}.
# $$
# In our discussions below we will assume that the angular acceleration of the rotating frame is zero and focus only on the Coriolis force and the centrifugal force.
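#
# As a small numerical illustration of the two force terms, the sketch below evaluates them
# directly with vector cross products. The mass, position and velocity are assumptions
# chosen to be Earth-like; only the angular velocity magnitude matches typical values for
# Earth's rotation.

import numpy as np

m = 1.0                                   # assumed mass, kg
Omega = np.array([0.0, 0.0, 7.27e-5])     # angular velocity along z, rad/s (Earth-like)
r = np.array([6.4e6, 0.0, 0.0])           # assumed position vector, m
v_S = np.array([0.0, 10.0, 0.0])          # assumed velocity in the rotating frame, m/s

F_coriolis = 2*m*np.cross(v_S, Omega)                   # 2 m v_S x Omega
F_centrifugal = m*np.cross(np.cross(Omega, r), Omega)   # m (Omega x r) x Omega

print("Coriolis force   :", F_coriolis)
print("Centrifugal force:", F_centrifugal)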
#
#
#
# ### Effective potential and Centrifugal force
#
# Suppose we can ignore the Coriolis force. If we focus only on the
# centrifugal force we have an additional force
# $$
# \boldsymbol{F}_{\mathrm{Centrifugal}}=m\left(\boldsymbol{\Omega}\times\boldsymbol{r}\right)\times\boldsymbol{\Omega},
# $$
# where the term $\boldsymbol{\Omega}\times\boldsymbol{r}$ is the velocity due to the rotation.
#
# Consider now an object with position $\boldsymbol{r}$ according to an observer in a frame
# rotating about the $z$ axis with angular velocity
# $\boldsymbol{\Omega}=\Omega\hat{z}$. To an observer in the inertial frame
# the vector will change even if the vector appears
# fixed to the rotating observer.
#
#
# If $\boldsymbol{\Omega}$ is in the $z$ direction,
# the centrifugal force becomes
# <!-- Equation labels as ordinary links -->
# <div id="_auto2"></div>
#
# $$
# \begin{equation}
# \boldsymbol{F}_{\mathrm{Centrifugal}}=m\Omega^2(x\hat{x}+y\hat{y}).
# \label{_auto2} \tag{2}
# \end{equation}
# $$
# The centrifugal force points outward in the $x-y$ plane, and its
# magnitude is $m\Omega^2r$, where
# $r=\sqrt{x^2+y^2}$.
#
# Continuing along these lines,
# if we define a rotating frame which makes an angle $\theta$ with the inertial frame and define the distance to an object in this frame from the origin as $\boldsymbol{r}$, then the centrifugal force (which points outward) has as magnitude $\Omega^2r\sin{\theta}$. Defining $\rho=r\sin{\theta}$ and the unit vector $\hat{\boldsymbol{\rho}}$ (see figure here)
# <!-- dom:FIGURE: [figslides/centrifugal.png, width=600 frac=0.6] -->
# <!-- begin figure -->
# <img src="figslides/centrifugal.png" width=600><p style="font-size: 0.9em"><i>Figure 1: </i></p><!-- end figure -->
#
# we have the well-known expression for the centrifugal force
# $$
# \boldsymbol{F}_{\mathrm{Centrifugal}}=m\Omega^2\rho\hat{\boldsymbol{\rho}},
# $$
# and with the velocity given by its magnitude $v=\Omega\rho$ we obtain the well-known expression for the centrifugal force
# $$
# \boldsymbol{F}_{\mathrm{Centrifugal}}=m\frac{v^2}{\rho}\hat{\boldsymbol{\rho}}.
# $$
# If we now go back again to our falling object discussed in the
# beginning of these lectures, we need to account for the fact that the Earth
# is rotating with respect to the falling object.
#
# Seen from a rotating coordinate system we have now that the forces acting on the falling object are
# $$
# m\ddot{\boldsymbol{r}}=\boldsymbol{F}_{\mathrm{gravity}}+\boldsymbol{F}_{\mathrm{Centrifugal}}.
# $$
# If we define the mass of Earth as $M$ and its radius as $R$, and assume that the object is close to the Earth's surface, the gravitational force takes the well-known form
# $$
# \boldsymbol{F}_{\mathrm{gravity}}=-\frac{GMm}{R^2}\hat{\boldsymbol{r}}=m\boldsymbol{g}_0.
# $$
# Inserting the expression for the centrifugal force, we can then define an effective force
# $$
# \boldsymbol{F}_{\mathrm{eff}}=\boldsymbol{F}_{\mathrm{gravity}}+\boldsymbol{F}_{\mathrm{Centrifugal}}=m\boldsymbol{g}_0+m\Omega^2R\sin{(\theta)}\hat{\boldsymbol{\rho}},
# $$
# and with
# $$
# \boldsymbol{g}_{\mathrm{eff}}=\boldsymbol{g}_0+\Omega^2R\sin{(\theta)}\hat{\boldsymbol{\rho}},
# $$
# we have
# $$
# \boldsymbol{F}_{\mathrm{eff}}=m\boldsymbol{g}_{\mathrm{eff}}.
# $$
# In the rotating coordinate system (not an inertial frame), motion is
# thus determined by an apparent force and one can define effective
# potentials. In addition to the normal gravitational potential energy,
# there is a contribution to the effective potential,
# <!-- Equation labels as ordinary links -->
# <div id="_auto3"></div>
#
# $$
# \begin{equation}
# \delta V_{\rm eff}(r)=-\frac{m}{2}\Omega^2\rho^2=-\frac{m}{2}r^2\Omega^2\sin^2\theta,
# \label{_auto3} \tag{3}
# \end{equation}
# $$
# where $\theta$ is the polar angle, measured from say the north
# pole. If the true gravitational force can be considered as originating
# from a point in Earth's center, the net effective potential for a mass
# $m$ at a height $h$ above Earth's surface is
# <!-- Equation labels as ordinary links -->
# <div id="_auto4"></div>
#
# $$
# \begin{equation}
# V_{\rm eff}=mgh-m\frac{1}{2}\Omega^2(R+h)^2\sin^2\theta.
# \label{_auto4} \tag{4}
# \end{equation}
# $$
# As an example, let us ask ourselves how much wider is Earth at the
# equator than the north-south distance between the poles assuming that
# the gravitational field above the surface can be approximated by that
# of a point mass at Earth's center.
#
#
# The surface of the ocean must be at constant effective potential for a
# sample mass $m$. This means that if $h$ now refers to the height of
# the water
# $$
# m g[h(\theta=\pi/2)-h(\theta=0)]=\frac{m}{2}\Omega^2(R+h)^2.
# $$
# Because $R>>h$, one can approximate $R+h\rightarrow R$ on the right-hand side, thus
# $$
# h(\theta=\pi/2)-h(\theta=0)=\frac{\Omega^2R^2}{2g}.
# $$
# This comes out to a bit less than 11 km, or a difference of nearly 22 km for
# the diameter of the Earth in the equatorial plane compared to a
# diameter between the poles. In reality, the difference is
# approximately 41 km. The discrepancy comes from the assumption that
# the true gravitational force can be treated as if it came from a point
# at Earth's center. This would be true if the distribution of mass was
# radially symmetric. However, Earth's center is molten and the rotation
# distorts the mass distribution. Remarkably this effect nearly doubles
# the elliptic distortion of Earth's shape. Due to this distortion, the
# top of Mount Everest is not the furthest point from the center of the
# Earth. That belongs to the top of a volcano, Chimborazo, in Ecuador,
# which is one degree in latitude below the Equator. Chimborazo is about
# 8500 ft lower than Everest when measured relative to sea level, but is
# 7700 feet further from the center of the Earth.
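#
# The estimate above is easy to check numerically. The sketch below uses an assumed mean
# Earth radius and the same 24-hour rotation rate used later in these notes:

import numpy as np

g = 9.81                          # m/s^2
R = 6.371e6                       # assumed mean Earth radius, m
Omega = 2*np.pi/(24*3600)         # rotation rate, rad/s

delta_h = Omega**2 * R**2 / (2*g)
print(f"equator-to-pole height difference: {delta_h/1e3:.1f} km")   # a bit less than 11 km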
#
#
#
#
#
# ## Coriolis Force and Falling Objects
#
# The Coriolis force is given by
# $$
# \boldsymbol{F}_{\mathrm{Coriolis}}=2m\boldsymbol{v}_S\times\boldsymbol{\Omega},
# $$
# It does not enter problems like the shape of the Earth
# above because in that case the water was not moving relative to the
# rotating frame.
#
# The Coriolis force is non-zero only if $\boldsymbol{v}_S\ne 0$ and is directed
# perpendicular to both $\boldsymbol{v}_S$ and $\Omega$. Viewed along the
# direction of $\boldsymbol{v}_S$, the Coriolis force associated with
# counter-clockwise rotational motion produces a deflection to the
# right. For clockwise rotational motion, it produces a deflection to
# the left.
#
# The Coriolis force associated with Earth’s rotational motion is
# responsible for the circulating or cyclonic weather patterns
# associated with hurricanes and cyclones, as illustrated in the figure
# here. Basically, a pressure gradient gives rise to air currents that
# tend to flow from high pressure to low pressure regions. But as the
# air flows toward the low pressure region, the Coriolis force deflects
# the air currents away from their straight line paths. Since the
# projection of $\Omega$ perpendicular to the local tangent plane
# changes sign as one crosses the equator, the direction of the cyclonic
# motion (either counter-clockwise or clockwise) is different in the
# Northern and Southern hemispheres.
# <!-- dom:FIGURE: [figslides/coriolis.png,width=600 frac=0.6] -->
# <!-- begin figure -->
# <img src="figslides/coriolis.png" width=600><p style="font-size: 0.9em"><i>Figure 1: </i></p><!-- end figure -->
#
#
#
# As an example, assume a ball is dropped from a height $h=500$m above Minneapolis. Due to the
# Coriolis force, it is deflected by an amount $\delta x$ and $\delta
# y$. We want to find the deflection due to the Coriolis force. Here we ignore the centrifugal terms.
#
# The equations of motion are:
# $$
# \begin{eqnarray*}
# \frac{dv_x}{dt}&=&-2(\Omega_yv_z-\Omega_zv_y),\\
# \frac{dv_y}{dt}&=&-2(\Omega_zv_x-\Omega_xv_z),\\
# \frac{dv_z}{dt}&=&-g-2(\Omega_xv_y-\Omega_yv_x),\\
# \Omega_z&=&\Omega\cos\theta,~~~\Omega_y=\Omega\sin\theta,~~~\Omega_x=0.
# \end{eqnarray*}
# $$
# Here the coordinate system is such that $\hat{x}$ points east, $\hat{y}$ points
# north and $\hat{z}$ points upward.
#
# One can now ignore all the Coriolis terms on the right-hand sides
# except for those with $v_z$. The other terms will all be doubly
# small. One can also throw out terms with $\Omega_x$. This gives
# $$
# \begin{eqnarray*}
# \frac{dv_x}{dt}&\approx& -2\Omega v_z\sin\theta,\\
# \frac{dv_y}{dt}&\approx& 0,\\
# \frac{dv_z}{dt}&\approx& -g.
# \end{eqnarray*}
# $$
# There will be no significant deflection in the $y$ direction, $\delta
# y=0$, but in the $x$ direction one can substitute $v_z=-gt$ above,
# $$
# \begin{eqnarray*}
# v_x&\approx&\int_0^t dt'~2\Omega gt'\sin\theta=\Omega gt^2\sin\theta,\\
# \delta x&\approx& \int_0^t dt'~v_x(t')=\frac{g\Omega\sin\theta t^3}{3}.
# \end{eqnarray*}
# $$
# One can find the deflections by using $h=\frac{1}{2}gt^2$, to find the
# time, and using the all-knowing internet to see that the latitude of
# Minneapolis is $44.6^\circ$ or $\theta=45.4^\circ$.
# $$
# \begin{eqnarray*}
# t&=&\sqrt{2h/g}=10.1~{\rm s},\\
# \Omega&=&\frac{2\pi}{3600\cdot 24~{\rm s}}=7.27\times 10^{-5}~{\rm s}^{-1},\\
# \delta x&=&17.4~{\rm cm}~~{\rm(east)}.
# \end{eqnarray*}
# $$
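#
# These two lines are straightforward to reproduce numerically; the sketch below simply
# re-evaluates the formulas above with the same numbers used in the text:

import numpy as np

g = 9.81                            # m/s^2
h = 500.0                           # drop height, m
theta = np.radians(90.0 - 44.6)     # polar angle corresponding to Minneapolis' latitude
Omega = 2*np.pi/(3600*24)           # rad/s, as above

t = np.sqrt(2*h/g)
delta_x = g*Omega*np.sin(theta)*t**3/3

print(f"fall time          : {t:.1f} s")             # about 10.1 s
print(f"eastward deflection: {delta_x*100:.1f} cm")  # about 17.4 cm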
# ## Accelerating and Rotating Frames
#
# It is now simple to bring together the equations for an accelerating and rotating frame. Using our results we have the equations of motion for an object in an accelerating and rotating frame with respect to an inertial frame
# $$
# m\ddot{\boldsymbol{r}}_{S}=\boldsymbol{F}+m\boldsymbol{r}\times\dot{\boldsymbol{\Omega}}+2m\boldsymbol{v}_S\times\boldsymbol{\Omega}+m\left(\boldsymbol{\Omega}\times\boldsymbol{r}\right)\times\boldsymbol{\Omega}-\boldsymbol{a}_{S_0},
# $$
# where the last term is the acceleration of the accelerating frame seen from the inertial frame.
#
#
# ## The Foucault Pendulum
#
#
#
# The [Foucault
# Pendulum](https://en.wikipedia.org/wiki/Foucault_pendulum) is simply
# a regular pendulum moving in both horizontal directions, and with the
# Coriolis force included. It is explained at its simplest if we
# consider a pendulum positioned at the North pole. Foucault's
# experiment was actually the first laboratory demonstration that the
# Earth is rotating. The experiment is rather simple and many physics
# departments worldwide have their own pendulum.
#
# In the original experiment done in Paris in 1851, Foucault used a
# massive 28 kg pendulum suspended on a 67 m long wire.
#
# If we use an inertial frame with the North pole as its origin, the Earth
# below the pendulum rotates with a period of 24h (actually 23h and
# 56min). Seen with respect to the surface of the Earth, the plane of
# the pendulum moves in the opposite direction of the rotation of the Earth.
#
# If we were to perform the experiment in other places, the setup is slightly more complicated since the pendulum will then rotate with the Earth. The net effect is a slower rotation of the pendulum's plane compared to the North pole.
#
# <!-- dom:FIGURE: [figslides/foucault.png,width=600 frac=0.6] -->
# <!-- begin figure -->
# <img src="figslides/foucault.png" width=600><p style="font-size: 0.9em"><i>Figure 1: </i></p><!-- end figure -->
#
#
# Let us look at the equations we need to solve.
# $$
# \begin{eqnarray*}
# m\ddot{\boldsymbol{r}}&=&\boldsymbol{T}+m\boldsymbol{g}-2m\boldsymbol{\Omega}\times\boldsymbol{v},
# \end{eqnarray*}
# $$
# where the centrifugal force term has been absorbed into the definition of
# $\boldsymbol{g}$. The magnitude of the tension, $\boldsymbol{T}$, is considered
# constant because we consider only small oscillations. Then $T\approx mg$, and the components, using $\hat{x},\hat{y}$ to correspond to east
# and north respectively, are
# $$
# \begin{eqnarray*}
# T_x=-mgx/L,~~~T_y=-mgy/L.
# \end{eqnarray*}
# $$
# If $\Omega$ is the rotation rate of the Earth, and if $\theta$ is the polar angle, i.e. $\pi/2$ minus the latitude,
# $$
# \begin{eqnarray*}
# \ddot{x}&=&-gx/L+2\dot{y}\Omega_z,\\
# \ddot{y}&=&-gy/L-2\dot{x}\Omega_z.
# \end{eqnarray*}
# $$
# Here we have used the fact that the oscillations are sufficiently
# small so we can ignore $v_z$. Using $\Omega_0\equiv\sqrt{g/L}$,
# $$
# \begin{eqnarray*}
# \ddot{x}-2\Omega_z\dot{y}+\Omega_0^2x&=&0\\
# \ddot{y}+2\Omega_z\dot{x}+\Omega_0^2y&=&0,
# \end{eqnarray*}
# $$
# where $\Omega_z=|\boldsymbol{\Omega}|\cos\theta$, with $\theta$ being the
# polar angle (zero at the north pole). The terms linear in time
# derivatives are what make life difficult. This will be solved with a
# trick. We will incorporate both differential equations into a single
# complex equation where the first/second are the real/imaginary parts.
# $$
# \begin{eqnarray*}
# \eta\equiv x+iy,\\
# \ddot{\eta}+2i\Omega_z\dot{\eta}+\Omega_0^2\eta&=&0.
# \end{eqnarray*}
# $$
# Now, we guess at a form for the solutions, $\eta(t)=e^{-i\alpha t}$,
# which turns the differential equation into
# $$
# \begin{eqnarray*}
# -\alpha^2+2\Omega_z\alpha+\Omega_0^2&=&0,\\
# \alpha&=&\Omega_z\pm \sqrt{\Omega_z^2+\Omega_0^2},\\
# &\approx&\Omega_z\pm \Omega_0.
# \end{eqnarray*}
# $$
# The solution with two arbitrary constants is then
# $$
# \begin{eqnarray*}
# \eta&=&e^{-i\Omega_zt}\left[C_1e^{i\Omega_0t}+C_2e^{-i\Omega_0t}\right].
# \end{eqnarray*}
# $$
# Here, $C_1$ and $C_2$ are complex, so they actually represent four
# arbitrary numbers. These four numbers should be fixed by the four
# initial conditions, i.e. $x(t=0), \dot{x}(t=0), y(t=0)$ and
# $\dot{y}(t=0)$. With some lengthy algebra, one can rewrite the
# expression as
# <!-- Equation labels as ordinary links -->
# <div id="eq:precmess"></div>
#
# $$
# \begin{eqnarray*}
# \label{eq:precmess} \tag{5}
# \eta&=&e^{-i\Omega_zt}\left[A\cos(\Omega_0t+\phi_A)+iB\cos(\Omega_0t+\phi_B)\right].
# \end{eqnarray*}
# $$
# Here, the four coefficients are represented by the two real arbitrary
# real amplitudes, $A$ and $B$, and two arbitrary phases, $\phi_A$ and
# $\phi_B$. For an initial condition where $y=0$ at $t=0$, one can see
# that $B=0$. This then gives
# $$
# \begin{eqnarray*}
# \eta(t)&=&Ae^{-i\Omega_zt}\cos(\Omega_0t+\gamma)\\
# \nonumber
# &=&A\cos\Omega_zt\cos(\Omega_0t+\gamma)-iA\sin\Omega_zt\cos(\Omega_0t+\gamma).
# \end{eqnarray*}
# $$
# Translating into $x$ and $y$,
# $$
# \begin{eqnarray}
# x&=&A\cos\Omega_zt\cos(\Omega_0t+\gamma),\\
# \nonumber
# y&=&-A\sin\Omega_zt\cos(\Omega_0t+\gamma).
# \end{eqnarray}
# $$
# Assuming the pendulum's frequency is much higher than Earth's
# rotational frequency, $\Omega_0>>\Omega_z$, one can see that the plane
# of the pendulum simply precesses with angular velocity
# $\Omega_z$. This means that in this limit the pendulum oscillates
# in the $x$-direction many times before the phase
# $\Omega_zt$ becomes noticeable. Eventually, when $\Omega_zt=\pi/2$,
# the motion is along the $y$-direction. If you were at the north pole,
# the motion would switch from the $x$-direction to the $y$ direction
# every 6 hours. Away from the north pole, $\Omega_z\ne|\boldsymbol{\Omega}|$
# and the precession frequency is less. At the equator it does not
# precess at all. If one were to repeat for the solutions where $A=0$
# and $B\ne 0$, one would look at motions
# that started in the $y$-direction, then precessed toward the $+x$
# direction. Linear combinations of the two sets of solutions give
# pendulum motions that resemble ellipses rather than simple
# back-and-forth motion.
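#
# A direct numerical integration of the two coupled equations above shows the same slow
# precession. The sketch below assumes the length of Foucault's original pendulum and the
# latitude of Paris (both assumptions), releases the bob along the $x$-direction, and
# compares the orientation of the swing plane after half an hour with the expected
# $-\Omega_z t$:

import numpy as np
from scipy.integrate import solve_ivp

g, L = 9.81, 67.0                                # assumed pendulum length (Foucault's 1851 setup)
Omega0 = np.sqrt(g/L)                            # small-oscillation frequency
Omega_z = 7.29e-5*np.sin(np.radians(48.85))      # vertical rotation component at Paris' latitude

def rhs(t, u):
    # Right-hand sides of the coupled equations with the Coriolis terms included.
    x, y, vx, vy = u
    return [vx, vy,
            -Omega0**2*x + 2*Omega_z*vy,
            -Omega0**2*y - 2*Omega_z*vx]

t_end = 1800.0
sol = solve_ivp(rhs, (0.0, t_end), [1.0, 0.0, 0.0, 0.0], max_step=0.05, rtol=1e-8)

# Orientation of the swing plane, from the principal axis of the last few seconds of motion.
mask = sol.t > t_end - 10.0
xs, ys = sol.y[0, mask], sol.y[1, mask]
angle = 0.5*np.degrees(np.arctan2(2*np.sum(xs*ys), np.sum(xs**2 - ys**2)))

print(f"numerical plane orientation: {angle:.2f} degrees")
print(f"expected -Omega_z*t        : {np.degrees(-Omega_z*t_end):.2f} degrees")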
#
# ## Euler's Theorem from a Linear Algebra Perspective
#
# **this material will be added soon**
| 3.96875 | 4 |
apps/async_task/utils.py | goztrk/django-htk | 206 | 12789356 | <reponame>goztrk/django-htk
# Python Standard Library Imports
import base64
import json
def build_async_task_result(content, content_type, filename):
    """Builds an Async Task result as JSON

    This is necessary if we want to return multiple values, as the result by default is just a plain string.
    """
    payload = {
        # base64-encode the raw bytes, then decode to str so that json.dumps can serialize it
        'content' : base64.b64encode(content).decode('ascii'),
        'content_type' : content_type,
        'filename' : filename,
    }
    result = json.dumps(payload)
    return result
def extract_async_task_result_json_values(result_data):
"""Companion function to perform the inverse of `build_async_task_result()`
"""
payload = json.loads(result_data)
content = base64.b64decode(payload['content'])
content_type = payload['content_type']
filename = payload['filename']
return (content, content_type, filename,)
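# A small round-trip sketch (assuming `content` is a bytes object, e.g. a generated file):
if __name__ == '__main__':
    packed = build_async_task_result(b'hello world', 'text/plain', 'hello.txt')
    content, content_type, filename = extract_async_task_result_json_values(packed)
    assert content == b'hello world' and content_type == 'text/plain' and filename == 'hello.txt'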
| 2.53125 | 3 |
row_analysis/row_analysis.py | guohaoqiang/gcn | 0 | 12789357 | <gh_stars>0
import csv
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from math import ceil, floor, sqrt
import glog as log
def load(f):
for i in f:
return i
def get_nnz(f):
row_offset = load(f)
ans = []
for i in range(1,len(row_offset)):
ans.append(int(row_offset[i])-int(row_offset[i-1]))
log.info(len(ans))
return ans
def get_xy(r):
a = pd.Series(r)
b = a.value_counts()
yy = [] # Frequency
for i in list(b.index):
yy.append(b[i])
yy = np.array(yy)
yy = yy/yy.sum() # Percentile
xx = b.index # NNZs
ind = np.lexsort((yy,xx))
x = [0]
y = [0]
for i in ind:
x.append(xx[i])
y.append(yy[i]+y[-1])
return min(x),max(x),x,y
f = open('cora.csv','r')
f1 = csv.reader(f)
cora = get_nnz(f1)
f.close()
f = open('polblogs.csv','r')
f2 = csv.reader(f)
polblogs = get_nnz(f2)
f.close()
f = open('citeseer.csv','r')
f3 = csv.reader(f)
citeseer = get_nnz(f3)
f.close()
f = open('pubmed.csv','r')
f4 = csv.reader(f)
pubmed = get_nnz(f4)
f.close()
f = open('ppi.csv','r')
f5 = csv.reader(f)
ppi = get_nnz(f5)
f.close()
f = open('flickr.csv','r')
f6 = csv.reader(f)
flickr = get_nnz(f6)
f.close()
f = open('reddit.csv','r')
f7 = csv.reader(f)
reddit = get_nnz(f7)
f.close()
f = open('yelp.csv','r')
f8 = csv.reader(f)
yelp = get_nnz(f8)
f.close()
f = open('amazon.csv','r')
f9 = csv.reader(f)
amazon = get_nnz(f9)
f.close()
fig = plt.gcf()
fig.set_size_inches(12,11)
cora_mn, cora_mx, cora_x, cora_y = get_xy(cora)
polblogs_mn, polblogs_mx, polblogs_x, polblogs_y = get_xy(polblogs)
citeseer_mn, citeseer_mx, citeseer_x, citeseer_y = get_xy(citeseer)
pubmed_mn, pubmed_mx, pubmed_x, pubmed_y = get_xy(pubmed)
ppi_mn, ppi_mx, ppi_x, ppi_y = get_xy(ppi)
flickr_mn, flickr_mx, flickr_x, flickr_y = get_xy(flickr)
reddit_mn, reddit_mx, reddit_x, reddit_y = get_xy(reddit)
yelp_mn, yelp_mx, yelp_x, yelp_y = get_xy(yelp)
amazon_mn, amazon_mx, amazon_x, amazon_y = get_xy(amazon)
mn = min([cora_mn,polblogs_mn,citeseer_mn,pubmed_mn,ppi_mn,flickr_mn,reddit_mn,yelp_mn,amazon_mn])
mx = max([cora_mx,polblogs_mx,citeseer_mx,pubmed_mx,ppi_mx,flickr_mx,reddit_mx,yelp_mx,amazon_mx])
plt.step(cora_y, np.log10(cora_x), label='cora')
log.info(cora_mn)
log.info(cora_mx)
plt.step(polblogs_y, np.log10(polblogs_x), label='polblogs')
log.info(polblogs_mn)
log.info(polblogs_mx)
plt.step(citeseer_y, np.log10(citeseer_x), label='citeseer')
log.info(citeseer_mn)
log.info(citeseer_mx)
plt.step(pubmed_y, np.log10(pubmed_x), label='pubmed')
log.info(pubmed_mn)
log.info(pubmed_mx)
plt.step(ppi_y, np.log10(ppi_x), label='ppi')
log.info(ppi_mn)
log.info(ppi_mx)
plt.step(flickr_y, np.log10(flickr_x), label='flickr')
log.info(flickr_mn)
log.info(flickr_mx)
plt.step(reddit_y, np.log10(reddit_x), label='reddit')
log.info(reddit_mn)
log.info(reddit_mx)
plt.step(yelp_y, np.log10(yelp_x), label='yelp')
log.info(yelp_mn)
log.info(yelp_mx)
plt.step(amazon_y, np.log10(amazon_x), label='amazon')
log.info(amazon_mn)
log.info(amazon_mx)
plt.title('Cumulative distribution function of NNZs in each row')
plt.ylabel('NNZs in each row (log10)')
plt.xlabel('Percentile of NNZ-row')
plt.legend(loc='best')
plt.xlim([-0.01,1.01])
plt.ylim([np.log10(1), np.log10(mx)+0.1])
plt.grid()
plt.savefig("row.svg") | 2.375 | 2 |
backend/tab_widget/item.py | JoyPang123/facial_identity_system | 6 | 12789358 | <gh_stars>1-10
import numpy as np
import torch
from model.triplet.loss import TripletLoss
import torchvision.transforms as transforms
from PyQt5 import QtWidgets
class CustomItem(QtWidgets.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.pass_info = QtWidgets.QLabel()
self.time_info = QtWidgets.QLabel()
self.train_btn = QtWidgets.QPushButton()
self.train_btn.setText("🔼")
self.train_btn.clicked.connect(self.train_model)
self.setStyleSheet(
"""
QLabel {
margin: 0;
padding: 0;
}
QPushButton {
margin: 0;
padding: 0;
border: None
}
QPushButton::hover {
font-size: 25px;
}
"""
)
side_v_box = QtWidgets.QVBoxLayout()
side_v_box.addWidget(self.pass_info)
side_v_box.addWidget(self.time_info)
main_h_box = QtWidgets.QHBoxLayout()
main_h_box.addLayout(side_v_box, 1)
main_h_box.addWidget(self.train_btn)
self.setLayout(main_h_box)
def set_pass_info(self, status, name=None):
if status:
self.pass_info.setText(f"🟢 {name}")
self.train_btn.setToolTip("Select for training")
else:
self.pass_info.setText("🔴")
self.train_btn.setDisabled(True)
self.train_btn.setToolTip("Select for training (Disabled)")
def set_time_info(self, time):
self.time_info.setText(time)
def train_model(self):
windows = QtWidgets.QMessageBox()
windows.setIcon(QtWidgets.QMessageBox.Information)
windows.setText("Training...")
windows.addButton(QtWidgets.QMessageBox.Ok)
windows.button(QtWidgets.QMessageBox.Ok).hide()
windows.show()
QtWidgets.QApplication.processEvents()
# Obtain the model from parent->parent ....
# I know the code is weird, but this is how it works...
model = self.parent().parent().parent().parent().parent().video_container.model
transform = self.parent().parent().parent().parent().parent().video_container.transform
optimizer = self.parent().parent().parent().optimizer
image = self.pix_to_array(self.parent().parent().parent().monitor.pixmap())
if image is not None:
model.train()
image = transform(image[..., :3].copy())
# Create other positive features
horizontal_flip_image = transforms.RandomHorizontalFlip(p=1.0)(image)
# Create negative features
vertical_flip_image = transforms.RandomVerticalFlip(p=1.0)(image)
vertical_horizontal_image = transforms.RandomHorizontalFlip(p=1.0)(vertical_flip_image)
model.train()
anchor_features, pos_features, neg_features = model(image.unsqueeze(0),
horizontal_flip_image.unsqueeze(0),
vertical_flip_image.unsqueeze(0))
criterion = TripletLoss(0.4)
loss = criterion(anchor_features, pos_features, neg_features)
optimizer.zero_grad()
loss.sum().backward()
optimizer.step()
# Change back to eval mode
model.eval()
torch.save(model.state_dict(), "weight/model.pt")
bucket = self.parent().parent().parent().bucket
model_weight_blob = bucket.blob("model.pt")
model_weight_blob.upload_from_filename("weight/model.pt")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Training success!")
msg.exec_()
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText("The image is not available")
msg.setWindowTitle("Error for doing training")
msg.exec_()
windows.done(0)
@staticmethod
def pix_to_array(pixmap):
if pixmap is not None:
h = pixmap.size().height()
w = pixmap.size().width()
q_image = pixmap.toImage()
byte_str = q_image.bits().asstring(w * h * 4)
img = np.frombuffer(byte_str, dtype=np.uint8).reshape((h, w, 4))
return img
return None
| 2.21875 | 2 |
bootimgpack/ui/main.py | hchyhchyxh/tools | 45 | 12789359 | <gh_stars>10-100
#!/usr/bin/python
# Filename main.py
# Main UI of bootimgpack
#
__author__ = '<EMAIL>'
import os
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from internal import bootimg
from Tkinter import *
import tkFileDialog
class Main:
def __init__(self):
root = Tk()
self.__layout(root)
root.mainloop()
def __layout(self, root):
root.title("BootimgPack")
root.geometry("500x220+400+400")
root.resizable(width=False, height=False)
self.__layoutBootImgFrame(root)
self.__layoutPackBtnFrame(root)
self.__layoutBootDirFrame(root)
self.__layoutResultFrame(root)
pass
def __layoutBootImgFrame(self, root):
frame = Frame(root)
Label(frame, width=12, text="Boot Image: ", anchor=W).pack(side=LEFT)
self.__bootImgText = StringVar()
Entry(frame, width=40, textvariable=self.__bootImgText).pack(side=LEFT)
self.__bootImgSelect = Button(frame, text="...")
self.__bindButtonAction(self.__bootImgSelect, self.onClick)
self.__bootImgSelect.pack(padx=5, side=LEFT)
frame.pack(padx=5, pady=15)
def __layoutPackBtnFrame(self, root):
frame = Frame(root)
self.__packBtn = Button(frame, text="PACK ^", width=7, height=1)
self.__bindButtonAction(self.__packBtn, self.onClick)
self.__packBtn.pack(padx=30, side=LEFT)
self.__unpackBtn = Button(frame, text ="UNPACK v", width=7, height=1)
self.__bindButtonAction(self.__unpackBtn, self.onClick)
self.__unpackBtn.pack(side=LEFT)
frame.pack(padx=5, pady=5)
def __layoutBootDirFrame(self, root):
frame = Frame(root)
Label(frame, width=12, text="Files Directory: ", anchor=W).pack(side=LEFT)
self.__bootDirText = StringVar()
Entry(frame, width=40, textvariable=self.__bootDirText).pack(side=LEFT)
self.__bootDirSelect = Button(frame, text="...")
self.__bindButtonAction(self.__bootDirSelect, self.onClick)
self.__bootDirSelect.pack(padx=5, side=LEFT)
frame.pack(padx=5, pady=5)
def __layoutResultFrame(self, root):
frame = Frame(root, relief=SUNKEN, borderwidth=1)
self.__resultText = StringVar()
Label(frame, height=3, textvariable=self.__resultText, wraplength=400, anchor=NW).pack(padx=10, side=LEFT)
self.__resultText.set("Result")
frame.pack(padx=10, pady=20, fill=X, expand=1)
def __bindButtonAction(self, btn, command):
btn.bind("<Button-1>", command)
btn.bind("<Return>", command)
def onClick(self, event):
if event.widget == self.__bootImgSelect:
filename = tkFileDialog.askopenfilename(initialdir=os.path.expanduser("~"))
if len(filename) > 0 :
self.__bootImgText.set(filename)
elif event.widget == self.__bootDirSelect:
directory = tkFileDialog.askdirectory(initialdir=os.path.expanduser("~"))
if len(directory) > 0 :
self.__bootDirText.set(directory)
elif event.widget == self.__unpackBtn:
bootfile = self.__bootImgText.get()
output = self.__bootDirText.get()
if len(bootfile) > 0 :
bootimg.unpack(bootfile, output)
result = "Unpack " + bootfile + " --> " + output
self.__resultText.set(result)
elif event.widget == self.__packBtn:
bootfile = self.__bootDirText.get()
output = self.__bootImgText.get()
if len(bootfile) > 0 :
bootimg.pack(bootfile, output)
result = "Pack " + bootfile + " --> " + output
self.__resultText.set(result)
# End of class Main
### Start
if __name__ == '__main__':
Main() | 2.53125 | 3 |
Temperature/Temperature_Sites/Temperature_Sites/worker.py | listenzcc/GeoChina | 0 | 12789360 | # Data Worker
# %%
import os
import pandas as pd
import plotly.express as px
from pypinyin import lazy_pinyin
locations_url = 'https://blog.csdn.net/envbox/article/details/80290103'
filename = 'locations.json'
sync_folder = os.environ.get('Sync', '.')
mapbox = dict(
mapbox_accesstoken=open(os.path.join(
os.environ['onedrive'], '.mapbox_token')).read(),
mapbox_style='light'
)
def fetch_locations():
locations = pd.read_html(locations_url)[0]
locations.columns = ['Province', 'ID', 'Name',
'Latitude', 'Longitude', 'Height']
# Fix Known Issue,
# use height - 10000 if height is greater than 10000
locations.Height = locations.Height.map(lambda e: e % 10000)
def translate(s):
return ''.join(lazy_pinyin(s))
locations['_province'] = locations['Province'].map(translate)
locations['_name'] = locations['Name'].map(translate)
locations = locations[['ID', 'Province', 'Name',
'Latitude', 'Longitude', 'Height',
'_province', '_name']]
return locations
class DataWorker(object):
def __init__(self):
self.locations = fetch_locations()
self.columns = self.locations.columns
self.plot_mapbox(self.locations.copy())
def search_by_pinyin(self, py):
found = dict()
if py.strip() == '':
found['_name'] = self.locations.copy()
found['_province'] = pd.DataFrame()
else:
for col in ['_province', '_name']:
found[col] = self.locations[self.locations[col].str.startswith(
py)]
output = pd.concat([found['_name'], found['_province']], axis=0)
self.plot_mapbox(output.copy())
return output
def plot_mapbox(self, df):
print('Reploting')
df['ID'] = df['ID'].map(str)
df['Text'] = df[['Province', 'Name', 'ID']].apply(', '.join, axis=1)
fig = px.scatter_mapbox(
df,
lon='Longitude',
lat='Latitude',
color='Province',
# size=3,
hover_name='Text',
zoom=2,
height=300
)
fig.update_layout(**mapbox)
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
self.canvas = fig.to_html()
return self.canvas
# %%
# dw = DataWorker()
# dw.locations
# %%
# dw.search_by_pinyin('bei')
| 2.875 | 3 |
src/advent/solutions/day17.py | khwilson/advent2021 | 0 | 12789361 | <reponame>khwilson/advent2021
import itertools as its
import re
from collections import defaultdict
from ..solution import Solution
def run_sims(x1, x2, y1, y2):
# Making assumption y1 and y2 are both negative
max_vy = abs(y1) + 1
t_to_vy_in_range = defaultdict(list)
for vy in range(-max_vy, max_vy + 1):
cur_vy = vy
t = 0
y = 0
while y >= y1:
if y <= y2:
t_to_vy_in_range[t].append(vy)
t += 1 # Has to be integer so t must be even
y += cur_vy
cur_vy -= 1
# Making assumption x1 and x2 are both positive
max_t = max(t_to_vy_in_range)
max_vx = x2 + 1
t_to_vx_in_range = defaultdict(list)
vx = 1
for vx in range(max_vx + 1):
cur_vx = vx
t = 0
x = 0
while t <= max_t:
if x1 <= x <= x2:
t_to_vx_in_range[t].append(vx)
t += 1
x += cur_vx
cur_vx = max(cur_vx - 1, 0)
return t_to_vx_in_range, t_to_vy_in_range
class Day17(Solution, day=17):
def parse(self):
with open(self.input_file, "rt") as infile:
return tuple(map(int, re.findall(r"(-?\d+)", infile.read())))
def part1(self):
x1, x2, y1, y2 = self.data
t_to_vx_in_range, t_to_vy_in_range = run_sims(x1, x2, y1, y2)
shared_ts = set(t_to_vy_in_range) & set(t_to_vx_in_range)
correct_vy = max(max(t_to_vy_in_range[t]) for t in shared_ts)
return (correct_vy * (correct_vy + 1)) // 2
def part2(self):
x1, x2, y1, y2 = self.data
t_to_vx_in_range, t_to_vy_in_range = run_sims(x1, x2, y1, y2)
shared_ts = set(t_to_vy_in_range) & set(t_to_vx_in_range)
return len(
{
(vx, vy)
for t in shared_ts
for vx, vy in its.product(t_to_vx_in_range[t], t_to_vy_in_range[t])
}
)
| 2.828125 | 3 |
src/apodeixi/knowledge_base/shutil_kb_store.py | ChateauClaudia-Labs/apodeixi | 0 | 12789362 | <gh_stars>0
import os as _os
from apodeixi.knowledge_base.isolation_kb_store import Isolation_KBStore_Impl
from apodeixi.knowledge_base.manifest_utils import ManifestUtils
from apodeixi.util.a6i_error import ApodeixiError
from apodeixi.util.path_utils import PathUtils
class Shutil_KBStore_Impl(Isolation_KBStore_Impl):
'''
File-system-based implementation of the KnowledgeBaseStore where environment synchronization is done
via the "shutil" module, i.e., via file system operations that copy entire folders across environments.
The entire knowledge base is held under two root folders
(one for postings and one for all derived data, including manifests)
and follows a structure based on filing schemes of the KB_ProcessingRules.
Implements failover for reads by re-trying in parent environment, if one exists and if such failover policy
is stipulated in the current environment's configuration.
@param kb_rootdir A string, corresponding to the absolute path in the local machine
corresponding to the KnowledgeBase.
@param clientURL A string, corresponding to the absolute path to a root folder in a collaboration
drive system (such as SharePoint) in which end-users will collaborate to create
the Excel spreadsheets that will be eventually posted to the KnowledgeBase. This
shared drive is also the location to which the KnowledgeBase will save
generated forms or reports requested by end-users. This is a "root folder" in that
the structure below will be assumed to follow the filing structure of the
KnowledgeBase for postings.
'''
def __init__(self, parent_trace, kb_rootdir, clientURL):
super().__init__(parent_trace, kb_rootdir, clientURL)
def beginTransaction(self, parent_trace):
'''
Starts an isolation state in which all subsequent I/O is done in an isolation area
dedicated to this transaction, and not applied back to the store's persistent area until the
transaction is committed.
'''
return super().beginTransaction(parent_trace)
def commitTransaction(self, parent_trace):
'''
Finalizes a transaction previously started by beginTransaction, by cascading any I/O previously done in
the transaction's isolation area to the store's persistent area.
'''
env, parent_env = self._validate_transaction_end_of_life(parent_trace)
src_postings_root = env.postingsURL(parent_trace)
dst_postings_root = parent_env.postingsURL(parent_trace)
src_manifests_root = env.manifestsURL(parent_trace)
dst_manifests_root = parent_env.manifestsURL(parent_trace)
src_clientURL_root = env.clientURL(parent_trace)
dst_clientURL_root = parent_env.clientURL(parent_trace)
# If the parent environment is also a transactional environment, we will have to record in it
# the events so that when the parent is committed, those events are cascaded to the parent's parent.
# But it may also be that the parent is not transactional, which is why the `parent_events`
# variable may be None and why we need to be checking for that all the time.
parent_name = parent_env.name(parent_trace)
if parent_name in self._transaction_events_dict.keys():
parent_events = self._transaction_events_dict[parent_name]
else:
parent_events = None
# **GOTCHA**
#
# Don't call pop()! We want to see the "last transaction's" environment, but not yet remove
# the last transaction (so peek, not pop). The reason is that if any of the subsequent code in this commit() method
# raises an exception, it will cause a subsequent problem for the abortTransaction method,
# since abortTransaction will look for the "last transaction" and will not find it (or will)
# find the wrong one) if we have poped. So use the [-1] notation to peek (not pop!) the last
# transaction. Later, just before exiting this method, do the pop()
ending_env = self._transactions_stack[-1]
events = self._transaction_events_dict[ending_env.name(parent_trace)]
for relative_path in events.posting_writes():
from_path = src_postings_root + "/" + relative_path
to_path = dst_postings_root + "/" + relative_path
to_dir = _os.path.dirname(to_path)
PathUtils().create_path_if_needed(parent_trace, to_dir)
PathUtils().copy_file(parent_trace, from_path, to_dir)
if parent_events != None:
parent_events.remember_posting_write(relative_path)
for relative_path in events.manifest_writes():
from_path = src_manifests_root + "/" + relative_path
to_path = dst_manifests_root + "/" + relative_path
to_dir = _os.path.dirname(to_path)
PathUtils().create_path_if_needed(parent_trace, to_dir)
PathUtils().copy_file(parent_trace, from_path, to_dir)
if parent_events != None:
parent_events.remember_manifest_write(relative_path)
for relative_path in events.clientURL_writes():
from_path = src_clientURL_root + "/" + relative_path
to_path = dst_clientURL_root + "/" + relative_path
# Normally clientURL is the same across environments (except mostly in test situations),
# so to prevent the copy operation from raising an exception make sure we only attempt to copy
# the file when the two paths are different
if not _os.path.samefile(from_path, to_path):
#if from_path != to_path:
to_dir = _os.path.dirname(to_path)
PathUtils().create_path_if_needed(parent_trace, to_dir)
PathUtils().copy_file(parent_trace, from_path, to_dir)
if parent_events != None:
parent_events.remember_clientURL_write(relative_path)
for relative_path in events.posting_deletes():
to_path = dst_postings_root + "/" + relative_path
if 0 == PathUtils().remove_file_if_exists(parent_trace, to_path):
if parent_events != None:
parent_events.remember_posting_delete(relative_path)
for relative_path in events.manifest_deletes():
to_path = dst_manifests_root + "/" + relative_path
if 0 == PathUtils().remove_file_if_exists(parent_trace, to_path):
if parent_events != None:
parent_events.remember_manifest_deletes(relative_path)
for relative_path in events.clientURL_deletes():
to_path = dst_clientURL_root + "/" + relative_path
if 0 == PathUtils().remove_file_if_exists(parent_trace, to_path):
if parent_events != None:
parent_events.remember_clientURL_deletes(relative_path)
# Last but not least: persist foreign key constraints and copy them to the
# parent environment
#
self.persistForeignKeyConstraints(parent_trace)
version = 1
FOREIGN_KEY_FILE = "foreign_key_contraints." + str(version) + ".yaml"
from_path = src_manifests_root + "/system/" + FOREIGN_KEY_FILE
to_dir = dst_manifests_root + "/system/"
PathUtils().create_path_if_needed(parent_trace, to_dir)
PathUtils().copy_file(parent_trace, from_path, to_dir)
# Now remove the environment of the transaction we just committed
self.removeEnvironment(parent_trace, env.name(parent_trace))
self.activate(parent_trace, parent_env.name(parent_trace))
# **GOTCHA**
#
# Now it is safe to pop() - it wasn't safe earlier because if any of the code in this method
# raised an exception after having popped the last transaction in the stack, the abortTransaction
        # method would have failed since it wouldn't have found the last transaction to then abort it.
ending_env = self._transactions_stack.pop()
events = self._transaction_events_dict.pop(ending_env.name(parent_trace))
def abortTransaction(self, parent_trace):
'''
Aborts a transaction previously started by beginTransaction, by deleting transaction's isolation area,
effectively ignoring any I/O previously done during the transaction's lifetime, and leaving the
KnowledgeBaseStore in a state such that any immediately following I/O operation would be done
directly to the store's persistent area.
'''
return super().abortTransaction(parent_trace)
def activate(self, parent_trace, environment_name):
'''
Switches the store's current environment to be the one identified by the `environment_name`, unless
no such environment exists in which case it raises an ApodeixiError
'''
super().activate(parent_trace, environment_name)
def deactivate(self, parent_trace):
'''
Switches the store's current environment to be the base environment.
'''
super().deactivate(parent_trace)
def copy_posting_across_environments(self, parent_trace, handle, from_environment, to_environment):
'''
Copies the posting file denoted by the `handle` in the `from_environment` to the `to_environment`
'''
from_path = from_environment.postingsURL(parent_trace) + "/" + handle.getRelativePath(parent_trace)
to_path = to_environment.postingsURL(parent_trace) + "/" + handle.getRelativePath(parent_trace)
to_dir = _os.path.dirname(to_path)
my_trace = parent_trace.doing("Copying a posting file",
data = {"src_path": from_path,
"to_dir": to_dir})
if not _os.path.exists(to_dir):
PathUtils().create_path_if_needed(parent_trace=my_trace, path=to_dir)
PathUtils().copy_file(parent_trace, from_path, to_dir)
def _file_not_found_error(self, ex):
'''
Helper method. Returns true if `ex` is an ApodeixiError triggered by a file not found
'''
if not type(ex) == ApodeixiError or not 'error' in ex.data.keys():
return False
elif "No such file or directory" in ex.data['error']:
return True
else:
return False
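    # e.g. an ApodeixiError whose data contains {'error': "... No such file or directory ..."} is
    # treated as a recoverable "not found", which triggers the failover-to-parent logic used below.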
def loadPostingLabel(self, parent_trace, posting_label_handle):
'''
Loads and returns a DataFrame based on the `posting_label_handle` provided
'''
try:
label_df = super().loadPostingLabel(parent_trace, posting_label_handle)
except ApodeixiError as ex:
# Try again in parent environment if failover is configured and error is a missing file
if self._file_not_found_error(ex) and self._failover_posting_reads_to_parent(parent_trace):
my_trace = parent_trace.doing("Searching in parent environment")
# Temporarily switch to the environment in which to search
original_env = self.current_environment(my_trace)
self.activate(my_trace, self.parent_environment(my_trace).name(my_trace))
label_df = self.loadPostingLabel(
parent_trace = my_trace,
posting_label_handle = posting_label_handle)
# Now that search in parent environment is done, reset back to original environment
self.activate(my_trace, original_env.name(my_trace))
# Before leaving, copy the parent's data into our environment, so next time
# we don't have to failover again
self.copy_posting_across_environments(
parent_trace = my_trace,
handle = posting_label_handle,
from_environment = self.parent_environment(my_trace),
to_environment = self.current_environment(my_trace))
else:
raise ex
return label_df
def loadPostingData(self, parent_trace, data_handle, config):
'''
Loads and returns a DataFrame based on the `posting_data_handle` provided
@param config PostingConfig
'''
try:
df = super().loadPostingData(parent_trace, data_handle, config)
except ApodeixiError as ex:
# Try again in parent environment if failover is configured and error is a missing file
if self._file_not_found_error(ex) and self._failover_posting_reads_to_parent(parent_trace):
my_trace = parent_trace.doing("Searching in parent environment")
# Temporarily switch to the environment in which to search
original_env = self.current_environment(my_trace)
self.activate(my_trace, self.parent_environment(my_trace).name(my_trace))
df = self.loadPostingData(
parent_trace = my_trace,
data_handle = data_handle,
config = config)
# Now that search in parent environment is done, reset back to original environment
self.activate(my_trace, original_env.name(my_trace))
# Before leaving, copy the parent's data into our environment, so next time
# we don't have to failover again
self.copy_posting_across_environments(
parent_trace = my_trace,
handle = data_handle,
from_environment = self.parent_environment(my_trace),
to_environment = self.current_environment(my_trace))
else:
raise ex
return df
def searchPostings(self, parent_trace, posting_api, filing_coordinates_filter=None):
'''
Returns a list of PostingLabelHandle objects, one for each posting in the Knowledge Base that matches
the given criteria:
* They are all postings for the `posting_api`
* They pass the given filters
@param posting_api A string that identifies the type of posting represented by an Excel file. For example,
'milestone.modernization.a6i' is a recognized posting API and files that end with that suffix,
such as 'opus_milestone.modernization.a6i.xlsx' will be located by this method.
@param filing_coordinates_filter A function that takes a FilingCoordinates instance as a parameter and returns a boolean.
Any FilingCoordinates instance for which this filter returns False will be excluded from the output.
If set to None then no filtering is done.
'''
ME = Shutil_KBStore_Impl
if self._failover_posting_reads_to_parent(parent_trace):
# Search in parent first, and copy anything found to the current environment
my_trace = parent_trace.doing("Searching in parent environment")
# Temporarily switch to the environment in which to search
original_env = self.current_environment(my_trace)
self.activate(my_trace, self.parent_environment(my_trace).name(my_trace))
parent_handles = self.searchPostings(
parent_trace = my_trace,
posting_api = posting_api,
filing_coordinates_filter = filing_coordinates_filter)
# Now that search in parent environment is done, reset back to original environment
self.activate(my_trace, original_env.name(my_trace))
# Populate current environment with anything found in the parent environment, but only if it is not
# already in current environment
my_trace = parent_trace.doing("Copying postings from parent environment",
data = {"parent environment name":
self.parent_environment(my_trace).name(my_trace),
"current environment name":
self.current_environment(my_trace).name(my_trace)})
for handle in parent_handles:
self.copy_posting_across_environments(
parent_trace = my_trace,
handle = handle,
from_environment = self.parent_environment(my_trace),
to_environment = self.current_environment(my_trace))
my_trace = parent_trace.doing("Searching in environment '"
+ str(self.current_environment(parent_trace).name(parent_trace))
+ "'" )
scanned_handles = super().searchPostings(
parent_trace = my_trace,
posting_api = posting_api,
filing_coordinates_filter = filing_coordinates_filter)
return scanned_handles
def persistManifest(self, parent_trace, manifest_dict):
'''
Persists manifest_dict as a yaml object and returns a ManifestHandle that uniquely identifies it.
Will raise an ApodeixiError if version consistency is violated, i.e., can only save
manifest_dict with version N+1 if N is the highest version existing in the KnowledgeStore, if any.
If no prior version of the manifests exists then the manifest_dict must have version number equal to 1.
'''
my_trace = parent_trace.doing("Checking version consistency")
if True:
self.checkDuplicateManifest(my_trace, manifest_dict)
prior_manifest = self.retrievePreviousManifest(my_trace, manifest_dict)
new_handle = ManifestUtils().inferHandle(my_trace, manifest_dict)
new_version = new_handle.version
# Check that if we are doing an update a prior version does exist
if new_version > 1 and prior_manifest == None:
raise ApodeixiError(my_trace, "Can't persist manifest with version " + str(new_version)
+ " because no prior manifest exist with version " + str(new_version - 1),
data = {"manifest handle": new_handle.display(my_trace)})
my_trace = parent_trace.doing("Persisting manifest")
handle = super().persistManifest(parent_trace, manifest_dict)
return handle
def findLatestVersionManifest(self, parent_trace, manifest_api_name, namespace, name, kind):
'''
For a given manifest API, a manifest is logically identified by its name and kind properties within
a given namespace.
However, there might be multiple versions of a logical manifest (versions are integers starting
at 1, 2, 3, ..., with version increasing each time the manifest gets updated).
This method returns a manifest (as a dict) and a string.
The manifest is the most recent version of the manifest that is logically identified
by the parameters.
The 2nd returned value is the path to that manifest.
If no such manifest exists in the KnowledgeBase store then the first returned object is None.
Example: for file-based stores, a manifest may be stored in a filename like:
$KB_STORE/manifests/my-corp.production/modernization.default.dec-2020.fusionopus/big-rock.2.yaml
In this example,
* the namespace is "my-corp.production"
* the name is "modernization.default.dec-2020.fusionopus"
* the kind is "big-rock"
* the version is 2 (an int)
* the manifest api is embedded within the YAML file. The YAML file has a field called
"apiVersion" with a value like "delivery-planning.journeys.a6i.io/v1a", and the manifest api
is the substring without the suffix: "delivery-planning.journeys.a6i.io"
@param manifest_api_name A string representing the Apodeixi API defining the YAML schemas for the
manifest kinds subsumed under such API. The search for manifests is filtered to those
whose YAML representation declares itself as falling under this API.
Example: 'delivery-planning.journeys.a6i.io'
@param namespace A string. Represents the namespace in the KnowledgeBase store's manifests area
where to look for the manifest.
@param name A string representing the name of the manifest. Along with kind, this identifies a
unique logical manifest (other than version number)
@param kind A string representing the kind of the manifest. Along with kind, this identifies a unique
logical manifest (other than version number)
'''
manifest, manifest_path = super().findLatestVersionManifest(parent_trace, manifest_api_name,
namespace, name, kind)
if manifest == None:
# Not found, so normally we should return None. But before giving up, look in parent environment
            # if we have been configured to fail over to the parent environment whenever we can't find something
if self._failover_manifest_reads_to_parent(parent_trace):
# Search in parent first, and copy anything found to the current environment
my_trace = parent_trace.doing("Searching in parent environment")
# Temporarily switch to the parent environment, and try again
original_env = self.current_environment(my_trace)
self.activate(my_trace, self.parent_environment(my_trace).name(my_trace))
manifest, manifest_path = self.findLatestVersionManifest(my_trace, manifest_api_name,
namespace, name, kind)
# Now that search in parent environment is done, reset back to original environment
self.activate(my_trace, original_env.name(my_trace))
# Populate current environment with anything found in the parent environment, but only if it is not
# already in current environment
if manifest != None:
my_trace = parent_trace.doing("Copying manifest from parent environment",
data = {"parent environment name":
self.parent_environment(my_trace).name(my_trace),
"current environment name":
self.current_environment(my_trace).name(my_trace)})
from_path = manifest_path
to_dir = self.current_environment(my_trace).postingsURL(parent_trace)
if not _os.path.exists(to_dir):
my_trace = parent_trace.doing("Copying a manifest file",
data = {"src_path": from_path,
"to_dir": to_dir})
PathUtils().create_path_if_needed(parent_trace=my_trace, path=to_dir)
PathUtils().copy_file(parent_trace, from_path, to_dir)
return manifest, manifest_path
def retrievePreviousManifest(self, parent_trace, manifest_dict):
'''
Given a manifest expressed as a dict with a certain version N, will retrieve the same manifest
but with version N-1, and return is a dict.
If no prior version exists, it returns None
'''
new_handle = ManifestUtils().inferHandle(parent_trace, manifest_dict)
new_version = new_handle.version
if new_version < 1: # versions should be 1, 2, 3, .. etc, not 0 or below
raise ApodeixiError(parent_trace, "Invalid manifest with a version below 1",
data = {"version given": str(new_version),
"manifest handle": new_handle.display(parent_trace)})
# Check that if we are doing an update a prior version does exist
prior_handle = new_handle
prior_handle.version = new_version - 1
prior_manifest, prior_manifest_path = self.retrieveManifest(parent_trace, prior_handle)
return prior_manifest
def checkDuplicateManifest(self, parent_trace, manifest_dict):
'''
Given a manifest expressed as a dict with a certain version N, will confirm that the store
does not already have a manifest with version N.
If it does, this method raises an ApodeixiError
'''
new_handle = ManifestUtils().inferHandle(parent_trace, manifest_dict)
new_version = new_handle.version
if new_version < 1: # versions should be 1, 2, 3, .. etc, not 0 or below
raise ApodeixiError(parent_trace, "Invalid manifest with a version below 1",
data = {"version given": str(new_version),
"manifest handle": new_handle.display(parent_trace)})
# Check that no manifest exists with this version
duplicate_manifest, duplicate_manifest_path = self.retrieveManifest(parent_trace, new_handle)
if duplicate_manifest != None:
raise ApodeixiError(parent_trace, "Invalid duplicate manifest: one already exists for the given version",
data = {"version given": str(new_version),
"manifest handle": new_handle.display(parent_trace)})
def retrieveManifest(self, parent_trace, manifest_handle):
'''
Returns a dict and a string.
The dict represents the unique manifest in the store that is identified by the `manifest handle`.
The string represents the full pathname for the manifest.
If none exists, it returns (None, None). That said, before giving up and returning (None, None),
this method will attempt to find the manifest in the parent environment if that is what is stipulated
in the current environment's configuration
@param manifest_handle A ManifestHandle instance that uniquely identifies the manifest we seek to retrieve.
'''
manifest, manifest_path = super().retrieveManifest(parent_trace, manifest_handle)
if manifest == None:
# Not found, so normally we should return None. But before giving up, look in parent environment
            # if we have been configured to fail over to the parent environment whenever we can't find something
if self._failover_manifest_reads_to_parent(parent_trace):
# Search in parent first, and copy anything found to the current environment
my_trace = parent_trace.doing("Searching in parent environment")
# Temporarily switch to the parent environment, and try again
original_env = self.current_environment(my_trace)
self.activate(my_trace, self.parent_environment(my_trace).name(my_trace))
manifest, manifest_path = self.retrieveManifest(my_trace, manifest_handle)
# Now that search in parent environment is done, reset back to original environment
self.activate(my_trace, original_env.name(my_trace))
# Populate current environment with anything found in the parent environment, but only if it is not
# already in current environment
if manifest != None:
my_trace = parent_trace.doing("Copying manifest from parent environment",
data = {"parent environment name":
self.parent_environment(my_trace).name(my_trace),
"current environment name":
self.current_environment(my_trace).name(my_trace)})
from_path = manifest_path
to_dir = self.current_environment(my_trace).postingsURL(parent_trace)
if not _os.path.exists(to_dir):
my_trace = parent_trace.doing("Copying a manifest file",
data = {"src_path": from_path,
"to_dir": to_dir})
PathUtils().create_path_if_needed(parent_trace=my_trace, path=to_dir)
PathUtils().copy_file(parent_trace, from_path, to_dir)
return manifest, manifest_path
def loadForeignKeyConstraints(self, parent_trace):
'''
Loads this store's ForeignKeyConstraintsRegistry from the system area of the store
Returns two things:
* A ForeignKeyConstraintsRegistry object. If null, this signifies that there was none found in storage
* A string, for the path in the file system where the ForeignKeyConstraintsRegistry was retrieved from
'''
foreign_key_constraints, path = super().loadForeignKeyConstraints(parent_trace)
if foreign_key_constraints == None:
# Not found, so normally we should return None. But before giving up, look in parent environment
            # if we have been configured to fail over to the parent environment whenever we can't find something
if self._failover_manifest_reads_to_parent(parent_trace):
# Search in parent first, and copy anything found to the current environment
my_trace = parent_trace.doing("Searching in parent environment")
# Temporarily switch to the parent environment, and try again
original_env = self.current_environment(my_trace)
self.activate(my_trace, self.parent_environment(my_trace).name(my_trace))
foreign_key_constraints, path = self.loadForeignKeyConstraints(my_trace)
# Now that search in parent environment is done, reset back to original environment
self.activate(my_trace, original_env.name(my_trace))
# Populate current environment with anything found in the parent environment, but only if it is not
# already in current environment
if foreign_key_constraints != None:
my_trace = parent_trace.doing("Copying foreign key constraints from parent environment",
data = {"parent environment name":
self.parent_environment(my_trace).name(my_trace),
"current environment name":
self.current_environment(my_trace).name(my_trace)})
from_path = path
to_dir = self.current_environment(my_trace).postingsURL(parent_trace)
if not _os.path.exists(to_dir):
my_trace = parent_trace.doing("Copying a manifest file",
data = {"src_path": from_path,
"to_dir": to_dir})
PathUtils().create_path_if_needed(parent_trace=my_trace, path=to_dir)
PathUtils().copy_file(parent_trace, from_path, to_dir)
return foreign_key_constraints, path
def archivePosting(self, parent_trace, posting_label_handle, subnamespace):
'''
Used after a posting Excel file has been processed. It moves the Excel file to a newly created folder dedicated
to this posting event and returns a PostingLabelHandle to identify the Excel file in this newly
created archival folder.
'''
archival_handle = super().archivePosting(parent_trace, posting_label_handle, subnamespace)
return archival_handle
def logPostEvent(self, parent_trace, controller_response):
'''
Used to record in the store information about a posting event that has been completed.
'''
log_txt = super().logPostEvent(parent_trace, controller_response)
return log_txt
def logFormRequestEvent(self, parent_trace, form_request, controller_response):
'''
Used to record in the store information about a request form event that has been completed.
'''
log_txt = super().logFormRequestEvent(parent_trace, form_request, controller_response)
return log_txt
| 1.757813 | 2 |
example/is_main_example.py | jamesabel/ismain | 1 | 12789363 | from ismain import is_main
if is_main():
print("Hello from main.")
| 1.6875 | 2 |
Chapter 08/ch8_46.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | 0 | 12789364 | <gh_stars>0
import collections
q=collections.deque([10,20,30,40])
q.appendleft(0)
print(q)
#[0,10,20,30,40] | 2.5 | 2 |
pybie2d/misc/interior_points.py | dbstein/pybie2d | 11 | 12789365 | import numpy as np
import scipy as sp
import scipy.spatial
import matplotlib as mpl
import matplotlib.path
from ..kernels.high_level.cauchy import Cauchy_Layer_Apply
from ..point_set import PointSet
def find_interior_points(source, target, boundary_acceptable=False):
"""
quick finding of which points in target are outside vs. inside
"""
# first exclude things outside of bounding box
xmin = source.x.min()
xmax = source.x.max()
ymin = source.y.min()
ymax = source.y.max()
in_bounding_box = np.logical_and.reduce([ target.x > xmin, target.x < xmax,
target.y > ymin, target.y < ymax])
out_bounding_box = np.logical_not(in_bounding_box)
small_targ = PointSet(c=target.c[in_bounding_box])
small_targ.compute_tree()
wn = np.zeros(target.N, dtype=complex)
wn[out_bounding_box] = 0.0
# compute winding number via cauchy sums
wn[in_bounding_box] = Cauchy_Layer_Apply(source, small_targ, \
dipstr=np.ones(source.N)).real
wn = np.abs(wn)
bad = np.logical_or(np.isnan(wn), np.isinf(wn))
good = np.logical_not(bad)
big = np.zeros_like(wn)
big[good] = wn[good] > 1e5
bad = np.logical_or(big, bad)
wn[bad] = 1.0
# get region where that sum was not accurate enough
dist = source.tolerance_to_distance(1e-2)
q = target.find_near_points(source, dist).ravel()
# phys array, good except in near boundary region
wn[q] = 0.0
phys = wn > 0.5
# brute force search
poly = mpl.path.Path(source.get_stacked_boundary(T=False))
xq = target.x[q]
yq = target.y[q]
tq = np.column_stack([xq, yq])
interior = poly.contains_points(tq)
phys[q] = interior
phys[bad] = boundary_acceptable
ext = np.logical_not(phys)
return phys, ext
| 2.390625 | 2 |
{{cookiecutter.repo_name}}/python/{{cookiecutter.package_name}}/web/jinja_filters.py | havok2063/cookiecutter-marvin | 0 | 12789366 | # !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
from __future__ import print_function, division, absolute_import
import flask
import jinja2
jinjablue = flask.Blueprint('jinja_filters', __name__)
@jinja2.contextfilter
@jinjablue.app_template_filter()
def split(context, value, delim=None):
'''Split a string based on a delimiter'''
if not delim:
delim = ' '
return value.split(delim) if value else None
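# Hypothetical template usage of the filter above:
#   {{ "a b c" | split }}      -> ['a', 'b', 'c']
#   {{ "a,b,c" | split(",") }} -> ['a', 'b', 'c']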
| 2.5 | 2 |
Run_MK.py | cduranalarcon/MRR_MK_processing | 0 | 12789367 | <filename>Run_MK.py
import numpy as np
from netCDF4 import Dataset
import calendar, datetime, time, glob, os, sys, shutil
sys.path.append("lib/") # adding lib path
sys.path.append("lib/IMProToo/") # adding lib path
from MRR_functions import raw2snow # Process raw data into Doppler moments using MK2012
import core3 as IMProToo
np.warnings.filterwarnings('ignore')#to avoid the error messages
df = open('default_parameters.txt','r')
df_lines = df.readlines()
df.close()
Root_desc = df_lines[0].replace('\t','').replace('\n','').split('=')[0]
TRES_desc = df_lines[1].replace('\t','').replace('\n','').split('=')[0]
Short_name_station_desc = df_lines[2].replace('\t','').replace('\n','').split('=')[0]
KtoX_desc = df_lines[3].replace('\t','').replace('\n','').split('=')[0]
KtoX_a_desc = df_lines[4].replace('\t','').replace('\n','').split('=')[0]
KtoX_b_desc = df_lines[5].replace('\t','').replace('\n','').split('=')[0]
ZeToS_desc = df_lines[6].replace('\t','').replace('\n','').split('=')[0]
ZeToS_A_desc = df_lines[7].replace('\t','').replace('\n','').split('=')[0]
ZeToS_B_desc = df_lines[8].replace('\t','').replace('\n','').split('=')[0]
Root = df_lines[0].replace('\t','').replace('\n','').split('=')[1]
TRES = int(df_lines[1].replace('\t','').replace('\n','').replace(' ','').split('=')[1])
Short_name_station = df_lines[2].replace('\t','').replace('\n','').split('=')[1]
KtoX = df_lines[3].replace('\t','').replace('\n','').replace(' ','').split('=')[1]
KtoX_a = float(df_lines[4].replace('\t','').replace('\n','').replace(' ','').split('=')[1])
KtoX_b = float(df_lines[5].replace('\t','').replace('\n','').replace(' ','').split('=')[1])
ZeToS = df_lines[6].replace('\t','').replace('\n','').replace(' ','').split('=')[1]
ZeToS_A = float(df_lines[7].replace('\t','').replace('\n','').replace(' ','').split('=')[1])
ZeToS_B = float(df_lines[8].replace('\t','').replace('\n','').replace(' ','').split('=')[1])
#Parameters
print("Define parameters (data path, temporal resolution, etc.). YES (Y,y,yes): update parameters, or NOT (N,n,not,Enter): Use default parameters (already defined by the user).")
answer = input() #input from the user
if (answer == "Y") or (answer == "y") or (answer == "YES") or (answer == "yes"):
print('Insert input Data path (Press Enter for default = ' + Root + '):')
answer = input()
if answer != '': Root=answer #input from the user
print("Insert output temporal resolution in seconds (Press Enter for default = " + str(TRES) + "s):")
answer = input() #input from the user
if answer != '': TRES = int(answer)
print('Insert short name of the station (Press Enter for default = ' + Short_name_station + '):')
answer = input()
if answer != '': Short_name_station = answer
print('Perform Linear correction of radome attenuation (K to X band conversion)? (Press Enter for default = ' + KtoX + '):')
answer = input()
if answer != '': KtoX = answer
if (KtoX == 'True') or (KtoX == 'T') or (KtoX == 'TRUE') or (KtoX == 'true') or (KtoX == 't'):
print('Insert the slope parameter for the radome attenuation correction (Press Enter for default = ' + str(KtoX_a) + '):')
answer = input()
if answer != '': KtoX_a = float(answer)
print('Insert the intercept parameter for the radome attenuation correction (Press Enter for default = ' + str(KtoX_b) + '):')
answer = input()
if answer != '': KtoX_b = float(answer)
print('Convert Ze to Precipitation rate? (Press Enter for default = ' + ZeToS + '):')
answer = input()
if answer != '': ZeToS = answer
if (ZeToS == 'True') or (ZeToS == 'T') or (ZeToS == 'TRUE') or (ZeToS == 'true') or (ZeToS == 't'):
print('Insert the "A" parameter (constant in Ze-S relationship) (Press Enter for default = ' + str(ZeToS_A) + '):')
answer = input()
if answer != '': ZeToS_A = float(answer)
print('Insert the "B" parameter (Exponent in Ze-S relationship) (Press Enter for default = ' + str(ZeToS_B) + '):')
answer = input()
if answer != '': ZeToS_B = float(answer)
df = open('default_parameters.txt','w')
df.write(Root_desc+"\t"+"="+'\t'+Root+"\n")
df.write(TRES_desc+"\t"+"="+'\t'+str(TRES)+"\n")
df.write(Short_name_station_desc+"\t"+"="+'\t'+Short_name_station+"\n")
df.write(KtoX_desc+"\t"+"="+'\t'+KtoX+"\n")
df.write(KtoX_a_desc+"\t"+"="+'\t'+str(KtoX_a)+"\n")
df.write(KtoX_b_desc+"\t"+"="+'\t'+str(KtoX_b)+"\n")
df.write(ZeToS_desc+"\t"+"="+'\t'+ZeToS+"\n")
df.write(ZeToS_A_desc+"\t"+"="+'\t'+str(ZeToS_A)+"\n")
df.write(ZeToS_B_desc+"\t"+"="+'\t'+str(ZeToS_B)+"\n")
df.close()
os.chdir(Root)
name_station = '_'.join(Short_name_station.split(' '))
Descr = "MRR data at " + name_station + ", first MRR processed with MK12 method v.0.103."
folder=Root
dircf=glob.glob(Root+'*.raw')
dircf=np.sort(dircf)
if len(dircf) == 1:
print('In this folder there is '+str(len(dircf))+' raw file')
else:
print('In this folder there are '+str(len(dircf))+' raw files')
for name in dircf:
NameFile=name
count=0
NameFile_out = NameFile[:-4]+'-MK.nc' #create a new file
raw2snow(NameFile,NameFile_out, TRES = TRES, Descr = Descr) # Convert Raw into Doppler Moments using MK2012
##Including S estimates using Grazioli et al, 2017.
if (KtoX == 'True') or (KtoX == 'T') or (KtoX == 'TRUE') or (KtoX == 'true') or (KtoX == 't'):
print("Converting K to X band using Grazioli et al, 2017, TC.")
ds = Dataset(NameFile_out,'a')
Ze = ds.variables["Ze"][:]
ZeX = ds.createVariable('ZeX', 'f', ('time', 'range',),fill_value=-9999.)
#print(type(KtoX_a),type(KtoX_b))
ZeX[:] = KtoX_a*Ze+KtoX_b
ZeX.description = "Ze converted into X-band to take into accound the radome attenuation (see Grazioli et al. 2017, TC)"
ZeX.units = "dBZe"
if (ZeToS == 'True') or (ZeToS == 'T') or (ZeToS == 'TRUE') or (ZeToS == 'true') or (ZeToS == 't'):
print("Including S estimates using Grazioli et al, 2017.")
S = ds.createVariable('SnowfallRate', 'f', ('time', 'range',),fill_value=-9999.)
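            # Invert the Ze = A * S^B power law: convert Ze from dBZ to linear units with
            # 10**(dBZ/10), divide by A, then raise to the power 1/B to obtain S.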
S[:] = ((10**(ZeX[:]/10.))/(1.*ZeToS_A))**(1./ZeToS_B)
S.description = "Snowfall rate derived from S-Ze relationship in Grazioli et al. (2017, TC)"
S.units = "mm h-1"
ds.close()
else:
        if (ZeToS == 'True') or (ZeToS == 'T') or (ZeToS == 'TRUE') or (ZeToS == 'true') or (ZeToS == 't'): # In case there is no radome correction, only perform the Z-S conversion
print("Including S estimates using Grazioli et al, 2017, TC.")
ds = Dataset(NameFile_out,'a')
Ze = ds.variables["Ze"][:]
S = ds.createVariable('SnowfallRate', 'f', ('time', 'range',),fill_value=-9999.)
S[:] = ((10**(Ze[:]/10.))/(1.*ZeToS_A))**(1./ZeToS_B)
S.description = "Snowfall rate derived from S-Ze relationship in Grazioli et al. (2017, TC)"
S.units = "mm h-1"
ds.close()
##default Parameters
#Input data path = /home/claudio/Projects/git/MRR_DDU/Data/DDU/RawSpectra/201702/
#output temporal resolution in seconds = 60
#Short name of the station = DDU
#Radome attenuation correction (k to x band) (True or False) = True
#Radome attenuation (k to x band), a slope (dBZ) = 0.99
#Radome attenuation (k to x band), b intercept (dBZ) = 6.14
#Snowfall rate conversion (Z-S) (True or False) = True
#Z-S relationship, A parameter (constant) = 76
#Z-S relationship, B parameter (exponent) = 0.91 | 2.21875 | 2 |
scripts/fast_sparCC.py | shafferm/fast_sparCC | 6 | 12789368 | <reponame>shafferm/fast_sparCC
#!/usr/local/bin/python2
import argparse
from biom import load_table
from sparcc_fast.sparcc_functions import sparcc
from sparcc_fast import utils
from sparcc_fast.bootstrap_correlations import bootstrap_correlations
__author__ = 'shafferm'
# TODO: implement sparse pandas dataframe
def main(args):
print "reading in table"
table = load_table(args.input)
df = utils.biom_to_pandas(table)
# filter
if args.min_samples is not None:
df = utils.min_sample_filter(df, args.min_samples)
print "Table filtered: " + str(table.shape[1]) + " observations"
print ""
elif args.sparcc_filter is True:
df = utils.sparcc_paper_filter(df)
print "Table filtered: " + str(table.shape[1]) + " observations"
print ""
print "calculating correlations"
cor, cov = sparcc(df, procs=args.procs)
correls = utils.df_to_correls(cor)
if not args.corr_only:
print "bootstrapping"
correls['p_value'] = bootstrap_correlations(df, cor, args.boots, args.procs)
# adjust p-value if desired
if args.p_adjust == "FDR":
correls['p_adjusted'] = utils.bh_adjust(correls['p_value'])
elif args.p_adjust == "bonferroni":
        # assumed to mirror bh_adjust above and return the adjusted p-values
        correls['p_adjusted'] = utils.bonferroni_adjust(correls['p_value'])
correls.to_csv(args.output, sep='\t', index=False)
if __name__ == '__main__':
"""main, takes argparser"""
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="location of input biom file")
parser.add_argument("-o", "--output", help="output file location", default="correls.txt")
parser.add_argument("-p", "--procs", help="number of processors to use, only matters if calculating p-values",
type=int)
parser.add_argument("-b", "--boots", help="number of bootstraps", type=int, default=100)
parser.add_argument("--corr_only", help="only calculate correlations, don't calculate p-values",
action="store_true", default=False)
parser.add_argument("--p_adjust", help="multiple testing corretion method: FDR, bonferroni or none", default="FDR")
parser.add_argument("--sparcc_filter", help="filter input table according to parameters defined in Inferring"
"Correlation Networks from Genomic Survey Data", default=False,
action="store_true")
parser.add_argument("--min_samples", help="minimum number of samples a observation must be present in to be kept"
"in anaylsis", type=int)
main(parser.parse_args())
| 2.53125 | 3 |
models/tss_capsnet/model.py | StephenTerror/TSSCapsNet | 1 | 12789369 | # Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tensorflow as tf
from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model
from utils.get_resnet_layer import get_resnet_depth_from_name
from . import dct_capsnet_e1_graph_mnist
from . import dct_capsnet_h1_attention_mnist
from . import dct_capsnet_h1_graph_mnist
from . import dct_capsnet_h1_gumbel_gate_mnist
from . import dwt_capsnet_e1_graph_mnist
from . import dwt_capsnet_e1_graph_smallnorb
from . import dwt_capsnet_fpn_graph_mnist
from . import dwt_capsnet_fpn_graph_smallnorb
from . import dwt_resnet_capsnet_fpn_graph_cifar
from . import rfft_capsnet_e1_graph_mnist
from . import wst_capsnet_e1_graph_mnist
from .call_backs import get_callbacks
from ..etc_model.call_backs import get_callbacks as etc_callback
from ..layers.model_base import Model
from utils.dataset import Dataset
from utils.tools import marginLoss
class TSSCapsNet(Model):
def __init__(self, data_name, model_name='DCT_Efficient_CapsNet', mode='test', config_path='config.json',
custom_path=None, verbose=True, gpu_number=None, optimizer='Adam', half_filter_in_resnet=True,
use_tiny_block=True, heterogeneous=False, **kwargs):
Model.__init__(self, data_name, mode, config_path, verbose)
self.model_name = model_name
if custom_path != None:
self.model_path = custom_path
else:
self.model_path = os.path.join(self.config['saved_model_dir'],
f"{self.model_name}",
f"{self.model_name}_{self.data_name}.h5")
os.makedirs(os.path.join(self.config['saved_model_dir'], f"{self.model_name}"), exist_ok=True)
self.model_path_new_train = os.path.join(self.config['saved_model_dir'],
f"{self.model_name}",
f"{self.model_name}_{self.data_name}_{'{epoch:03d}'}.h5")
self.tb_path = os.path.join(self.config['tb_log_save_dir'], f"{self.model_name}_{self.data_name}")
self.half = half_filter_in_resnet
self.tiny = use_tiny_block
self.heterogeneous = heterogeneous
self.load_graph()
if gpu_number:
self.model = multi_gpu_model(self.model, gpu_number)
self.optimizer = optimizer
def load_graph(self):
if self.data_name in ['MNIST', 'MNIST_SHIFT', 'FASHION_MNIST', 'FASHION_MNIST_SHIFT']:
input_shape = self.config['MNIST_INPUT_SHAPE']
num_classes = 10
elif self.data_name in ['CIFAR10', 'CIFAR10_SHIFT']:
input_shape = self.config['CIFAR10_INPUT_SHAPE']
num_classes = 10
        elif self.data_name == 'SMALLNORB':
num_classes = 5
input_shape = self.config['CIFAR10_INPUT_SHAPE']
elif self.data_name == 'MULTIMNIST':
            raise NotImplementedError
else:
raise NotImplementedError
if self.model_name == "DCT_E_MNIST":
self.model = dct_capsnet_e1_graph_mnist.build_graph(input_shape, self.mode, self.model_name)
elif self.model_name == "DCT_H_A_MNIST":
self.model = dct_capsnet_h1_attention_mnist.build_graph(input_shape, self.mode, 3, self.model_name)
elif self.model_name == "DCT_H_MNIST":
self.model = dct_capsnet_h1_graph_mnist.build_graph(input_shape, self.mode, 3, self.model_name)
elif self.model_name == "DCT_H_Gumbel_MNIST":
self.model = dct_capsnet_h1_gumbel_gate_mnist.build_graph(input_shape, self.mode, 3, self.model_name)
elif self.model_name == "DWT_E_MNIST":
self.model = dwt_capsnet_e1_graph_mnist.build_graph(input_shape, self.mode, self.model_name)
elif self.model_name == "DWT_E_SMALLNORB":
self.model = dwt_capsnet_e1_graph_smallnorb.build_graph(input_shape, self.mode, self.model_name)
elif self.model_name == "DWT_FPN_MNIST":
self.model = dwt_capsnet_fpn_graph_mnist.build_graph(input_shape, self.mode, num_classes,
['FPN', 'FPN', 'FPN'], regularize=1e-4,
name=self.model_name)
elif self.model_name == "DWT_Tiny_FPN_MNIST":
self.model = dwt_capsnet_fpn_graph_mnist.build_graph(input_shape, self.mode, num_classes,
['FPN', 'FPN', 'FPN'], regularize=1e-4,
name=self.model_name)
elif self.model_name == "DWT_Attention_FPN_MNIST":
self.model = dwt_capsnet_fpn_graph_mnist.build_graph(input_shape, self.mode, num_classes,
['FPN', 'FPN', 'FPN'], regularize=1e-4,
name=self.model_name)
elif self.model_name == "DWT_FPN_SMALLNORB":
self.model = dwt_capsnet_fpn_graph_smallnorb.build_graph(input_shape, self.mode, num_classes,
['FPN', 'FPN', 'FPN'], regularize=1e-4,
name=self.model_name)
elif self.model_name == "DWT_Tiny_FPN_SMALLNORB":
self.model = dwt_capsnet_fpn_graph_smallnorb.build_graph(input_shape, self.mode, num_classes,
['FPN', 'FPN', 'FPN'], regularize=1e-4,
name=self.model_name)
elif self.model_name == "DWT_Attention_FPN_SMALLNORB":
self.model = dwt_capsnet_fpn_graph_smallnorb.build_graph(input_shape, self.mode, num_classes,
['FPN', 'FPN', 'FPN'], regularize=1e-4,
name=self.model_name)
elif self.model_name == "RFFT_E_MNIST":
self.model = rfft_capsnet_e1_graph_mnist.build_graph(input_shape, self.mode, self.model_name)
elif self.model_name == "WST_E_MNIST":
self.model = wst_capsnet_e1_graph_mnist.build_graph(input_shape, self.mode, self.model_name)
elif self.model_name.startswith("DWT_") and self.model_name.endswith("_FPN_CIFAR"):
# example: "DWT_Tiny_Half_R18_Tiny_FPN_CIFAR"
half = True if "Half_R" in self.model_name else False
tiny = True if "DWT_Tiny" in self.model_name else False
if "Tiny_FPN_CIFAR" in self.model_name:
routing_name_list = ["Tiny_FPN", "Tiny_FPN", "Tiny_FPN"]
elif "Attention_FPN_CIFAR" in self.model_name:
routing_name_list = ['Attention', 'Attention', 'Attention']
elif "FPN_CIFAR" in self.model_name:
routing_name_list = ['FPN', 'FPN', 'FPN']
else:
print("FPN type is not support!")
raise NotImplementedError
self.model = dwt_resnet_capsnet_fpn_graph_cifar.build_graph(
input_shape, self.mode, num_classes=10, routing_name_list=routing_name_list, regularize=1e-4,
depth=get_resnet_depth_from_name(self.model_name), tiny=tiny, half=half, name=self.model_name,
heterogeneous=self.heterogeneous
)
else:
print(f"model name {self.model_name} is NotImplemented")
raise NotImplemented
def train(self, dataset=None, initial_epoch=0):
callbacks = get_callbacks(self.model_name,
self.tb_path,
self.model_path_new_train,
self.config['lr_dec'],
self.config['lr'],
optimizer=self.optimizer)
if dataset is None:
dataset = Dataset(self.data_name, self.config_path)
dataset_train, dataset_val = dataset.get_tf_data()
if self.optimizer == 'Adam':
            self.model.compile(optimizer=tf.keras.optimizers.Adam(lr=self.config['lr']),
loss=[marginLoss, 'mse'],
loss_weights=[1., self.config['lmd_gen']],
metrics={self.model_name: 'accuracy'})
else:
self.model.compile(optimizer=tf.keras.optimizers.SGD(lr=self.config['lr']),
loss=[marginLoss, 'mse'],
loss_weights=[1., self.config['lmd_gen']],
metrics={self.model_name: 'accuracy'})
# self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.config['lr']),
# loss=[marginLoss, 'mse'],
# loss_weights=[1., self.config['lmd_gen']],
# metrics={self.model_name: 'accuracy'})
steps = None
print('-' * 30 + f'{self.data_name} train' + '-' * 30)
history = self.model.fit(dataset_train,
epochs=self.config[f'epochs'], steps_per_epoch=steps,
validation_data=dataset_val, batch_size=self.config['batch_size'],
initial_epoch=initial_epoch,
callbacks=callbacks,
workers=self.config['num_workers'])
self.model.save_weights(os.path.join(self.config['saved_model_dir'],
f"{self.model_name}",
f"{self.model_name}_{self.data_name}.h5"))
return history
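    # Hypothetical usage sketch (argument values assumed, not taken from the repo's config):
    #   net = TSSCapsNet(data_name='MNIST', model_name='DWT_E_MNIST', mode='train')
    #   history = net.train()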
| 1.570313 | 2 |
df_browse/chunk_search_utils.py | petergaultney/df-browse | 0 | 12789370 | # chunk search utils
# from gui_debug import *
def not_at_end(lengthable, position, down):
return position < len(lengthable) if down else position > 0
def get_next_chunk(sliceable, start_position, chunk_size, down):
"""includes start_position, of size chunk_size"""
if not down:
chunk_beg = max(0, start_position - chunk_size + 1)
# print('yielding chunk upwards from ', chunk_beg, 'to', start_position + 1)
return sliceable[chunk_beg:start_position + 1], chunk_beg
else:
chunk_end = min(len(sliceable), start_position+chunk_size)
# print('yielding chunk downwards from', start_position, 'to', chunk_end)
return sliceable[start_position:chunk_end], start_position
def search_chunk_yielder(sliceable, start_location, down=True, chunk_size=100):
start_of_next_chunk = start_location
while not_at_end(sliceable, start_of_next_chunk, down):
yield get_next_chunk(sliceable, start_of_next_chunk, chunk_size, down)
start_of_next_chunk = start_of_next_chunk + chunk_size if down else start_of_next_chunk - chunk_size
    return  # a generator should simply return when exhausted (PEP 479), not raise StopIteration
def search_list_for_str(lst, search_string, starting_item, down, case_insensitive):
"""returns index into list representing string found, or None if not found"""
search_string = search_string.lower() if case_insensitive else search_string
search_slice_end = len(lst) if down else 0
search_list = lst[starting_item:] if down else reversed(lst[:starting_item+1])
# print('searching list of size', len(lst), 'down' if down else 'up', 'from', starting_item, 'to', search_slice_end, 'for:', search_string)
for idx, s in enumerate(search_list):
s = s.lower() if case_insensitive else s
if s.find(search_string) != -1:
# print('found! ', s, 'at', idx, 'starting from', starting_item, 'in list of len', len(lst), 'down?', down)
return starting_item + idx if down else starting_item - idx
return None
def search_sliceable_by_yielded_chunks_for_str(sliceable, search_string, starting_index, down, case_insensitive):
"""This is the main entry point for everything in this module."""
for chunk, chunk_start_idx in search_chunk_yielder(sliceable, starting_index, down):
found_at_chunk_idx = search_list_for_str(chunk, search_string, 0 if down else len(chunk) - 1, down, case_insensitive)
if found_at_chunk_idx is not None:
return found_at_chunk_idx + chunk_start_idx
return None
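# Example with hypothetical data: scan downward from index 0 for "bar", case-insensitively:
#   search_sliceable_by_yielded_chunks_for_str(["foo", "spam", "Bar baz"], "bar", 0, True, True)  # -> 2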
| 3.1875 | 3 |
foliant/preprocessors/base.py | foliant-docs/foliant | 105 | 12789371 | import re
from logging import Logger
from typing import Dict
import yaml
OptionValue = int or float or bool or str
class BasePreprocessor():
'''Base preprocessor. All preprocessors must inherit from this one.'''
# pylint: disable=too-many-instance-attributes
defaults = {}
tags = ()
@staticmethod
def get_options(options_string: str) -> Dict[str, OptionValue]:
'''Get a dictionary of typed options from a string with XML attributes.
:param options_string: String of XML attributes
:returns: Dictionary with options
'''
if not options_string:
return {}
option_pattern = re.compile(
r'(?P<key>[A-Za-z_:][0-9A-Za-z_:\-\.]*)=(\'|")(?P<value>.+?)\2',
flags=re.DOTALL
)
return {
option.group('key'): yaml.load(option.group('value'), yaml.Loader)
for option in option_pattern.finditer(options_string)
}
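    # Illustrative only (hypothetical attribute string); values go through yaml.load, so
    #   get_options('width="500" caption="Table 1"') -> {'width': 500, 'caption': 'Table 1'}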
def __init__(self, context: dict, logger: Logger, quiet=False, debug=False, options={}):
# pylint: disable=dangerous-default-value
# pylint: disable=too-many-arguments
self.project_path = context['project_path']
self.config = context['config']
self.context = context
self.logger = logger
self.quiet = quiet
self.debug = debug
self.options = {**self.defaults, **options}
self.working_dir = self.project_path / self.config['tmp_dir']
if self.tags:
self.pattern = re.compile(
rf'(?<!\<)\<(?P<tag>{"|".join(self.tags)})' +
r'(\s(?P<options>[^\<\>]*))?\>' +
r'(?P<body>.*?)\<\/(?P=tag)\>',
flags=re.DOTALL
)
def apply(self):
'''Run the preprocessor against the project directory. Must be implemented
by every preprocessor.
'''
raise NotImplementedError
| 2.5 | 2 |
Algorithms/010_SUBS.py | ChaoticMarauder/Project_Rosalind | 0 | 12789372 | <gh_stars>0
def dna_motif(dna, motif):
m_len=len(motif)
list_motifs=[]
    for i in range(len(dna)-m_len+1):  # +1 so a motif ending at the last base is not missed
if(dna[i:i+m_len]==motif):
list_motifs.append(i+1)
return list_motifs
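# e.g. dna_motif("GATATATGCATATACTT", "ATAT") -> [2, 4, 10] (1-based start positions)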
def main():
with open('datasets/rosalind_subs.txt') as input_data:
dna,motif=input_data.read().strip().split('\n')
# dna='GATATATGCATATACTT'
# motif='ATAT'
motifs=dna_motif(dna, motif)
print(' '.join(map(str,(motifs))))
with open('solutions/rosalind_subs.txt', 'w') as output_data:
output_data.write(' '.join(map(str,(motifs))))
if(__name__=='__main__'):
main() | 3.40625 | 3 |
tests/integration/test_integration_delivery_method.py | pwitab/visma | 5 | 12789373 | <gh_stars>1-10
import pytest
from visma.models import DeliveryMethod
class TestDeliveryMethod:
@pytest.fixture()
def method(self):
yield DeliveryMethod.objects.all().first()
def test_list_delivery_methods(self):
methods = DeliveryMethod.objects.all()
        assert len(methods) != 0
def test_get_delivery_method(self, method):
delivery_method = DeliveryMethod.objects.get(method.id)
assert delivery_method.id == method.id | 2.25 | 2 |
app/models/comment.py | maricio41/homebase | 2 | 12789374 | from .db import db
from sqlalchemy.sql import func
class Comment(db.Model):
__tablename__ = "comments"
id = db.Column(db.Integer, nullable=False, primary_key=True)
commentBody = db.Column(db.String(255), nullable=False)
createdAt = db.Column(db.DateTime(timezone=True), nullable=False, server_default=func.now())
projectId = db.Column(db.Integer, db.ForeignKey("projects.id"), nullable = False)
userId = db.Column(db.Integer, db.ForeignKey("users.id"), nullable = False)
project = db.relationship("Project", back_populates="comments")
user = db.relationship("User", back_populates="comments")
def to_dict(self):
return {
"id": self.id,
"commentBody": self.commentBody,
"createdAt": self.createdAt,
"projectId": self.projectId,
"userId": self.userId
} | 2.75 | 3 |
tests/helpers/response_test.py | Impactstory/jump-api | 15 | 12789375 | import json
import unittest
from jsonschema import validate, ValidationError
from views import app
import os
import urllib.parse
from urllib.parse import urlencode
def testget(url):
client = app.test_client()
client.testing = True
return json.loads(client.get(url).get_data(as_text=True))
class ResponseTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(ResponseTest, self).__init__(*args, **kwargs)
self.client = app.test_client()
self.client.testing = True
def json_response(self, url):
return json.loads(self.client.get(url).get_data(as_text=True))
def assert_schema(obj, schema, test_name):
try:
validate(obj, schema)
except ValidationError as e:
raise AssertionError(u'error in {}: {}'.format(test_name, str(e)))
def dev_request_url(path, params=None):
params = params.copy() if params else {}
params.update({
'jwt': os.environ["UNSUB_USER1_JWT"]
})
return urllib.parse.urlunparse([
'',
'',
path,
'',
urlencode(params),
'',
])
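# e.g. (hypothetical path and params) dev_request_url('/v2/reports', {'page': 1})
#   -> '/v2/reports?page=1&jwt=<value of UNSUB_USER1_JWT>'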
| 2.84375 | 3 |
tests/test_updater.py | amjadsaadeh/dvc | 0 | 12789376 | <filename>tests/test_updater.py
import os
from tests.basic_env import TestDvc
class TestUpdater(TestDvc):
def test(self):
# NOTE: only test on travis CRON to avoid generating too much logs
travis = os.getenv('TRAVIS') == 'true'
if not travis:
return
cron = os.getenv('TRAVIS_EVENT_TYPE') == 'cron'
if not cron:
return
env = os.environ.copy()
if os.getenv('CI'):
del os.environ['CI']
self.dvc.updater.check()
self.dvc.updater.check()
self.dvc.updater.check()
os.environ = env.copy()
| 2.234375 | 2 |
old/utils.py | NateThom/contrastive_learning | 0 | 12789377 | <filename>old/utils.py
import argparse
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--image_path',
default='/home/nthom/Documents/datasets/CelebA/Img/',
# default='/home/nthom/Documents/datasets/CelebA/Img/partial_blackout/',
#default='/home/nthom/Documents/datasets/lfwa/',
# default='/home/nthom/Documents/datasets/UMD-AED/',
help='Path to input data directory [/home/user/Documents/input_images/]')
parser.add_argument('--image_dir',
default='resized_images_178x218/',
# default='img_celeba/',
# default='resized_aligned_images_178x218',
# default='resized_segment1',
# default='lfw',
# default='croppedImages',
help='input_images')
parser.add_argument('--attr_label_path',
# default='/home/nthom/Documents/datasets/UNR_Facial_Attribute_Parsing_Dataset/list_attr_celeba_attparsenet.csv',
default='/home/nthom/Documents/datasets/UNR_Facial_Attribute_Parsing_Dataset/list_attr_celeba_hair.csv',
# default='/home/nthom/Documents/datasets/lfwa/lfwa_labels_full_paths.csv',
# default='/home/nthom/Documents/datasets/UMD-AED/Files_attparsenet/list_attr_umdaed_reordered.csv',
help='Path to mapping between input images and binary attribute labels [/home/user/Documents/list_attr_celeba_attparsenet.csv]')
parser.add_argument('--simclr',
# default=False,
default=True,
help='If true, use the simclr framework')
parser.add_argument('--model',
# default="resnet18",
default="resnet50",
# default="resnet152",
help='Designates the model to be initialized [attparsenet]')
parser.add_argument('--load',
default=False,
# default=True,
help='True for loading a pretrained model, False otherwise [0]')
parser.add_argument('--load_path',
default='/home/nthom/Documents/contrastive_learning/checkpoints/',
help='File path for the model to load [/home/user/Document/models/]')
parser.add_argument('--load_file',
default='epoch=24-Validation Loss=0.24166-resnet18Pretrain_resizedImages178x218_hair_randomResizedCrop_blur15_hFlip_0.01.ckpt',
help='File name for the model to load [/model_to_load]')
parser.add_argument('--save',
default=True,
# default=False,
help='True for saving the model, False otherwise [True]')
parser.add_argument('--save_path',
default='/home/nthom/Documents/contrastive_learning/checkpoints',
help='Dir for saving models [./saved_models/]')
# Base Model, Dataset, labels, data augs, Learning Rate
parser.add_argument('--save_name',
default='SimClr_resizedImages178x218_hair_randomResizedCrop_SimClr_0.01',
help='Dir for saving models [./saved_models/]')
parser.add_argument('--train_epochs',
default=25,
help='Number of training epochs [22]')
parser.add_argument('--train_size',
#lfwa and umd
# default=0,
#celeba
default=162771,
help='Number of samples in training set [162770]')
parser.add_argument('--val_size',
#lfwa and umd
# default=0,
#celaba
default=19867,
help='Number of samples in validation set [19867]')
parser.add_argument('--test_size',
#lfwa
# default=13088,
# umd
# default=2808,
#celeba
default=19961,
help='Number of samples in test set [19963]')
parser.add_argument('--all_size',
#lfwa
# default=13088,
# umd
# default=2808,
#celeba
default=202599,
help='Total Number of samples in the dataset [202600]')
parser.add_argument('--train',
# default=False,
default=True,
help='Train the model on the training set and evaluate on the validation set')
parser.add_argument('--val_only',
default=False,
# default=True,
help='Evaluate the model on the validation set')
parser.add_argument('--test',
default=False,
# default=True,
help='Evaluate the model on the test set')
parser.add_argument('--pretrain',
# default=False,
default=True,
help='Download pretrained resnet weights')
parser.add_argument('--n_labels',
default=5,
# default=40,
help='Number of classes in task')
parser.add_argument('--attr_to_use',
# default=['Bald', 'Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair'],
default=['Other', 'Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair'],
# default=['5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive',
# 'Bags_Under_Eyes', 'Bald', 'Bangs', 'Big_Lips', 'Big_Nose', 'Black_Hair',
# 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows', 'Chubby', 'Double_Chin',
# 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones', 'Male',
# 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face',
# 'Pale_Skin', 'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns',
# 'Smiling', 'Straight_Hair', 'Wavy_Hair', 'Wearing_Earrings', 'Wearing_Hat',
# 'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young'],
help='List of attributes to predict')
parser.add_argument('--attr_list',
default=['5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive',
'Bags_Under_Eyes', 'Bald', 'Bangs', 'Big_Lips', 'Big_Nose', 'Black_Hair',
'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows', 'Chubby', 'Double_Chin',
'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones', 'Male',
'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face',
'Pale_Skin', 'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns',
'Smiling', 'Straight_Hair', 'Wavy_Hair', 'Wearing_Earrings', 'Wearing_Hat',
'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young'],
help='List of all 40 attributes')
parser.add_argument('--show_batch',
default=False,
# default=True,
help='Show the batch input images and masks for debugging')
parser.add_argument('--shuffle',
# default=False,
default=True,
help='Shuffle the order of training samples. Validation and Testing sets will not be shuffled [True]')
parser.add_argument('--random_seed',
default=256,
help='Seed for random number generators [64]')
parser.add_argument('--batch_size',
default=64,
help='Batch size for images [32]')
parser.add_argument('--lr',
default=0.01,
help='Learning rate [0.001]')
parser.add_argument('--temperature',
default=0.07,
help='Temperature [0.07]')
parser.add_argument('--patience',
default=5,
help='Learning Rate Scheduler Patience [5]')
return parser.parse_args()
# parser.add_argument('--save_feature_maps',
# default=False,
# # default=True,
# help='Save all feature maps for data in either the test or val set')
| 2.296875 | 2 |
protostar/format/format1/exploit.py | RegaledSeer/netsecnoobie | 0 | 12789378 | import subprocess
FILE = "/opt/protostar/bin/format1"
#9 bytes
ADDRESS = "BBBB\x38\x96\x04\x08B"
def craft_payload(string):
string += ADDRESS
string += "%130$n"
return string
def main():
    payload = craft_payload("")
subprocess.call([FILE, payload])
if __name__ == "__main__":
main()
| 2.578125 | 3 |
lab/doodle/test03/testcode01.py | JoHyukJun/RasberryPiLab | 0 | 12789379 | <reponame>JoHyukJun/RasberryPiLab
import RPi.GPIO as GPIO
import time
REDLEDlist = [4, 17, 18, 27, 22, 23, 24, 25]
KEYPADlist = [6, 12, 13, 16, 19, 28, 26, 21]
def KeypadRead():
keypadnum = -1
for i in range(8):
        if (not GPIO.input(KEYPADlist[i])):
keypadnum = i
break
return keypadnum
def LEDControl(keypadnum):
for i in range(8):
if(i == keypadnum):
GPIO.output(REDLEDlist[i], GPIO.HIGH)
else:
GPIO.output(REDLEDlist[i], GPIO.LOW)
GPIO.setmode(GPIO.BCM)
for i in REDLEDlist:
GPIO.setup(i, GPIO.OUT, initial=GPIO.LOW)
for i in KEYPADlist:
GPIO.setup(i, GPIO.IN)
time.sleep(0.5)
while(1):
try:
keypadnum = KeypadRead()
LEDControl(keypadnum)
except KeyboardInterrupt:
        break  # exit the loop on Ctrl+C so that GPIO.cleanup() below runs
GPIO.cleanup()
| 3.296875 | 3 |
jokenpo.py | hlruffo/exercicios_Python | 1 | 12789380 | # coding: iso-8859-1 -*-
import random
from time import sleep
jogar ='sim'
print("_________________________")
print("!!Bem-vindo ao Jokenpo!!")
print("_________________________")
resultado = ['Vitória do CPU!', 'Houve um empate!', 'O usuário venceu!']
jogadas = ['pedra', 'papel', 'tesoura']
pontos_user = 0
pontos_cpu = 0
while jogar == 'sim':
user = input(f'Escolha uma jogada entre {jogadas}: ').lower()
    while user not in jogadas:
user = input (f'As jogadas válidas são {jogadas}. Escolha uma entre elas: ').lower()
print('Pronto?')
cpu = random.choice(jogadas)
sleep(3)
print('Processando jogadas!')
sleep(2)
# 0 1 2
#jogadas = ['pedra', 'papel', 'tesoura']
#resultado = ['Vitória do CPU!', 'Houve um empate!', 'O usuário venceu!']
if user in jogadas[0] and cpu in jogadas[1] or user in jogadas[1] and cpu in jogadas[2] or user in jogadas[2] and cpu in jogadas[0]:
result =0
elif user == cpu:
result = 1
else:
result =2
if result == 1:
caso= resultado[1]
elif result == 0:
caso= resultado[0]
pontos_cpu +=1
else:
caso = resultado[2]
pontos_user += 1
print(f'Usuário escolheu {user} e CPU {cpu}.{caso} Placar: Usuário {pontos_user} x CPU {pontos_cpu}.')
jogar = input('Gostaria de jogar novamente? Digite sim ou não:').lower()
while jogar != "sim" and jogar != "não":
jogar = input('Opção inválida! Gostaria de jogar novamente? Digite sim ou não: \n').lower()
if pontos_cpu > pontos_user:
print(f'Placar final: Usuário {pontos_user}x CPU {pontos_cpu}. Vitória do CPU. Melhor sorte na próxima vez!')
elif pontos_user > pontos_cpu:
print(f'Placar final: Usuário {pontos_user}x CPU {pontos_cpu}. Parabéns!!!! Você venceu!! Até a próxima')
else:
print(f'Placar final: Usuário {pontos_user}x CPU {pontos_cpu}. Desta vez empatamos! Até a próxima!')
| 3.8125 | 4 |
dudu/2021-01-02.py | vollov/python-test | 1 | 12789381 | string_from_file = "aAu($requirementMet);g_pc.<strong>$233.45</strong>asdfdsaf<strong>$2.33</strong>op98795436jlkn lk;<strong>$15.22</strong>0989-0k;lkk0"
str=string_from_file
print(str)
# this function finds the first price in the string and returns it to the caller
# - steps: locate the <strong>$ ... </strong> markers, slice out the number between them, convert it to float
# parameters: none (it reads the module-level string defined above)
# return: a float number
def get_price():
start_index=str.find('<strong>$')
end_index=str.find('</strong>')
p =str[start_index + 9:end_index]
    # here we convert the string 'p' into a float number
    p=float(p) # e.g. '2.33' --> 2.33, saved back into p
return p
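# For the string_from_file above, get_price() returns 233.45 (the first price in the string).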
discount=0.9
print("the discount price is: ")
price=get_price()
actual_price=price*discount
print(actual_price)
| 3.640625 | 4 |
scripts/create_ec2.py | noahhai/autovpn | 0 | 12789382 | from __future__ import print_function
import time
import boto
import boto.ec2
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
keyname=sys.argv[1]
instance_type=sys.argv[2]
region=sys.argv[3]
ami=sys.argv[4]
port=sys.argv[5]
if region:
conn_region = boto.ec2.connect_to_region(region)
else:
conn_region = boto.connect_ec2()
def auto_vpn(ami=ami,
instance_type=instance_type,
key_name=keyname,
group_name="vpn_2",
ssh_port="22",
vpn_port=port,
cidr="0.0.0.0/0",
tag="auto_vpn",
user_data=None):
ec2 = conn_region
try:
group = ec2.get_all_security_groups(groupnames=[group_name])[0]
except ec2.ResponseError, e:
if e.code == 'InvalidGroup.NotFound':
group = ec2.create_security_group(group_name,
'A group that allows VPN access')
group.authorize('tcp',ssh_port,ssh_port,cidr)
group.authorize('udp',vpn_port,vpn_port,cidr)
else:
raise
if int(port) != int(1194):
try:
mgroup = ec2.get_all_security_groups(groupnames=[group_name])[0]
mgroup.authorize('udp',vpn_port,vpn_port,cidr)
except ec2.ResponseError, e:
if e.code == 'InvalidPermission.Duplicate':
                pass  # the rule already exists, so there is nothing to add
else:
raise
spot_request = ec2.request_spot_instances(
price="0.005",
count=1,
image_id=ami,
key_name=key_name,
security_groups=[group_name],
instance_type=instance_type,
user_data=user_data,
)[0]
while True:
eprint("Waiting. spot request status: '%s', state: '%s'" % (spot_request.state, spot_request.status.code))
if spot_request.state == 'active' and spot_request.status.code == 'fulfilled':
break
time.sleep(10)
spot_request = ec2.get_all_spot_instance_requests(request_ids=[spot_request.id])[0]
while True:
instance = ec2.get_all_instances(instance_ids=[spot_request.instance_id])[0].instances[0]
eprint("Waiting. spot instance state: '%s'" % instance.state)
if instance.state == 'running':
break
time.sleep(10)
ec2.create_tags([instance.id], {tag:""})
global host
instance = ec2.get_all_instances(instance_ids=[spot_request.instance_id])[0].instances[0]
host = instance.ip_address
print("%s" % host)
if __name__ == "__main__":
auto_vpn() | 2.25 | 2 |
workflow/assemble_page.py | ncsa/ng-notebook | 0 | 12789383 | import os
# in a real project, this script probably wouldn't make so many assumptions
# about the inputs and outputs
NG_DIST_DIR_PATH = '/ng-notebook/angular/dist/'
OUTFILE_PATH = '/hostmount/output/index.html'
# ng build's index.html will be a small, single-line file
with open(os.path.join(NG_DIST_DIR_PATH, 'index.html'), 'r') as infile:
html = infile.read()
# replace the favicon with inline data
inline_data = "window['breedCounts'] = {" + \
"appenzeller: 6, briard: 9, cotondetulear: 5, dhole: 1, eskimo: 4};"
html = html.replace(\
'<link rel="icon" type="image/x-icon" href="favicon.ico">',
'<script type="text/javascript">' + inline_data + '</script>')
# insert the css
with open(os.path.join(NG_DIST_DIR_PATH, 'styles.bundle.css'), 'r') as infile:
css = infile.read()
html = html.replace(\
'<link href="styles.bundle.css" rel="stylesheet"/>',
'<style>' + css + '</style>')
# insert the js bundles (there are three)
js_files = ['inline.bundle.js', 'polyfills.bundle.js', 'main.bundle.js']
for js_file in js_files:
with open(os.path.join(NG_DIST_DIR_PATH, js_file), 'r') as infile:
js = infile.read()
html = html.replace(\
'<script type="text/javascript" src="' + js_file + '"></script>',
'<script type="text/javascript">' + js + '</script>')
# write the final html
with open(OUTFILE_PATH, 'w') as outfile:
outfile.write(html)
| 2.46875 | 2 |
timeseries/timeseries/models/__init__.py | takotab/timeseriesAI2 | 0 | 12789384 | from .layers import *
from .ResNet import *
from .InceptionTime import * | 0.976563 | 1 |
leetcode/290.py | zhaipro/acm | 0 | 12789385 | <gh_stars>0
class Solution:
def wordPattern(self, pattern: str, s: str) -> bool:
r1 = {}
r2 = {}
        words = s.split()
        if len(words) != len(pattern):
            return False
        for c, w in zip(pattern, words):
if c in r1 and r1[c] != w:
return False
if w in r2 and r2[w] != c:
return False
r1[c] = w
r2[w] = c
return True
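# Illustrative checks (added example, not part of the LeetCode submission):
# Solution().wordPattern("abba", "dog cat cat dog")   -> True
# Solution().wordPattern("abba", "dog cat cat fish")  -> False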
| 3.1875 | 3 |
tests/test/test_feature_class.py | vb64/oeg.feature.class | 0 | 12789386 | <gh_stars>0
"""
make test T=test_feature_class.py
"""
from . import TestBase
class TestFeatureClass(TestBase):
"""
oeg_feature_class
"""
@staticmethod
def test_class_gene():
"""
FeatureClass.GENE
"""
from oeg_feature_class import FeatureClass, size_class, is_in_limits, MagnetType
thick = 10
real = (100, 100, 5)
calcked = (90, 90, 6)
assert size_class(real[0], real[1], thick) == FeatureClass.GENE
assert is_in_limits(calcked, real, thick, magnet_type=MagnetType.MFL) == (True, True, True)
assert is_in_limits(calcked, real, thick, magnet_type=MagnetType.TFI) == (True, True, True)
assert is_in_limits((90, 90, 1), real, 30, magnet_type=MagnetType.TFI) == (True, True, False)
assert is_in_limits((90, 5, 1), real, 30, magnet_type=MagnetType.TFI) == (True, False, False)
@staticmethod
def test_class_pitt():
"""
FeatureClass.PITT
"""
from oeg_feature_class import FeatureClass, size_class, is_in_limits, MagnetType
thick = 10
real = (10, 10, 10)
calcked = (9, 9, 6)
assert size_class(real[0], real[1], thick) == FeatureClass.PITT
assert is_in_limits(calcked, real, thick, magnet_type=MagnetType.MFL) == (True, True, False)
assert is_in_limits(calcked, real, thick, magnet_type=MagnetType.TFI) == (True, True, False)
assert is_in_limits((90, 90, 1), real, 30, magnet_type=MagnetType.TFI) == (False, False, False)
def test_class_pinh(self):
"""
FeatureClass.PINH
"""
from oeg_feature_class import FeatureClass, size_class, is_in_limits, MagnetType
thick = 10
real = (5, 5, 5)
calcked = (9, 9, 6)
self.assertEqual(size_class(real[0], real[1], thick), FeatureClass.PINH)
self.assertEqual(is_in_limits(calcked, real, thick, magnet_type=MagnetType.MFL), (True, True, True))
self.assertEqual(is_in_limits(calcked, real, thick, magnet_type=MagnetType.TFI), (True, True, True))
self.assertEqual(is_in_limits((90, 90, 1), real, 30, magnet_type=MagnetType.TFI), (False, False, True))
def test_class_cisl(self):
"""
FeatureClass.CISL
"""
from oeg_feature_class import Error, FeatureClass, size_class, is_in_limits, MagnetType
thick = 10
real = (5, 100, 10)
calcked = (5, 100, 6)
self.assertEqual(size_class(real[0], real[1], thick), FeatureClass.CISL)
self.assertEqual(is_in_limits(calcked, real, thick, magnet_type=MagnetType.MFL), (True, True, False))
with self.assertRaises(Error) as context:
is_in_limits(calcked, real, thick, magnet_type=MagnetType.TFI)
self.assertIn("class 'CISL'. Not applicable for method 'TFI'", str(context.exception))
def test_class_axsl(self):
"""
FeatureClass.AXSL
"""
from oeg_feature_class import Error, FeatureClass, size_class, is_in_limits, MagnetType
thick = 10
real = (100, 5, 10)
calcked = (100, 5, 6)
self.assertEqual(size_class(real[0], real[1], thick), FeatureClass.AXSL)
self.assertEqual(is_in_limits(calcked, real, thick, magnet_type=MagnetType.TFI), (True, True, False))
self.assertEqual(is_in_limits((90, 90, 1), real, 30, magnet_type=MagnetType.TFI), (True, False, False))
with self.assertRaises(Error) as context:
is_in_limits(calcked, real, thick, magnet_type=MagnetType.MFL)
self.assertIn("class 'AXSL'. Not applicable for method 'MFL'", str(context.exception))
def test_class_axgr(self):
"""
FeatureClass.AXGR
"""
from oeg_feature_class import FeatureClass, size_class, is_in_limits, MagnetType
thick = 10
real = (50, 10, 10)
calcked = (50, 10, 6)
self.assertEqual(size_class(real[0], real[1], thick), FeatureClass.AXGR)
self.assertEqual(is_in_limits(calcked, real, thick, magnet_type=MagnetType.MFL), (True, True, False))
self.assertEqual(is_in_limits(calcked, real, thick, magnet_type=MagnetType.TFI), (True, True, False))
self.assertEqual(is_in_limits((90, 90, 1), real, 30, magnet_type=MagnetType.TFI), (False, False, False))
def test_class_cigr(self):
"""
FeatureClass.CIGR
"""
from oeg_feature_class import FeatureClass, size_class, is_in_limits, MagnetType
thick = 10
real = (10, 50, 10)
calcked = (10, 50, 6)
self.assertEqual(size_class(real[0], real[1], thick), FeatureClass.CIGR)
self.assertEqual(is_in_limits(calcked, real, thick, magnet_type=MagnetType.MFL), (True, True, False))
self.assertEqual(is_in_limits(calcked, real, thick, magnet_type=MagnetType.TFI), (True, True, False))
def test_size_class_wrong(self):
"""
wrong parameters for FeatureClass calculate
"""
from oeg_feature_class import Error, size_class
with self.assertRaises(Error) as context:
size_class(0, 10, 10)
self.assertIn('Wrong FeatureClass params', str(context.exception))
with self.assertRaises(Error) as context:
size_class(-1, 10, 10)
self.assertIn('Wrong FeatureClass params', str(context.exception))
def test_is_in_limits(self): # pylint: disable=too-many-locals
"""
compare is_in_limits
"""
from oeg_feature_class import is_in_limits, size_class, FeatureClass, MagnetType
thick = 16.6
length = 90
width = 12
depth = 4
real = (length, width, depth)
real_class = size_class(length, width, thick)
self.assertEqual(real_class, FeatureClass.AXSL)
x_mm = 72
y_mm = 11
z_mm = 1
calcked = (x_mm, y_mm, z_mm)
length_ok, width_ok, depth_ok = is_in_limits(calcked, real, thick, magnet_type=MagnetType.TFI)
self.assertEqual(length_ok, True)
self.assertEqual(width_ok, True)
self.assertEqual(depth_ok, False)
def test_small_thick(self):
"""
wall thick < 10 mm
"""
from oeg_feature_class import size_class, FeatureClass
self.assertEqual(size_class(10, 10, 5), FeatureClass.PITT)
def test_is_detectable(self):
"""
is_detectable
"""
from oeg_feature_class import is_detectable, MagnetType
thick = 10
self.assertTrue(is_detectable((10, 10, 5), thick, magnet_type=MagnetType.MFL))
self.assertTrue(is_detectable((10, 10, -1), thick, magnet_type=MagnetType.MFL))
self.assertFalse(is_detectable((20, 1, -1), thick, magnet_type=MagnetType.MFL))
| 2.421875 | 2 |
back_office/helpers/diff.py | Envinorma/back-office | 0 | 12789387 | import string
from typing import List
from envinorma.models import ArreteMinisteriel, StructuredText
from text_diff import TextDifferences, text_differences
from unidecode import unidecode
_SIMPLE_CHARS = set(string.ascii_letters + string.digits + string.whitespace)
def _clean_line(line: str) -> str:
res = str(unidecode(line)).strip()
return ''.join(c for c in res if c in _SIMPLE_CHARS)
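# For instance (illustrative): _clean_line("  Arrêté du 2 mai  ") -> "Arrete du 2 mai"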
def extract_am_lines(am: ArreteMinisteriel, normalize_text: bool) -> List[str]:
lines = [line for section in am.sections for line in section.text_lines(1)]
if normalize_text:
return [_clean_line(line) for line in lines]
return lines
def compute_am_diff(am_before: ArreteMinisteriel, am_after: ArreteMinisteriel, normalize_text: bool) -> TextDifferences:
lines_before = extract_am_lines(am_before, normalize_text)
lines_after = extract_am_lines(am_after, normalize_text)
return text_differences(lines_before, lines_after)
def compute_text_diff(text_before: StructuredText, text_after: StructuredText) -> TextDifferences:
lines_before = text_before.text_lines()
lines_after = text_after.text_lines()
return text_differences(lines_before, lines_after)
| 2.53125 | 3 |
src/train_model.py | tahaceritli/ptype-cat-experiments | 0 | 12789388 | <filename>src/train_model.py
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import RobustScaler
from src import constants
import joblib
import numpy as np
def reshape_pred_scores(pred_scores):
y_probs = ()
for y_prob in pred_scores:
n = int(len(y_prob) / 4)
y_prob = np.array(y_prob).reshape(n, 4)
y_probs += (y_prob,)
y_probs = np.vstack(y_probs)
return y_probs
def get_pred_scores_combined(p, y_tests):
y_tests_hat = []
pred_scores = []
for i in range(constants.FOLD):
if i == 0:
y_test_hat = p[: len(y_tests[i]), :]
last_index = len(y_tests[i])
else:
y_test_hat = p[last_index : last_index + len(y_tests[i]), :]
last_index = last_index + len(y_tests[i])
pred_scores.append(list(y_test_hat.flatten()))
y_test_hat = np.argmax(y_test_hat, axis=1)
y_test_hat = [constants.CANONICAL_TYPES_SORTED[index] for index in y_test_hat]
y_tests_hat.append(y_test_hat)
return pred_scores, y_tests_hat
robust_scaler = RobustScaler()
# load inputs
X = np.load("inputs/X.npy")
y = np.load("inputs/y.npy")
z = np.load("inputs/z.npy", allow_pickle=True)
# scale features
features = ["u_ratio", "u_ratio_clean", "U", "U_clean"]
features_indices = [constants.ADDITIONAL_FEATURES[feat] for feat in features]
# magic numbers below: columns 6 and 7 of X_feats are the "U" and "U_clean" features, which get robust-scaled
X_feats = X[:, list(range(4)) + features_indices]
X_feats[:, [6, 7]] = robust_scaler.fit_transform(X_feats[:, [6, 7]])
# run classifier
clf = LogisticRegression(multi_class="multinomial", max_iter=10000, penalty="l2").fit(
X_feats, y
)
y_hat = clf.predict(X_feats)
# save the model and scaler
joblib.dump(robust_scaler, "outputs/models/robust_scaler.pkl")
joblib.dump(clf, "outputs/models/LR.sav")
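# Illustrative use of the saved artifacts on new data with the same column layout
# (X_new is a placeholder, not part of this training script):
# scaler = joblib.load("outputs/models/robust_scaler.pkl")
# model = joblib.load("outputs/models/LR.sav")
# X_new_feats = X_new[:, list(range(4)) + features_indices]
# X_new_feats[:, [6, 7]] = scaler.transform(X_new_feats[:, [6, 7]])
# predicted_types = model.predict(X_new_feats)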
| 2.390625 | 2 |
setup.py | zozzz/yapic.di | 0 | 12789389 | <reponame>zozzz/yapic.di<gh_stars>0
#!/usr/bin/env python3
import os
import sys
from glob import glob
from os import path
from pathlib import Path
from setuptools import setup, Extension
from setuptools.command.test import test as TestCommand
from setuptools import Command
VERSION = "2.0.3"
define_macros = {
"YAPIC_DI_VERSION_MAJOR": VERSION.split(".")[0],
"YAPIC_DI_VERSION_MINOR": VERSION.split(".")[1],
"YAPIC_DI_VERSION_PATCH": VERSION.split(".")[2]
}
undef_macros = []
extra_compile_args = [] # -flto
subcommand_args = []
if "--" in sys.argv:
subcommand_args = sys.argv[sys.argv.index("--") + 1:]
del sys.argv[sys.argv.index("--"):]
if sys.platform == "win32":
define_macros["UNICODE"] = "1"
DEVELOP = sys.executable.endswith("python_d.exe")
if DEVELOP:
define_macros["_DEBUG"] = "1"
undef_macros.append("NDEBUG")
extra_compile_args.append("/MTd")
extra_compile_args.append("/Zi")
else:
undef_macros.append("_DEBUG")
extra_compile_args.append("/Ox")
extra_compile_args.append("/FAs")
else:
extra_compile_args.append("-std=c++11")
extra_compile_args.append("-Wno-unknown-pragmas")
extra_compile_args.append("-Wno-write-strings")
DEVELOP = sys.executable.endswith("-dbg")
if DEVELOP:
define_macros["_DEBUG"] = 1
undef_macros.append("NDEBUG")
else:
extra_compile_args.append("-O3")
def root(*p):
return path.join(path.dirname(path.abspath(__file__)), *p)
cpp_ext = Extension(
name="yapic.di._di",
sources=["src/di.cpp"],
include_dirs=["./libs/yapic.core/src/yapic/core/include"],
depends=glob("src/*.hpp") + glob("./libs/yapic.core/src/yapic/core/include/**/*.hpp"),
extra_compile_args=extra_compile_args,
define_macros=list(define_macros.items()),
undef_macros=undef_macros,
language="c++",
)
def cmd_prerun(cmd, requirements):
for r in requirements(cmd.distribution):
if r:
installed = cmd.distribution.fetch_build_eggs(r)
if installed:
for dp in map(lambda x: x.location, installed):
if dp not in sys.path:
sys.path.insert(0, dp)
cmd.distribution.get_command_obj("build").force = True
cmd.run_command("build")
ext = cmd.get_finalized_command("build_ext")
ep = str(Path(ext.build_lib).absolute())
if ep not in sys.path:
sys.path.insert(0, ep)
for e in ext.extensions:
if e._needs_stub:
ext.write_stub(ep, e, False)
class PyTest(TestCommand):
user_options = [
("file=", "f", "File to run"),
]
def initialize_options(self):
super().initialize_options()
self.file = "./tests/"
def finalize_options(self):
super().finalize_options()
def run(self):
def requirements(dist):
yield dist.install_requires
yield dist.tests_require
cmd_prerun(self, requirements)
self.run_tests()
def run_tests(self):
import pytest
errno = pytest.main(subcommand_args)
sys.exit(errno)
class Benchmark(Command):
user_options = [
("file=", "f", "File to run"),
]
def initialize_options(self):
self.file = None
self.pytest_args = "-x -s"
def finalize_options(self):
if self.file:
self.pytest_args += " " + self.file.replace("\\", "/")
def run(self):
def requirements(dist):
yield dist.extras_require["benchmark"]
cmd_prerun(self, requirements)
import shlex
import pytest
errno = pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
# typing: https://github.com/python/typing/issues/84
setup(
name="yapic.di",
version=VERSION,
url="https://github.com/zozzz/yapic.di",
author="<NAME>",
author_email="<EMAIL>",
description="Very fast Dependency Injection library",
long_description=(Path(__file__).parent / "README.rst").read_text(encoding="utf-8"),
license="BSD",
packages=["yapic.di"],
package_dir={"yapic.di": "src"},
package_data={"yapic.di": ["_di.pyi"]},
ext_modules=[cpp_ext],
tests_require=["pytest", "pytest-leaks"],
python_requires=">=3.7",
extras_require={"benchmark": ["pytest", "pytest-benchmark"]},
cmdclass={
"test": PyTest,
"bench": Benchmark
},
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Operating System :: Microsoft :: Windows",
"Operating System :: Unix",
"Programming Language :: C++",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
"Typing :: Typed",
],
)
| 1.929688 | 2 |
simplebot.py | dmarx/VideoLinkBot | 7 | 12789390 | <filename>simplebot.py
"""
Reddit bot that scrapes a post for video links
and posts the collection in a table as a comment.
This script is designed for scraping a single post.
Run as follows:
import simplebot as s
s.login(_user=username, _pass=password)
post_aggregate_links(submission_id)
To run the bot as a continuous scrape of
/r/all/comments, use simplemonitor.py.
"""
import praw
from praw.errors import APIException
import re
import urlparse as up
from HTMLParser import HTMLParser
from lxml import etree
from urllib2 import Request, urlopen
import time
import pandas as pd
from video_host_utilities import youtube_link_cleaner, supported_domains, \
link_cleaners, title_cleaners, get_host_code
_ua = "YoutubeLinkBot reddit bot by /u/shaggorama"
r = praw.Reddit(_ua)
botCommentsMemo = {}
scrapedCommentsMemo = {}
scrapedLinksMemo = {}
def login(_user=None, _pass=None, fname='loginCredentials.txt'):
if _user is None and _pass is None:
with open(fname,'r') as f:
_user = f.readline().strip()
_pass = f.readline().strip()
print "Logging in as: {0} / {1}".format(_user, _pass)
r.login(username=_user, password=_pass)
def fix_html_entities(html, parser=HTMLParser()):
return parser.unescape( parser.unescape(html))
def get_video_links_from_html(text):
"""
Strips video link from a string in html format
by looking for the href attribute.
"""
# could also just use BeautifulSoup, but this regex works fine
link_pat = re.compile('href="(.*?)"')
links = link_pat.findall(text)
video_links = []
for l in links:
code = get_host_code(l)
if code:
clean = link_cleaners[code]
if clean:
link = clean(fix_html_entities(l))
if link:
video_links.append(link)
return video_links
def get_title(url, default = None, hparser=etree.HTMLParser(encoding='utf-8')):
"""
returns the title of a webpage given a url
(e.g. the title of a youtube video)
"""
def _get_title(_url):
HEADER = {'Accept-Language':'en-US,en;q=0.5'}
request = Request(_url, headers=HEADER)
data = urlopen(request)
htree=etree.parse(data, hparser)
raw_title = htree.find(".//title").text
code = get_host_code(_url)
title = title_cleaners[code](raw_title)
title = re.sub('[\|\*\[\]\(\)~\\\]','',title)
return title
try:
title = _get_title(url)
except Exception, e:
print "Encountered some error getting title for video at", url
print e
time.sleep(2)
try:
title = _get_title(url)
except:
print 'OK then, let''s just call it "%s"' % default
title = default
if title is None:
title = default
return title
def scrape(submission):
"""
Given a submission id, scrapes that submission and returns a list of comments
associated with their links
    @submission: a praw submission object, or a submission id string
"""
### Should add in some functionality for recognizing when we've already maxed-out the comment length on a post.
### OH SHIT! better yet, figure out a way to RESPOND TO MY OWN COMMENT WITH ADDITIONAL LINKS.
# just for convenience
if type(submission) == type(''):
submission = r.get_submission(submission_id = submission)
# for updating links and whatever.
if scrapedLinksMemo.has_key(submission.id):
collected_links = scrapedLinksMemo[submission.id]
scrapedCommentIDs = scrapedCommentsMemo[submission.id]
print "We have already collected %d video links on this submission." % len(collected_links)
else:
scrapedCommentIDs = set()
scrapedCommentsMemo[submission.id] = scrapedCommentIDs
print "got %d comments" % len(submission.all_comments_flat)
for i, comment in enumerate(submission.all_comments_flat):
try:
if comment.author.name == r.user.name: # deleted comment handling doesn't seem to be working properly.
# if we have already memoized a bot comment for this post, continue
# otheriwse, confirm found bot comment contains links and if it does,
# memoize it.
if botCommentsMemo.has_key(submission.id):
continue
elif get_video_links_from_html(comment.body_html):
botCommentsMemo[submission.id] = comment
else:
links = get_video_links_from_html(comment.body_html)
for link in links:
add_memo_entry(comment, link)
except Exception, e:
# ignore deleted comments and comments by deleted users.
print "encountered some error in scrape()"
print e
continue # why do name attribute errors keep getting re-raised???
scrapedCommentIDs.add(comment.id)
collected_links = scrapedLinksMemo[submission.id]
print "Scraped {0} comments, found {1} links".format(i, len(collected_links) )
return collected_links # this isn't really even necessary since we could just call it down from the memo.
def get_scraped_comments(link_id):
""" to be retired in favor of call to memo"""
print "building comments memo"
if scrapedLinksMemo.has_key(link_id):
collected_comments = scrapedCommentsMemo[link_id]
scraped = set( [collected_comments[url]['id'] for url in collected_comments] )
else:
"Populating scrapedCommentsMemo with", link_id
scraped = set()
scrapedCommentsMemo[link_id] = {}
return scraped
def add_memo_entry(comment, link):
submission_id = comment.submission.id
if not link:
if not scrapedCommentsMemo.has_key(submission_id):
scrapedCommentsMemo[submission_id] = set() # this might be redundant
scrapedCommentsMemo[submission_id].add(comment.id)
try:
username = comment.author.name
except:
username = None
link_entry = {'author':username
,'created_utc':comment.created_utc
,'permalink':comment_shortlink(comment)
, 'id':comment.id
,'score':comment.score
,'title':None # This is lazy
}
if scrapedLinksMemo.has_key(submission_id):
collected_links = scrapedLinksMemo[submission_id]
try:
if collected_links.ix[link, 'score'] < comment.score:
# collected_links.ix[link, :] = link_entry ### I think this is causing the bug in issue # 25
# This is a shitty fix, but it should solve the problem.
for k in link_entry.keys():
collected_links.ix[link, k] = link_entry[k]
except KeyError, e:
new_rec = pd.DataFrame(link_entry, index=[link])
collected_links = collected_links.append(new_rec)
scrapedLinksMemo[submission_id] = collected_links
else:
scrapedLinksMemo[submission_id] = pd.DataFrame(link_entry, index=[link])
def comment_shortlink(c):
return 'http://reddit.com/comments/'+ c.link_id[3:] + '/_/' + c.id
def build_comment(collected_links, link_id=None):
print "Building comment"
head = '''Here is a list of video links collected from comments that redditors have made in response to this submission:
|Source Comment|Score|Video Link|
|:-------|:-------|:-------|\n'''
tail ="""\n* [VideoLinkBot FAQ](http://www.reddit.com/r/VideoLinkBot/wiki/faq)
* [Feedback](http://www.reddit.com/r/VideoLinkBot/submit)"""
titles = []
print "Getting video titles"
if link_id: # if we've been provided with a link_id, memoize the link titles.
for url in collected_links.index:
try:
if not scrapedLinksMemo[link_id].ix[url,'title']:
scrapedLinksMemo[link_id].ix[url,'title'] = get_title(url)
print "got title for",url
except Exception, e:
print "some problem getting title for", url
print e
continue
print "Got video titles. Formatting text for each link."
text=u''
for _url, c in scrapedLinksMemo[link_id].sort(columns='score',ascending=False).iterrows():
if c['title']:
_title = c['title']
else:
_title = _url
text += u'|[{author}]({permalink})|{score}|[{title}]({url})|\n'.format(
author=c['author']
,permalink = c['permalink']
,title = c['title']
,url = _url
,score= c['score']
)
len_playlist = 82 # I think...
print "Trimming content as needed"
text = trim_comment(text, 10000-len(head)-len(tail)-len_playlist)
print "Comment built."
return head+text+tail
def post_comment(link_id, subm, text):
try:
if botCommentsMemo.has_key(link_id):
bot_comment = botCommentsMemo[link_id]
print "editing", bot_comment.id
bot_comment.edit(text)
# need to overwrite existing comment object, otherwise we'll add playlist
# using the pre-scrape text.
# Manually overwrite 'body' attribute.
bot_comment.body = text
print "successfully updated comment."
else:
print "Posting new comment"
bot_comment = subm.add_comment(text)
botCommentsMemo[link_id] = bot_comment
print "Successfully posted new comment."
result = True
print bot_comment.id
except APIException, e:
# need to handle comments that are too long.
# Really, this should probably be in build_comment()
print e
print "sleeping for 5 seconds, trimming comment"
time.sleep(5) # maybe the API is annoyed with
trim_comment(text) # maybe the comment is too long (this should have been handled already)
result = False
return result
def trim_comment(text, targetsize=10000):
"""
If comment is longer than 10000 chars, reddit won't let us post it. This boils down to around 50 links (I think).
"""
# Removing permalink's to comments would significantly reduce the size of my comments.
# could still post a link to the user's commenting history
# Alternatively, could post a shortlink (?)
print "Trimming comment down to %d chars." % targetsize
while len(text)> targetsize:
text = '\n'.join(text.split('\n')[:-1])#[2:]
print "Processed comment length:",len(text)
return text
def add_playlist(c):
"""
Adds a radd.it playlist to an existing comment.
"""
playlist = "http://radd.it/comments/{0}/_/{1}?only=videos&start=1".format(c.link_id[3:], c.id)
text = c.body + "\n* [Playlist of videos in this comment]({0})".format(playlist)
c.edit(text)
def post_aggregate_links(link_id='178ki0', max_num_comments = 1000, min_num_comments = 8, min_num_links=5):
"""Not sure which function to call? You probably want this one."""
subm = r.get_submission(submission_id = link_id)
if not min_num_comments < subm.num_comments < max_num_comments:
print "[NO POST] Submission has %d comments. Not worth scraping." % subm.num_comments
return None
try:
print u'Scraping "{0}"'.format(subm.title)
except:
print u'Scraping "{0}"'.format(subm.id)
links = scrape(subm) # Theoretically, we could just pull this down from the memo.
n_links = len(links)
if n_links >= min_num_links:
authors = links.author.unique()
if len(authors) >1:
try:
print u'[POST] Posting {nlinks} links to "{sub}" post "{post}"'.\
format(nlinks = n_links
,sub = subm.subreddit.display_name
,post = subm.title)
except:
print u'[POST] Posting {nlinks} links to "{sub}" post "{post}"'.\
format(nlinks = n_links
,sub = subm.subreddit.id
,post = subm.id)
text = build_comment(links, subm.id)
print "comment built, trying to post."
posted = False
while not posted:
posted = post_comment(link_id, subm, text)
print "Appending playlist..."
add_playlist(botCommentsMemo[link_id])
print "Video links successfully posted."
else:
print "[NO POST] All links from same user. Need at least 2 different users to post."
else:
print "[NO POST] Only found %d links. Need %d to post." % (n_links, min_num_links)
if __name__ == '__main__':
login()
post_aggregate_links()
| 3.359375 | 3 |
Modules/Scripted/Home/HomeLib/plots.py | ebrahimebrahim/lungair-desktop-application | 0 | 12789391 | <reponame>ebrahimebrahim/lungair-desktop-application
import slicer, qt, vtk
from .constants import *
PLOT_TYPES = {
"line" : slicer.vtkMRMLPlotSeriesNode.PlotTypeLine,
"bar" : slicer.vtkMRMLPlotSeriesNode.PlotTypeBar,
"scatter" : slicer.vtkMRMLPlotSeriesNode.PlotTypeScatter,
"scatterbar" : slicer.vtkMRMLPlotSeriesNode.PlotTypeScatterBar,
}
def createPlotView():
"""Create and return a qMRMLPlotView widget.
It is associated to the main scene, and it also gets a button for fitToContent."""
plot_view = slicer.qMRMLPlotView()
plot_view.setMRMLScene(slicer.mrmlScene)
fit_plot_tool_button = qt.QToolButton()
fit_plot_tool_button.clicked.connect(lambda: plot_view.fitToContent())
# Put the QToolButton in the top right corner of the plot
assert(plot_view.layout() is None) # failure here indicates a slicer change in which plot views gained layouts, which we should take care not to replace
plot_view.setLayout(qt.QHBoxLayout())
plot_view.layout().insertWidget(1,fit_plot_tool_button,0,qt.Qt.AlignTop)
spacer = qt.QSpacerItem(20,20,qt.QSizePolicy.Expanding, qt.QSizePolicy.Expanding)
plot_view.layout().insertItem(0,spacer)
plot_view.layout().margin=0
# Give it a nice appearance
fit_plot_tool_button.setIconSize(qt.QSize(10,10))
fit_plot_tool_button.setIcon(qt.QIcon(":Icons/SlicesFitToWindow.png"))
fit_plot_tool_button.setStyleSheet(f"background-color:#{BAR_WIDGET_COLOR};")
fit_plot_tool_button.setAutoRaise(True)
fit_plot_tool_button.setToolTip("Reset zoom to fit entire plot")
return plot_view
class SlicerPlotData:
"""Container for and manager of the nodes associated to a slicer plot view."""
# A plot view node we will keep empty in order to have a way of displaying no plot
empty_plot_view_node = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotViewNode", "SlicerPlotDataEmptyPlotViewNode")
def __init__(self, name:str):
"""Create SlicerPlotData, making a qMRMLPlotView and a vtkMRMLPlotViewNode with the given name."""
self.name = name
self.plot_view = createPlotView()
self.plot_view_node = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotViewNode", name+"PlotView")
self.plot_view.setMRMLPlotViewNode(self.plot_view_node)
self.plot_nodes = {} # chart, table, and series; see the parameter "nodes" in the doc of slicer.util.plot
def set_plot_data(self, data, x_axis_label=None, y_axis_label=None, title=None, legend_label=None, plot_type="line", labels=None):
"""
Populate the plot with the data from the given numpy array.
Args:
data: a numpy array of shape (N,2) containing the data to plot
x_axis_label: the title of the x-axis to display
y_axis_label: the title of the y-axis to display
title: plot title; also shows up in the names of helper nodes
legend_label: the text to put in the legend
plot_type: one of "line", "bar", "scatter", or "scatterbar"
labels: a list of string labels-- this affects bar and scatterbar plot types
"""
if title is None: title = self.name
if legend_label is None: legend_label = title
if len(data.shape)!=2 or data.shape[1]!=2:
raise ValueError(f"data was expected to be a numpy array of shape (N,2), got {tuple(data.shape)}")
# Here we avoid changing plots while they are associated to a plot view.
        # This is to suppress an error that otherwise shows up
# (see e.g. https://github.com/KitwareMedical/lungair-desktop-application/issues/27).
self.plot_view.setMRMLPlotViewNode(self.empty_plot_view_node)
if x_axis_label is not None and y_axis_label is not None:
columnNames = [x_axis_label, y_axis_label]
else:
columnNames = None
plot_chart_node = slicer.util.plot(
data, 0, show = False,
title = title,
columnNames = columnNames,
nodes = self.plot_nodes
)
plot_chart_node.SetXAxisTitle(x_axis_label)
if y_axis_label is not None:
plot_chart_node.SetYAxisTitle(y_axis_label)
assert(len(self.plot_nodes["series"]) == 1)
self.plot_nodes["series"][0].SetName(legend_label) # This text is displayed in the legend
self.plot_nodes["series"][0].SetPlotType(PLOT_TYPES[plot_type])
self.plot_view_node.SetPlotChartNodeID(plot_chart_node.GetID())
if labels is not None:
labels_array = vtk.vtkStringArray()
for label in labels:
labels_array.InsertNextValue(label)
label_column_name = (x_axis_label if x_axis_label else "X-axis") + " Label"
labels_array.SetName(label_column_name)
self.plot_nodes['table'].AddColumn(labels_array)
self.plot_nodes["series"][0].SetLabelColumnName(label_column_name)
self.plot_view.setMRMLPlotViewNode(self.plot_view_node)
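# Illustrative usage sketch (assumes a running 3D Slicer session and a parent layout to
# place the widget in; the series name and data values below are made up):
#   plot_data = SlicerPlotData("FiO2")
#   parent_layout.addWidget(plot_data.plot_view)
#   data = np.array([[0, 30.0], [1, 28.5], [2, 27.0]])
#   plot_data.set_plot_data(data, x_axis_label="Day", y_axis_label="FiO2 (%)", plot_type="scatter")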
| 1.992188 | 2 |
GibbsSampling_3.py | srinirajaraman/Machine-Learning | 0 | 12789392 | '''
Reference: Algorithm 10.1 (Gibbs sampling for a discrete undirected model)
Book : <NAME>
Notation details:
vector - vec
matrix - mat
Author:<NAME>
Dept of EECE
Computational Ocularscience Lab
Date: 22/9/2016
'''
#Import packages
import numpy as np
import matplotlib.pyplot as plt
import math as m
import os as os
def function_readimage(given_image):
return plt.imread(given_image)
def function_plot(given_image, color_option):
plt.imshow(given_image, cmap = color_option, vmin = 0, vmax = 1, interpolation = 'none')
plt.show()
def function_get_right_coord(i, j):
return i, j + 1
def function_get_left_coord(i, j):
return i, j - 1
def function_get_top_coord(i, j):
return i - 1, j
def function_get_bottom_coord(i, j):
return i + 1, j
def function_get_current_coord(i, j):
return i, j
'''
If the two pixel values are the same, return theta_01 (the larger potential);
if they differ, return theta_00 (the smaller potential)
'''
def binary_phi_calc(w1, w2, theta_00, theta_01):
    return theta_01 if w1 == w2 else theta_00  # matching neighbours get the larger potential
#if w1 == w2:
# return theta_01
#else:
# return theta_00
#Compute cliques based on the node index
def compute_clique(node_index, row_size, col_size):
#
s_list = []
r = node_index[0]
c = node_index[1]
#Identify all valid cliques containing node_index
if( r - 1 >= 0):
s_list.append(function_get_top_coord(r, c))
if( r + 1 < row_size):
s_list.append(function_get_bottom_coord(r, c))
if( c - 1 >= 0):
s_list.append(function_get_left_coord(r, c))
if( c + 1 < col_size):
s_list.append(function_get_right_coord(r, c))
return s_list
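#Example (illustrative): for a 3 x 3 grid, compute_clique([0, 0], 3, 3) returns [(1, 0), (0, 1)],
#i.e. only the bottom and right neighbours exist for the top-left pixel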
#Helper function to get index with the value 'val'
def function_get_index(num_array, val):
#
for ind in range(0, len(num_array)):
if(num_array[ind] == val):
break;
return ind
#Perform gibbs sampling for a binary image
def gibbs_sampling_categorical_2d(x_mat_t0, sample_space_vec):
#x_mat_t0: Current state of the undirected graph for Gibb's sampling; will return the next state x_mat_t1
#sample_space_vec: contains the values that each of the x_d variable in x_mat sample from. e.g. [0, 1] for binary sample space
#
#Dimension D in Algorithm 10.1 has been split into row and columns (i.e. into a 2D vector)
row_size, col_size = np.shape(x_mat_t0);
#Initialize the next state of the undirected graph with the previous state
x_mat_t1 = x_mat_t0;
#Potential function paramteres
#Note: these are defined for binary MRF and should be updated when there is change in the potential function form or change from binary to categorical variable
theta_00 = 0.1;
theta_01 = 1.0;
#For each dimension
for row in range(0, row_size):
for col in range(0, col_size):
#Compute the values that a variable can take here {0, 1}
lambda_vec = np.ones(np.shape(sample_space_vec)); #Initialize the lambda parameter of the categorical distribution for the d'th location (or [row, col] location)
#Get the cliques corresponding to row and col indcies
clique_list = compute_clique([row, col], row_size, col_size);
for k in sample_space_vec:
#Set the current location's state to k, i.e. now working with p(x_d = k | x_{1...D}\d at t0)
x_mat_t1[row, col] = k
#Compute the unnormalized marginal probability
for c in range(0, len(clique_list), 1):
lambda_vec[k] *= binary_phi_calc(x_mat_t1[row, col], x_mat_t1[clique_list[c]], theta_00, theta_01)
#Normalize the probabilities
lambda_vec = lambda_vec / np.sum(lambda_vec)
#Sample from categorical distribution
curr_no_of_samples = 1;
sample_k_array = np.random.multinomial(curr_no_of_samples, lambda_vec) #returned value contains number of sample in each of the k categories
#Assign the index associated with the value 1 to current row and column
x_mat_t1[row, col] = function_get_index(sample_k_array, curr_no_of_samples)
return x_mat_t1;
#Compute gibbs sampling for a binary image
def wrapper_gibbs_sampling_categorical_2d(no_of_time_samples, noisy_image, burn_in_count, sample_space_vec, row_size, col_size):
#
debug_flag = 0;
#Plotting variables
color_option = 'gray'
#
fig_no_of_rows = np.floor(np.sqrt(no_of_time_samples)); #Identifying subplot layout to show all samples simultaneously
fig_no_of_cols = np.ceil(no_of_time_samples / fig_no_of_rows);
#Initialize the state vector (containing the states of the nodes in the undirected graph)
x_mat_all_samples = np.zeros([no_of_time_samples, row_size, col_size])
#Specify the initial state of the state vector to begin Gibb's sampling
#x_mat_t0 = np.zeros([row_size, col_size])
x_mat_t0 = noisy_image
mu = 0;
sigma = 1
#Debugging
if (debug_flag == 1):
plt.imshow(x_mat_t0, cmap = color_option, interpolation = 'none');
plt.draw();
plt.pause(0.01);
for t in range(1, no_of_time_samples + burn_in_count):
print 'Sample #', t;
x_mat_t1 = gibbs_sampling_categorical_2d(x_mat_t0, sample_space_vec)
#Start capturing the samples after burn-in
if t >= burn_in_count:
x_mat_all_samples[t - burn_in_count, :, :] = x_mat_t1
#Debugging
if (debug_flag == 1):
plt.subplot(fig_no_of_rows, fig_no_of_cols, t - burn_in_count + 1);
plt.imshow(x_mat_t1, cmap = color_option);
plt.draw()
plt.pause(0.01)
#Current state becomes the initial state for the next Gibb's sampling
x_mat_t0 = x_mat_t1;
return x_mat_all_samples[no_of_time_samples - 1, :, :]
if __name__ == '__main__':
#Get the dataset from the path:
currDir = os.getcwd()
dataset_loc = '/Datasets/'
dataset_name = 'test_binary_image_01.png'
dataset_file_loc = currDir + dataset_loc + dataset_name
#Time sample and no of variables on a subset Sc
no_of_time_samples = 10
#Gibbs sampling parameter
burn_in_count = 20;
#Dimension d
row_size = 100;
col_size = 100; #In original notation, d -> (row_size x col_size); e.g. 100 x 100 matrix equivalent to D = 10,000
noisy_image = plt.imread(dataset_file_loc)
#Parameters to generate prior
row_size, col_size = noisy_image.shape
#Sample space of x_d (where x_d is the d'th element of x_vec)
sample_space_vec = np.array([0, 1]);
#Generate
x_mat_all_samples = wrapper_gibbs_sampling_categorical_2d(no_of_time_samples, noisy_image, burn_in_count, sample_space_vec, row_size, col_size)
| 2.6875 | 3 |
lists/linkedlist.py | santoshmano/pybricks | 0 | 12789393 | <reponame>santoshmano/pybricks
class Node:
def __init__(self, val, next=None):
self.val = val
self.next = next
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
def insertEnd(self, val):
n = Node(val)
if self.head == None:
self.head = self.tail = n
else:
self.tail.next = n
self.tail = n
def __str__(self):
temp = self.head
s = "-"
while temp:
s = s + " " + str(temp.val)
temp = temp.next
return s
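# Added example (illustrative): exercising LinkedList directly.
ll = LinkedList()
for v in (1, 2, 3):
    ll.insertEnd(v)
print(ll)  # prints "- 1 2 3"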
n = Node(5)
n.next = Node(6)
n.next.next = Node(7)
n.next.next.next = Node(8) | 3.96875 | 4 |
code_icc/utils/__init__.py | ThmCuong/IIC-Python3 | 0 | 12789394 | from . import cluster, segmentation, semisup | 0.960938 | 1 |
docker/src/clawpack-5.3.1/pyclaw/src/petclaw/solution.py | ian-r-rose/visualization | 11 | 12789395 | from clawpack import pyclaw
from clawpack.pyclaw.solution import Solution
class Solution(Solution):
""" Parallel Solution class.
"""
__doc__ += pyclaw.util.add_parent_doc(pyclaw.Solution)
def get_read_func(self, file_format):
from clawpack.petclaw import io
if file_format == 'petsc':
return io.petsc.read
elif file_format == 'hdf5':
return io.hdf5.read
else:
raise ValueError("File format %s not supported." % file_format)
def get_write_func(self, file_format):
from clawpack.petclaw import io
if 'petsc' in file_format:
return io.petsc.write
elif 'hdf5' in file_format:
return io.hdf5.write
else:
raise ValueError("File format %s not supported." % file_format)
| 2.53125 | 3 |
covid_cases/clients/sacoronavirus.py | praekeltfoundation/healthcheck | 0 | 12789396 | import re
from dataclasses import dataclass
from datetime import date, datetime
from typing import Iterator
import requests
from bs4 import BeautifulSoup
@dataclass
class Counters:
tests: int
positive: int
recoveries: int
deaths: int
vaccines: int
@dataclass
class CaseImage:
url: str
date: date
class SACoronavirusClient:
def __init__(self):
self.session = requests.Session()
def get_homepage(self) -> str:
with self.session as session:
response = session.get(
"https://sacoronavirus.co.za/",
headers={"User-Agent": "contactndoh-whatsapp"},
timeout=30,
)
response.raise_for_status()
return response.text
def get_daily_cases_page(self) -> str:
with self.session as session:
response = session.get(
"https://sacoronavirus.co.za/category/daily-cases",
headers={"User-Agent": "contactndoh-whatsapp"},
timeout=30,
)
response.raise_for_status()
return response.text
def get_homepage_counters(self) -> Counters:
soup = BeautifulSoup(self.get_homepage(), "html.parser")
counters = soup.find("div", class_="counters-box")
for counter in counters.find_all("div", "counter-box-container"):
name = counter.find("div", "counter-box-content").string
if "test" in name.lower():
tests = int(counter.span["data-value"])
elif "case" in name.lower():
positive = int(counter.span["data-value"])
elif "recover" in name.lower():
recoveries = int(counter.span["data-value"])
elif "death" in name.lower():
deaths = int(counter.span["data-value"])
elif "vaccine" in name.lower():
vaccines = int(counter.span["data-value"])
return Counters(
tests=tests,
positive=positive,
recoveries=recoveries,
deaths=deaths,
vaccines=vaccines,
)
def get_daily_cases_image_urls(self) -> Iterator[CaseImage]:
soup = BeautifulSoup(self.get_daily_cases_page(), "html.parser")
for article in soup.main.find_all("article"):
url = article.img["src"]
d = article.select("h2.entry-title")[0].string
            d = re.search(r".*\((.*)\).*", d).group(1)
d = datetime.strptime(d, "%A %d %B %Y").date()
yield CaseImage(url=url, date=d)
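# Illustrative usage (performs live HTTP requests; not part of the module itself):
# client = SACoronavirusClient()
# counters = client.get_homepage_counters()
# print(counters.positive, counters.vaccines)
# for image in client.get_daily_cases_image_urls():
#     print(image.date, image.url)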
| 2.75 | 3 |
bert_sklearn/model/pytorch_pretrained/__init__.py | ezesalta/bert-sklearn | 0 | 12789397 | __version__ = "0.6.1"
from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .modeling import (BertConfig, BertModel, BertForPreTraining,
BertForMaskedLM, BertForNextSentencePrediction,
BertForSequenceClassification, BertForMultipleChoice,
BertForTokenClassification, BertForQuestionAnswering,
load_tf_weights_in_bert, BertPreTrainedModel,
WEIGHTS_NAME, CONFIG_NAME)
from .optimization import BertAdam, warmup_linear
from .file_utils import PYTORCH_PRETRAINED_BERT_CACHE, cached_path
| 1.523438 | 2 |
scripts/argument_parser.py | k-nuth/kth | 20 | 12789398 | <reponame>k-nuth/kth
#!/usr/bin/env python3
# Copyright (c) 2016-2020 Knuth Project developers.
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from argparse import ArgumentParser
from os.path import expanduser
def parse_args():
parser = ArgumentParser('kth Release Manager')
parser.add_argument("-rp", "--root_path", dest="root_path", help="root path where the projects are", default=expanduser("~"))
parser.add_argument('old_version', type=str, nargs=1, help='old version')
parser.add_argument('new_version', type=str, nargs='?', help='new version')
parser.add_argument("-t", "--token", dest="token", help="GitHub token", default='')
args = parser.parse_args()
old_version = args.old_version[0].split('.')
if len(old_version) != 3:
print('old_version has to be of the following format: xx.xx.xx')
return False,'','','',''
if args.new_version is None:
new_version = [old_version[0], str(int(old_version[1]) + 1), old_version[2]]
else:
new_version = args.new_version.split('.')
if len(new_version) != 3:
print('new_version has to be of the following format: xx.xx.xx')
return False,'','','',''
print (new_version)
print (old_version)
return True, args.root_path, old_version, new_version, args.token
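# Illustrative invocation (the wrapping release script is not shown here):
#   python <release_script>.py 0.5.0 0.6.0 --root_path ~/repos -t <github_token>
# When new_version is omitted, the minor version is bumped automatically (0.5.0 -> 0.6.0).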
| 2.421875 | 2 |
tcex/resources/__init__.py | brikardtc/tcex | 0 | 12789399 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Resources module for TcEx Framework"""
# flake8: noqa
from .resources import Resources
| 1.046875 | 1 |
pawpy/model_builder.py | robbie-manolache/kaggle-pawpular | 0 | 12789400 |
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< #
# Module for building transfer learning framework #
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
import tensorflow as tf
import tensorflow.keras.layers as kl
import tensorflow.keras.regularizers as kreg
import tensorflow.keras.initializers as kini
def build_NN_model(config, model_map):
"""
"""
# load config variables
model_type = config["model_type"]
img_dim = config["img_dim"]
params = config["params"]
# load pre-trained model
pre_net = model_map[model_type]["model"](
weights='imagenet',
input_shape=(img_dim, img_dim, 3),
include_top=False
)
pre_net.trainable = False
# init input list for neural network
all_inputs = []
# image input
img_in = kl.Input(shape=(img_dim, img_dim, 3))
all_inputs.append(img_in)
# pre-processing
if model_map[model_type]["preproc"] is not None:
img_x = model_map[model_type]["preproc"](img_in)
img_x = pre_net(img_x, training=False)
else:
img_x = pre_net(img_in, training=False)
# tabular metadata inputs
x_in = kl.Input(shape=len(config["tab_feats"]))
all_inputs.append(x_in)
# image data processing
if config["conv_proc"]:
d = model_map[model_type]["final_shape"]
all_x = kl.Reshape((d[0], d[1]*d[2], 1))(img_x)
all_x = kl.Conv2D(
filters=params["conv"]["nf"],
kernel_size=d[:2], strides=d[:2], name="post_conv2D",
kernel_regularizer=kreg.l2(params["conv"]["l2"]),
kernel_initializer=kini.RandomUniform(
minval=1/((d[0]+1)*(d[1]+1)),
maxval=1/((d[0]-1)*(d[1]-1))
)
)(all_x)
all_x = kl.Flatten()(all_x)
else:
all_x = kl.GlobalAvgPool2D()(img_x)
# add tabular features and then dropout
if config["batch_norm"]:
all_x = kl.BatchNormalization()(all_x)
all_x = kl.Concatenate()([all_x, x_in])
all_x = kl.Dropout(params["drop"])(all_x)
# additional dense layer
if config["extra_dense"]:
all_x = kl.Dense(
params["xtra"]["n"],
activation="linear",
kernel_regularizer=kreg.l1_l2(l1=params["xtra"]["l1"],
l2=params["xtra"]["l2"]),
name="extra_dense"
)(all_x)
if params["xtra"]["acti"] == "relu":
all_x = kl.LeakyReLU(alpha=params["xtra"]["relu_alpha"])(all_x)
elif params["xtra"]["acti"] == "elu":
all_x = kl.ELU()(all_x)
elif params["xtra"]["acti"] == "prelu":
all_x = kl.PReLU()(all_x)
else: # for sigmoid and tanh
if params["xtra"]["acti"] != "linear":
all_x = kl.Activation(params["xtra"]["acti"])(all_x)
else:
pass
# final output layer
all_x = kl.Dense(
1, activation="sigmoid", name="final_layer",
kernel_regularizer=kreg.l2(params["outy"]["l2"])
)(all_x)
out_y = kl.Lambda(lambda x: x * 100)(all_x)
# compile model
model = tf.keras.Model(inputs=all_inputs, outputs=out_y)
return(model)
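# Illustrative shape of the expected inputs, inferred from the code above
# (field values are placeholders, not project defaults):
# config = {"model_type": "efficientnet_b0", "img_dim": 224, "tab_feats": ["age", "weight"],
#           "conv_proc": False, "batch_norm": True, "extra_dense": True,
#           "params": {"conv": {"nf": 1, "l2": 1e-4}, "drop": 0.3,
#                      "xtra": {"n": 64, "l1": 0.0, "l2": 1e-4, "acti": "relu", "relu_alpha": 0.1},
#                      "outy": {"l2": 1e-4}}}
# model_map = {"efficientnet_b0": {"model": ..., "preproc": ..., "final_shape": (7, 7, 1280)}}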
| 2.421875 | 2 |
global_trainer.py | youngwoon/DnC-RL-Tensorflow | 9 | 12789401 | import os.path as osp
import os
import time
from collections import defaultdict
import numpy as np
import tensorflow as tf
import moviepy.editor as mpy
import tqdm
from contextlib import contextmanager
from mpi4py import MPI
import imageio
from baselines import logger
import baselines.common.tf_util as U
from baselines.common import colorize
from baselines.common.mpi_adam import MpiAdam
import dataset
def flatten_lists(listoflists):
return [el for list_ in listoflists for el in list_]
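# e.g. flatten_lists([[1, 2], [3]]) -> [1, 2, 3]  (illustrative)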
class GlobalTrainer(object):
def __init__(self, name, env, runner, policy, config):
self._name = name
self._env = env.unwrapped
self._runner = runner
self._config = config
self._policy = policy
self._is_chef = (MPI.COMM_WORLD.Get_rank() == 0)
# global step
self.global_step = tf.Variable(0, name='global_step', dtype=tf.int64, trainable=False)
self._update_global_step = tf.assign(self.global_step, self.global_step + 1)
# tensorboard summary
self.summary_name = ['global/length', 'global/reward', 'global/success']
# build loss/optimizers
self._build_distillation()
def _build_distillation(self):
config = self._config
pi = self._policy
self._global_norm = U.function(
[], tf.global_norm([tf.cast(var, tf.float32) for var in pi.get_variables()]))
# policy update
ac = pi.pdtype.sample_placeholder([None])
pol_var_list = [v for v in pi.get_trainable_variables() if 'pol' in v.name]
self._pol_adam = MpiAdam(pol_var_list)
pol_loss = tf.reduce_mean(pi.pd.neglogp(ac))
#pol_loss = tf.reduce_mean(tf.square(pi.pd.sample() - ac))
fetch_dict = {
'loss': pol_loss,
'g': U.flatgrad(pol_loss, pol_var_list,
clip_norm=config.global_max_grad_norm)
}
self._pol_loss = U.function([ac] + pi.ob, fetch_dict)
self.summary_name += ['global/loss', 'global/grad_norm', 'global/global_norm']
# value update
if config.global_vf:
ret = tf.placeholder(dtype=tf.float32, shape=[None], name='return')
vf_var_list = [v for v in pi.get_trainable_variables() if 'vf' in v.name]
self._vf_adam = MpiAdam(vf_var_list)
vf_loss = tf.reduce_mean(tf.square(pi.vpred - ret))
fetch_dict = {
'vf_loss': vf_loss,
'vf_g': U.flatgrad(vf_loss, vf_var_list,
clip_norm=config.global_max_grad_norm)
}
self._vf_loss = U.function([ret] + pi.ob, fetch_dict)
self.summary_name += ['global/vf_loss', 'global/vf_grad_norm']
# initialize and sync
U.initialize()
self._pol_adam.sync()
if config.global_vf:
self._vf_adam.sync()
if config.debug:
logger.log("[worker: {} global] Init param sum".format(MPI.COMM_WORLD.Get_rank()), self._adam.getflat().sum())
@contextmanager
def timed(self, msg):
if self._is_chef:
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds"%(time.time() - tstart), color='magenta'))
else:
yield
def update(self, step, ob, ac, ret=None):
info = defaultdict(list)
config = self._config
sess = U.get_session()
global_step = sess.run(self.global_step)
sess.run(self._update_global_step)
pi = self._policy
ob_dict = self._env.get_ob_dict(ob)
if self._config.obs_norm == 'learn':
for ob_name in pi.ob_type:
pi.ob_rms[ob_name].update(ob_dict[ob_name])
with self.timed("update global network"):
for _ in range(self._config.global_iters):
# policy network
for (mb_ob, mb_ac) in dataset.iterbatches(
(ob, ac), include_final_partial_batch=False,
batch_size=self._config.global_batch_size):
ob_list = pi.get_ob_list(mb_ob)
fetched = self._pol_loss(mb_ac, *ob_list)
loss, g = fetched['loss'], fetched['g']
self._pol_adam.update(g, self._config.global_stepsize)
info['global/loss'].append(np.mean(loss))
info['global/grad_norm'].append(np.linalg.norm(g))
if config.global_vf:
# value network
for (mb_ob, mb_ret) in dataset.iterbatches(
(ob, ret), include_final_partial_batch=False,
batch_size=self._config.global_batch_size):
ob_list = pi.get_ob_list(mb_ob)
fetched = self._vf_loss(mb_ret, *ob_list)
vf_loss, vf_g = fetched['vf_loss'], fetched['vf_g']
self._vf_adam.update(vf_g, self._config.global_stepsize)
info['global/vf_loss'].append(np.mean(vf_loss))
info['global/vf_grad_norm'].append(np.linalg.norm(vf_g))
for key, value in info.items():
info[key] = np.mean(value)
info['global/global_norm'] = self._global_norm()
return info
def summary(self, it):
info = self.evaluate(it, record=self._config.training_video_record)
# save checkpoint
if it % self._config.ckpt_save_step == 0:
fname = osp.join(self._config.log_dir, '%.5d' % it)
U.save_state(fname)
return info
def evaluate(self, ckpt_num=None, record=False):
config = self._config
ep_lens = []
ep_rets = []
ep_success = []
if record:
record_dir = osp.join(config.log_dir, 'video')
os.makedirs(record_dir, exist_ok=True)
for _ in tqdm.trange(10):
ep_traj = self._runner.rollout(True, True)
ep_lens.append(ep_traj["ep_length"][0])
ep_rets.append(ep_traj["ep_reward"][0])
ep_success.append(ep_traj["ep_success"][0])
logger.log('[{}] Trial #{}: lengths {}, returns {}'.format(
self._name, _, ep_traj["ep_length"][0], ep_traj["ep_reward"][0]))
# Video recording
if record:
visual_obs = ep_traj["visual_ob"]
video_name = '{}{}_{}{}.{}'.format(config.video_prefix or '', self._name,
'' if ckpt_num is None else 'ckpt_{}_'.format(ckpt_num), _, config.video_format)
video_path = osp.join(record_dir, video_name)
if config.video_format == 'mp4':
fps = 60.
def f(t):
frame_length = len(visual_obs)
new_fps = 1./(1./fps + 1./frame_length)
idx = min(int(t*new_fps), frame_length-1)
return visual_obs[idx]
video = mpy.VideoClip(f, duration=len(visual_obs)/fps+2)
video.write_videofile(video_path, fps, verbose=False)
elif config.video_format == 'gif':
imageio.mimsave(video_path, visual_obs, fps=100)
logger.log('[{}] Episode Length: {}'.format(self._name, np.mean(ep_lens)))
logger.log('[{}] Episode Rewards: {}'.format(self._name, np.mean(ep_rets)))
return {'global/length': np.mean(ep_lens),
'global/reward': np.mean(ep_rets),
'global/success': np.mean(ep_success)}
| 1.8125 | 2 |
Studabase/tkinterGrid.py | skhadka007/STUDABASE | 0 | 12789402 | # GUI Tkinter grid file.
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from PIL import Image, ImageTk # pip install pillow (<- in terminal if not already installed)
import tkinter as tk
import csv
# OTHER PYTHON FILES (OURS)
import menuFunctions
import openData
# import moreFunctions
import pyodbc
# pip install wheel, then pip install pandas
import pandas as pd
# Root needs to be created FIRST
root = tk.Tk()
searchText = "ERROR"
def filterOptions():
global searchText
# Label Frame
filterOptionsFrame = tk.LabelFrame(root, text="Sort & Search", pady=5, padx=5)
filterOptionsFrame.pack(side="top", padx=10, pady=10, fill="both", expand="no")
# filterOptionsFrame.configure(bg="white")
# Filter label and drop down menu
# label
filterLabel = tk.Label(filterOptionsFrame, text="Sort:")
filterLabel.pack(side='left')
# Option/Drop menu
filters = [
'Department',
'GPA',
'Graduation Year',
'First Name Start',
'Last Name Start'
]
currentFilter = tk.StringVar()
currentFilter.set(filters[0])
filterMenu = tk.OptionMenu(filterOptionsFrame, currentFilter, *filters)
filterMenu.pack(side='left', padx=5)
filterMenu.config(bg="white", fg="black", width=17) # filterMenu settings
# Reset Filter button
button_resetFilter = tk.Button(filterOptionsFrame, text="Reset Sort", bg="light sky blue")
button_resetFilter.pack(side='left')
# Search Text Box
searchBox = Entry(filterOptionsFrame, borderwidth=2)
# Search entry box deletion
def deleteSearch():
searchBox.delete(0, END)
# Clear Search Button
button_clearSearch = tk.Button(filterOptionsFrame, text="CLEAR", bg="light sky blue", command=deleteSearch)
button_clearSearch.pack(side='right', padx=2)
# Search Button
    def doSearch():
        global searchText
        searchText = searchBox.get()  # read the query when the button is clicked
        openResults()
    button_search = tk.Button(filterOptionsFrame, text="SEARCH", bg="khaki1", command=doSearch)
    button_search.pack(side='right', padx=2)
# Search text box pack
searchBox.pack(side='right', padx=5)
# Search label
searchLabel = tk.Label(filterOptionsFrame, text="Search:")
searchLabel.pack(side='right')
#######################################################################################################
############### DATA TABLE & RELATED FUNCTIONS #######################################
#######################################################################################################
# Label Frame
dataTableFrame = tk.LabelFrame(root, text="Student Data", pady=2, padx=5, width=1300, height=1000)
dataScrollbarV = tk.Scrollbar(dataTableFrame, orient=VERTICAL)
dataScrollbarH = tk.Scrollbar(dataTableFrame, orient=HORIZONTAL)
dataListBox = Listbox(dataTableFrame, width=20, yscrollcommand=dataScrollbarV.set, selectmode=BROWSE, exportselection=0)
dataListBoxID = Listbox(dataTableFrame, width=3, yscrollcommand=dataScrollbarV.set, selectmode=BROWSE, exportselection=0)
dataListBoxEmail = Listbox(dataTableFrame, width=25, yscrollcommand=dataScrollbarV.set, selectmode=BROWSE, exportselection=0)
dataListBoxDepartment = Listbox(dataTableFrame, width=8, yscrollcommand=dataScrollbarV.set, selectmode=BROWSE, exportselection=0)
dataListBoxMajor = Listbox(dataTableFrame, width=15, yscrollcommand=dataScrollbarV.set, selectmode=BROWSE, exportselection=0)
dataListBoxDate = Listbox(dataTableFrame, width=8, yscrollcommand=dataScrollbarV.set, selectmode=BROWSE, exportselection=0)
def yview( *args):
dataListBox.yview(*args)
dataListBoxID.yview(*args)
dataListBoxEmail.yview(*args)
dataListBoxDepartment.yview(*args)
dataListBoxMajor.yview(*args)
dataListBoxDate.yview(*args)
dataScrollbarV.config(command=yview)
#dataScrollbarV.config(command=lambda:[dataListBox.yview(), dataListBoxID.yview(), dataListBoxEmail.yview(), dataListBoxDepartment.yview(), dataListBoxMajor.yview(), dataListBoxDate.yview()])
#dataScrollbarH.config(command=dataListBox.xview)
#################################################################
## VARIABLES ##
filePathCurrent = ""
studentList = []
# Display listbox onto GUI
def dataTablePack():
dataTableFrame.pack(anchor="n", padx=10, pady=1, fill="both", expand="yes")
dataListBox.pack(side=LEFT, pady=10, fill="both", expand="yes")
dataListBoxID.pack(side=LEFT, pady=10, fill="both", expand="yes")
dataListBoxEmail.pack(side=LEFT, pady=10, fill="both", expand="yes")
dataListBoxDepartment.pack(side=LEFT, pady=10, fill="both", expand="yes")
dataListBoxMajor.pack(side=LEFT, pady=10, fill="both", expand="yes")
dataListBoxDate.pack(side=LEFT, pady=10, fill="both", expand="yes")
dataScrollbarV.pack(side=RIGHT, fill='y')
# Insert data from opened csv
def insertData():
global filePathCurrent
global studentList
deleteAll()
# Just so name is easier to use
filePath = openData.getFilePath()
filePathCurrent = filePath
# Opens chosen file
File = open(filePath)
Reader = csv.reader(File)
Data = list(Reader)
# Removes first line of file - Row filled with the Column titles
del(Data[0])
for x in list(range(0, len(Data))):
studentList.append(Data[x])
#dataListBox.insert(END, Data[x])
name = studentList[x][1] + ", " + studentList[x][0]
#formattedText = ('{:<20}{:>15}{:>50}'.format(name, studentList[x][2], studentList[x][4]))
#formattedText = (name + " " + studentList[x][2] + " " + studentList[x][3] + " " + studentList[x][4] + " " + studentList[x][5] + " " + studentList[x][6])
dataListBox.insert(END, (name))
dataListBoxID.insert(END, (studentList[x][2]))
dataListBoxEmail.insert(END, (studentList[x][3]))
dataListBoxDepartment.insert(END, (studentList[x][4]))
dataListBoxMajor.insert(END, (studentList[x][5]))
dataListBoxDate.insert(END, (studentList[x][6]))
# For refreshing current open file
def insertDataRefresh():
global filePathCurrent
global studentList
deleteAll()
# Opens chosen file
File = open(filePathCurrent)
Reader = csv.reader(File)
Data = list(Reader)
del(Data[0])
for x in list(range(0, len(Data))):
studentList.append(Data[x])
name = studentList[x][1] + ", " + studentList[x][0]
dataListBox.insert(END, (name))
dataListBoxID.insert(END, (studentList[x][2]))
dataListBoxEmail.insert(END, (studentList[x][3]))
dataListBoxDepartment.insert(END, (studentList[x][4]))
dataListBoxMajor.insert(END, (studentList[x][5]))
dataListBoxDate.insert(END, (studentList[x][6]))
## CREATES DATA TABLE AFTER PICKING FILE
def dataTable():
dataTablePack()
insertData()
# Deletes ONE student
def deleteOne():
global studentList
index = dataListBox.curselection()[0]
del studentList[index]
dataListBox.delete(index) # ANCHOR
dataListBoxID.delete(index)
dataListBoxEmail.delete(index)
dataListBoxDepartment.delete(index)
dataListBoxMajor.delete(index)
dataListBoxDate.delete(index)
    # (Listbox has no 'text' option; nothing else needs to be reset here)
# Clears Table
def deleteAll():
dataListBox.delete(0, END)
dataListBoxID.delete(0, END)
dataListBoxEmail.delete(0, END)
dataListBoxDepartment.delete(0, END)
dataListBoxMajor.delete(0, END)
dataListBoxDate.delete(0, END)
def select():
dataListBox.config(text=dataListBox.get(ANCHOR))
def saveFile():
global studentList
csvWrite = filedialog.asksaveasfile(mode='w', defaultextension=".csv", filetypes=(("CSV Files", "*.csv"), ("All Files", "*.*")))
if csvWrite is None: # When 'canceled' and no file saved
return
# with open(csvWrite, "wb") as f:
# writer = csv.writer(f)
# writer.writerows(a)
text2save = str(dataListBox.get(0, END)) # starts from `1.0`, not `0.0`
csvWrite.write(text2save)
csvWrite.close()
def refreshTable():
deleteAll()
insertDataRefresh()
def updateStudent():
global studentList
'''
First_Name
Last_Name
Student_ID
Email
Department
Major
Grad_Date
'''
# Gets location of current selection
index = dataListBox.curselection()[0]
newWindow = Toplevel(root)
newWindow.title("Update Student")
newWindow.geometry("315x230")
newWindow.iconbitmap('hat.ico')
newWindow.resizable(width=False, height=False) # Window size changeability
#Create Text Boxes
First_Name = Entry(newWindow, width = 30)
First_Name.grid(row = 0, column = 1, padx = 20, pady = (10, 0))
First_Name.insert(0, studentList[index][0])
Last_Name = Entry(newWindow, width = 30)
Last_Name.grid(row = 1, column = 1, padx = 20)
Last_Name.insert(0, studentList[index][1])
Student_ID = Entry(newWindow, width = 30)
Student_ID.grid(row = 2, column = 1, padx = 20)
Student_ID.insert(0, studentList[index][2])
Email = Entry(newWindow, width = 30)
Email.grid(row = 3, column = 1, padx = 20)
Email.insert(0, studentList[index][3])
Department = Entry(newWindow, width = 30)
Department.grid(row = 4, column = 1, padx = 20)
Department.insert(0, studentList[index][4])
Major = Entry(newWindow, width = 30)
Major.grid(row = 5, column = 1, padx = 20)
Major.insert(0, studentList[index][5])
Grad_Date = Entry(newWindow, width = 30)
Grad_Date.grid(row = 6, column = 1, padx = 20)
Grad_Date.insert(0, studentList[index][6])
#Create Text Box Labels
First_Name_Label = Label(newWindow, text = 'First Name')
First_Name_Label.grid(row = 0, column = 0, pady = (10, 0))
Last_Name_Label = Label(newWindow, text = 'Last Name')
Last_Name_Label.grid(row = 1, column = 0)
Student_ID_Label = Label(newWindow, text = 'Student ID')
Student_ID_Label.grid(row = 2, column = 0)
Email_Label = Label(newWindow, text = 'Email')
Email_Label.grid(row = 3, column = 0)
Department_Label = Label(newWindow, text = 'Department')
Department_Label.grid(row = 4, column = 0)
Major_Label = Label(newWindow, text = 'Major')
Major_Label.grid(row = 5, column = 0)
Grad_Date_Label = Label(newWindow, text = 'Grad Date')
Grad_Date_Label.grid(row = 6, column = 0)
goodUpdate_Label = Label(newWindow, text="* * *")
goodUpdate_Label.grid(row = 8, columnspan=2)
def retrieve_input(entryBox):
input = entryBox.get()
return input
def goodUpdate():
## Update student
# Get entered text
studentList[index][0] = retrieve_input(First_Name)
studentList[index][1] = retrieve_input(Last_Name)
studentList[index][2] = retrieve_input(Student_ID)
studentList[index][3] = retrieve_input(Email)
studentList[index][4] = retrieve_input(Department)
studentList[index][5] = retrieve_input(Major)
studentList[index][6] = retrieve_input(Grad_Date)
name = studentList[index][1] + ", " + studentList[index][0]
dataListBox.insert(END, (name))
dataListBoxID.insert(END, (studentList[index][2]))
dataListBoxEmail.insert(END, (studentList[index][3]))
dataListBoxDepartment.insert(END, (studentList[index][4]))
dataListBoxMajor.insert(END, (studentList[index][5]))
dataListBoxDate.insert(END, (studentList[index][6]))
#name = studentList[index][1] + ", " + studentList[index][0]
#dataListBox.insert(END, Data[x])
#formattedText = str(name + " " + studentList[index][2] + " " + studentList[index][3] + " " + studentList[index][4] + " " + studentList[index][5] + " " + studentList[index][6])
#dataListBox.insert(index, (formattedText))
insertDataRefresh()
        goodUpdate_Label.config(text="Successful Update!")
#Create Update Button
Update_button = Button(newWindow, text = 'Update Student', bg="goldenrod1", command=goodUpdate)
Update_button.grid(row = 7, column = 0, columnspan = 2, pady = 10, padx = 10, ipadx = 100)
# print(index)
return None
def openResults():
global studentList
global searchText
newWindow = Toplevel(root)
newWindow.title("Search Results")
newWindow.geometry("315x170")
newWindow.iconbitmap('hat.ico')
newWindow.resizable(width=False, height=False) # Window size changeability
#Create Text Boxes
First_Name = Entry(newWindow, width = 30)
First_Name.grid(row = 0, column = 1, padx = 20, pady = (10, 0))
#First_Name.insert(0, studentList[x][0])
Last_Name = Entry(newWindow, width = 30)
Last_Name.grid(row = 1, column = 1, padx = 20)
#Last_Name.insert(0, studentList[x][1])
Student_ID = Entry(newWindow, width = 30)
Student_ID.grid(row = 2, column = 1, padx = 20)
#Student_ID.insert(0, studentList[x][2])
Email = Entry(newWindow, width = 30)
Email.grid(row = 3, column = 1, padx = 20)
#Email.insert(0, studentList[x][3])
Department = Entry(newWindow, width = 30)
Department.grid(row = 4, column = 1, padx = 20)
#Department.insert(0, studentList[x][4])
Major = Entry(newWindow, width = 30)
Major.grid(row = 5, column = 1, padx = 20)
#Major.insert(0, studentList[x][5])
Grad_Date = Entry(newWindow, width = 30)
Grad_Date.grid(row = 6, column = 1, padx = 20)
#Grad_Date.insert(0, studentList[x][6])
#Create Text Box Labels
First_Name_Label = Label(newWindow, text = 'First Name')
First_Name_Label.grid(row = 0, column = 0, pady = (10, 0))
Last_Name_Label = Label(newWindow, text = 'Last Name')
Last_Name_Label.grid(row = 1, column = 0)
Student_ID_Label = Label(newWindow, text = 'Student ID')
Student_ID_Label.grid(row = 2, column = 0)
Email_Label = Label(newWindow, text = 'Email')
Email_Label.grid(row = 3, column = 0)
Department_Label = Label(newWindow, text = 'Department')
Department_Label.grid(row = 4, column = 0)
Major_Label = Label(newWindow, text = 'Major')
Major_Label.grid(row = 5, column = 0)
Grad_Date_Label = Label(newWindow, text = 'Grad Date')
Grad_Date_Label.grid(row = 6, column = 0)
#index = dataListBox.get(0, END).index(searchText)
#print(index)
#if (index == "ERROR"):
# filterOptions().searchBox.insert(0, "STRING NOT FOUND")
# Gets location of current selection
#index = dataListBox.curselection()[0]
def addStudent():
global studentList
'''
First_Name
Last_Name
Student_ID
Email
Department
Major
Grad_Date
'''
newWindow = Toplevel(root)
newWindow.title("Add Student")
newWindow.geometry("365x230")
newWindow.iconbitmap('hat.ico')
newWindow.resizable(width=False, height=False) # Window size changeability
#Create Text Boxes
First_Name = Entry(newWindow, width = 30)
First_Name.grid(row = 0, column = 1, padx = 20, pady = (10, 0))
First_Name.insert(0, "FIRSTNAME")
Last_Name = Entry(newWindow, width = 30)
Last_Name.grid(row = 1, column = 1, padx = 20)
Last_Name.insert(0, "LASTNAME")
Student_ID = Entry(newWindow, width = 30)
Student_ID.grid(row = 2, column = 1, padx = 20)
Student_ID.insert(0, "#####")
Email = Entry(newWindow, width = 30)
Email.grid(row = 3, column = 1, padx = 20)
Email.insert(0, "<EMAIL>")
Department = Entry(newWindow, width = 30)
Department.grid(row = 4, column = 1, padx = 20)
Department.insert(0, "Business")
Major = Entry(newWindow, width = 30)
Major.grid(row = 5, column = 1, padx = 20)
Major.insert(0, "Finance")
Grad_Date = Entry(newWindow, width = 30)
Grad_Date.grid(row = 6, column = 1, padx = 20)
Grad_Date.insert(0, "##/##/20##")
#Create Text Box Labels
First_Name_Label = Label(newWindow, text = 'First Name')
First_Name_Label.grid(row = 0, column = 0, pady = (10, 0))
Last_Name_Label = Label(newWindow, text = 'Last Name')
Last_Name_Label.grid(row = 1, column = 0)
Student_ID_Label = Label(newWindow, text = 'Student ID')
Student_ID_Label.grid(row = 2, column = 0)
Email_Label = Label(newWindow, text = 'Email')
Email_Label.grid(row = 3, column = 0)
Department_Label = Label(newWindow, text = 'Department')
Department_Label.grid(row = 4, column = 0)
Major_Label = Label(newWindow, text = 'Major')
Major_Label.grid(row = 5, column = 0)
Grad_Date_Label = Label(newWindow, text = 'Grad Date')
Grad_Date_Label.grid(row = 6, column = 0)
goodAdd_Label = Label(newWindow, text="* * *")
goodAdd_Label.grid(row = 8, columnspan=2)
def retrieve_input(entryBox):
input = entryBox.get()
return input
# Button disables after a successfull addition
def goodAdd():
global filePathCurrent
global studentList
## Add student
# Get entered text
firstName = retrieve_input(First_Name)
lastName = retrieve_input(Last_Name)
studentid = retrieve_input(Student_ID)
email_ = retrieve_input(Email)
department_ = retrieve_input(Department)
major_ = retrieve_input(Major)
gradDate = retrieve_input(Grad_Date)
# Store into the table
gatheredText = [firstName, lastName, studentid, email_, department_, major_, gradDate]
studentList.append(gatheredText)
name = lastName + ", " + firstName
dataListBox.insert(END, (name))
dataListBoxID.insert(END, (studentid))
dataListBoxEmail.insert(END, (email_))
dataListBoxDepartment.insert(END, (department_))
dataListBoxMajor.insert(END, (major_))
dataListBoxDate.insert(END, (gradDate))
# formattedText = (lastName + ", " + firstName + " " + studentid + " " + email_ + " " + department_ + " " + major_ + " " + gradDate)
# dataListBox.insert(END, (formattedText))
        # Confirmation & disable button
        goodAdd_Label.config(text="Successful Add!")
Add_button.config(state=DISABLED)
# Create Add Button
Add_button = Button(newWindow, text = 'Add Student to Database', bg="SeaGreen1", command=goodAdd)
Add_button.grid(row = 7, column = 0, columnspan = 2, pady = 10, padx = 10, ipadx = 100)
# print(index)
return None
"""
# for r in range (rows):
# for c in range (cols):
canvas = tk.Canvas(dataTableFrame, bg="white", width=700, height=500)
canvas.pack(fill="both", expand="yes")
canvas2 = tk.Canvas(canvas, width=700, height=500)
canvas2.pack(side="left")
labelData = tk.Label(canvas2, text=(df.to_string()), bg="white")
labelData.grid(row=rows, column=cols)
scrollbar = tk.Scrollbar(canvas, command=canvas.yview)
scrollbar.pack(side="right", fill="y")
canvas.configure(yscrollcommand=scrollbar.set)
"""
#######################################################################################################
#######################################################################################################
def bottomButtons():
# Label Frame
bottomButtonsFrame = tk.LabelFrame(
root, text="Database Options", pady=5, padx=5)
bottomButtonsFrame.pack(side="bottom", padx=10, pady=10, fill="x", expand="no")
# Buttons
button_refresh = tk.Button(bottomButtonsFrame, text="Refresh Table", bg="light sky blue", command=refreshTable)
button_refresh.pack(side='left', padx=5)
button_save = tk.Button(bottomButtonsFrame, text="Save Current Database", bg="pale green", command=saveFile)
button_save.pack(side='left', padx=5)
#button_emailStudent = tk.Button(bottomButtonsFrame, text="Email Student(s)", bg="CadetBlue1")
#button_emailStudent.pack(side='left', padx=5)
button_add = tk.Button(bottomButtonsFrame, text="Add Student", bg="SeaGreen1", command=addStudent)
button_add.pack(side='right', padx=5)
button_update = tk.Button(bottomButtonsFrame, text="Update Student", bg="goldenrod1", command=updateStudent) # DarkSeaGreen1
button_update.pack(side='right', padx=5)
button_delete = tk.Button(bottomButtonsFrame, text="Delete Student", bg="IndianRed1", command=deleteOne)
button_delete.pack(side='right', padx=5)
button_clearTable = tk.Button(bottomButtonsFrame, text="CLEAR Table", bg="yellow2", command=deleteAll)
button_clearTable.pack(side='right', padx=5)
def userGuide():
newWindow = Toplevel(root)
newWindow.title("About Studabase")
newWindow.geometry("500x500")
newWindow.iconbitmap('hat.ico')
newWindow.resizable(width=True, height=True) # Window size changeability
about_Label = Label(newWindow, text = "STUDABASE (stoo-da-base) is a GUI style student database organizational software that allows its users to:" + '\n' + "Take data from a MySQL database and translate it to a GUI system." + '\n' + "Sort data by fields such as student ID, first and last name, email, department, etc." + '\n' + "Add and remove students as well as search for specific ones." + '\n' + "Restrict displayed data through various filters.")
about_Label.grid(row = 0, column = 0, pady = (10, 0))
def aboutStudabase():
newWindow = Toplevel(root)
newWindow.title("About Studabase")
newWindow.geometry("500x800")
newWindow.iconbitmap('hat.ico')
newWindow.resizable(width=True, height=True) # Window size changeability
about_Label = Label(newWindow, text = "SRDG - STUDABASE: The Student Database (Stoo-da-base)" + '\n' + "<NAME> <EMAIL>" + '\n' + "<NAME> <EMAIL>" + '\n' + "<NAME> <EMAIL>" + '\n' + "<NAME> <EMAIL>")
about_Label.grid(row = 0, column = 0, pady = (10, 0))
def mainWindow():
# Root Options
root.title("STUDABASE: The Student Database ")
# Icon - .ico file should be in the same directory as this file.
root.iconbitmap('hat.ico')
# Window Size: root.geometry('500x600')
root.geometry('800x800')
# Stops windows size from being changeable
root.resizable(width=True, height=True)
# root.configure(bg = 'gray24')
# MENU BAR
menubar = tk.Menu(root)
# File - Menu Bar
fileMenu = tk.Menu(menubar, tearoff=0)
#fileMenu.add_command(label="New Database", command=menuFunctions.placeHolderFunc)
fileMenu.add_command(label="Open Database", command=insertData)
fileMenu.add_command(label="Save As...(Current Database)", command=saveFile)
fileMenu.add_separator()
#fileMenu.add_command(label="Properties...", command=menuFunctions.placeHolderFunc)
#fileMenu.add_separator()
fileMenu.add_command(label="Exit", command=root.quit)
menubar.add_cascade(label="File", menu=fileMenu)
# Edit - Menu Bar
editMenu = tk.Menu(menubar, tearoff=0)
editMenu.add_command(label="Refresh Database", command=refreshTable)
editMenu.add_separator()
#editMenu.add_command(label="Select All", command=menuFunctions.placeHolderFunc)
editMenu.add_separator()
editMenu.add_command(label="Add Student", command=addStudent)
editMenu.add_command(label="Delete Student(s)", command=deleteOne)
editMenu.add_separator()
menubar.add_cascade(label="Edit", menu=editMenu)
# View - Menu Bar
#viewMenu = tk.Menu(menubar, tearoff=0)
#viewMenu.add_command(label="Choice 1", command=menuFunctions.placeHolderFunc)
#viewMenu.add_command(label="Choice 2", command=menuFunctions.placeHolderFunc)
#viewMenu.add_separator()
#viewMenu.add_command(label="Choice 3", command=menuFunctions.placeHolderFunc)
#viewMenu.add_command(label="Choice 4", command=menuFunctions.placeHolderFunc)
#menubar.add_cascade(label="View", menu=viewMenu)
# Settings - Menu Bar
#settingsMenu = tk.Menu(menubar, tearoff=0)
# Change id & pass for current database
#settingsMenu.add_command(label="Database Settings", command=menuFunctions.placeHolderFunc)
# Change email platform/tool
#settingsMenu.add_command(label="Email Platform", command=menuFunctions.placeHolderFunc)
# Block changes - disables adding and deleting students (basically a read only mode)
#settingsMenu.add_command( label="View Only Mode", command=menuFunctions.placeHolderFunc)
# settingsMenu.add_separator()
#menubar.add_cascade(label="Settings", menu=settingsMenu)
# Help - Menu Bar
helpmenu = tk.Menu(menubar, tearoff=0)
# Display guide on how to use STUDABASE
helpmenu.add_command(label="User Guide", command=userGuide)
# Display info abut STUDABASE - Creators, when made, etc.
helpmenu.add_command(label="About STUDABASE", command=aboutStudabase)
# helpmenu.add_separator()
menubar.add_cascade(label="Help", menu=helpmenu)
filterOptions()
dataTable()
bottomButtons()
# Needed for Menu bar
root.config(menu=menubar)
# GUI program is constantly looping to check for changes - Loop created
root.mainloop() # THIS SHOULD BE THE LAST LINE
| 3.203125 | 3 |
2019/day15.py | dimkarakostas/advent-of-code | 2 | 12789403 | <filename>2019/day15.py
class Program:
def __init__(self, memory):
self.memory = memory
self.halted, self.outputs, self.idx, self.rel_base = False, [], 0, 0
def run(self, inputs):
used_inputs = 0
while self.idx < len(self.memory):
instruction = str(self.memory[self.idx])
opcode, modes = int(instruction[-2:]), instruction[:-2][::-1] + 3 * '0'
self.idx += 1
if opcode in (1, 2, 5, 6, 7, 8): # Two params
ins = []
for i in range(2):
param = self.memory[self.idx + i]
if modes[i] == '0':
if param > len(self.memory) - 1:
self.memory += [0 for _ in range(param)]
ins.append(self.memory[param])
elif modes[i] == '1':
ins.append(param)
elif modes[i] == '2':
if param + self.rel_base > len(self.memory) - 1:
self.memory += [0 for _ in range(param + self.rel_base)]
ins.append(self.memory[param + self.rel_base])
if opcode in (1, 2, 7, 8): # Two params, one output
assert modes[2] in ('0', '2'), 'Invalid write'
out = self.memory[self.idx + 2]
if modes[2] == '2':
out += self.rel_base
if out > len(self.memory) - 1:
self.memory += [0 for _ in range(out)]
if opcode in (1, 2):
self.memory[out] = ins[0] + ins[1] if opcode == 1 else ins[0] * ins[1]
else:
if opcode == 7:
self.memory[out] = 1 if ins[0] < ins[1] else 0
else:
self.memory[out] = 1 if ins[0] == ins[1] else 0
self.idx += 3
elif opcode in (5, 6): # Two params, no output
if any([
opcode == 5 and ins[0] != 0,
opcode == 6 and ins[0] == 0,
]):
self.idx = ins[1]
else:
self.idx += 2
elif opcode in (3, 4, 9): # Single param
param = self.memory[self.idx]
if param + self.rel_base > len(self.memory) - 1:
self.memory += [0 for _ in range(param + self.rel_base)]
if opcode == 3:
assert modes[0] in ('0', '2'), 'Invalid write'
if used_inputs < len(inputs):
inp = inputs[used_inputs]
used_inputs += 1
if modes[0] == '0':
self.memory[param] = inp
elif modes[0] == '2':
self.memory[param + self.rel_base] = inp
else:
self.idx -= 1
return used_inputs
elif opcode == 4: # Single input, one output
if modes[0] == '0':
out = self.memory[param]
elif modes[0] == '1':
out = param
elif modes[0] == '2':
out = self.memory[param + self.rel_base]
self.outputs.append(out)
elif opcode == 9:
if modes[0] == '0':
self.rel_base += self.memory[param]
elif modes[0] == '1':
self.rel_base += param
elif modes[0] == '2':
self.rel_base += self.memory[param + self.rel_base]
self.idx += 1
elif opcode in (99, ): # No param
self.halted = True
break
else:
assert False, 'Unknown opcode'
assert self.halted, 'Exit without halting'
class _Getch:
def __call__(self):
import sys
import tty
import termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
getch = _Getch()
program = [int(i) for i in open('input15').readlines()[0].split(',')]
p = Program(program[:])
grid_size = 70
grid = [['.' for _ in range(grid_size)] for _ in range(grid_size)]
d_x, d_y = grid_size / 2, grid_size / 2
init_x, init_y = d_x, d_y
o_x, o_y, found = -1, -1, False
# STEP 1: Explore grid
moves = []
while True:
grid[init_x][init_y] = 'o'
grid[d_x][d_y] = '@'
if found:
grid[o_x][o_y] = '$'
for ln in grid:
print ''.join(ln)
move = getch()
moves.append(move)
if move == 'a':
s_x, s_y = d_x, d_y - 1
inp = 1
elif move == 'd':
s_x, s_y = d_x, d_y + 1
inp = 2
elif move == 'w':
s_x, s_y = d_x - 1, d_y
inp = 3
elif move == 's':
s_x, s_y = d_x + 1, d_y
inp = 4
elif move == 'p':
break
p.run([inp])
if p.outputs[-1] == 0:
grid[s_x][s_y] = '#'
elif p.outputs[-1] in (1, 2):
grid[d_x][d_y] = ' '
d_x, d_y = s_x, s_y
        if p.outputs[-1] == 2:
            found = True
            o_x, o_y = d_x, d_y  # record the oxygen system position for the BFS below
open('moves', 'w').write(''.join(moves))
# STEP 2: BFS to fill grid with oxygen and find shortest path
empty_cells = set()
for x, ln in enumerate(grid):
for y, c in enumerate(ln):
if c not in ('#', '.'):
empty_cells.add((x, y))
visited, horizon = set(), set()
horizon.add((o_x, o_y))
mins = 0
while len(visited) < len(empty_cells):
if ((init_x, init_y)) in horizon:
print 'Part 1:', mins
mins += 1
new_horizon = set()
for (c_x, c_y) in horizon:
visited.add((c_x, c_y))
for i in (-1, 1):
if grid[c_x + i][c_y] not in ('#', '.') and (c_x + i, c_y) not in visited.union(horizon):
new_horizon.add((c_x + i, c_y))
if grid[c_x][c_y + i] not in ('#', '.') and (c_x, c_y + i) not in visited.union(horizon):
new_horizon.add((c_x, c_y + i))
horizon = (horizon.union(new_horizon)).difference(visited)
print 'Part 2:', mins - 1
| 3.328125 | 3 |
bin/downloadData/coingecko_download.py | albertodiazz/forecastingApp | 0 | 12789404 | '''
This script downloads data from https://www.coingecko.com through its API, with the help of
data.JSON, which serves as the list of cryptos currently traded; that list is updated
manually. As for prices, it does not download entry/close price margins -- it is only data
for a general preview of the charts.
'''
from bin import pd,np,time,json,CoinGeckoAPI
from bin import constant as c
path = c.PATHCOINGECKO
# did not delete this since it does not affect anything
save_file = open(path + '/cardano.txt','w')
# here is the list of coins currently traded
lista_cryptos = c.PATHJSON
def get_lista_json(_path_):
_lista_ = open(_path_,"r")
_data_ = json.load(_lista_)
api_call_crypto_name, save_name_file = [],[]
for i in _data_['crypto_trade'][0]:
save_name_file.append(i.lower())
api_call_crypto_name.append(_data_['crypto_trade'][0][i])
df = pd.DataFrame(columns=["name_file","crypto_api_name"])
df['name_file'] = save_name_file
df['crypto_api_name'] = api_call_crypto_name
return df
def api_gecko(path_,output_file_,_id_,name_save_file):
cg = CoinGeckoAPI()
sb = cg.ping()
    status_gecko = bool(sb)  # the ping call above returned, so the API is reachable
resultado = cg.get_coin_market_chart_by_id(id=str(_id_),vs_currency='usd',days='365')
fecha = []
tiempo = []
precios = []
for i in range(len(resultado['prices'])):
fecha.append(time.strftime('%Y.%m.%d',time.localtime((resultado['prices'][i][0])/1000)))
tiempo.append(time.strftime('%H:%M:%S',time.localtime((resultado['prices'][i][0])/1000)))
precios.append(resultado['prices'][i][1])
data_f = np.array(fecha)
data_t = np.array(tiempo)
data_p = np.array(precios)
data_ = {'FECHA':data_f,'HORA':data_t,'PRECIO':data_p}
df = pd.DataFrame(data=data_)
df.to_csv(path_ + '/' + str(name_save_file) + '.csv')
return status_gecko
def startDownload():
data = get_lista_json(lista_cryptos)
try:
for index in range(len(data)):
name_file = data['name_file'].iloc[index]
id = data['crypto_api_name'].iloc[index]
print(str(id),str(name_file))
api_gecko(path,save_file,_id_=str(id),name_save_file=str(name_file))
print('<<<<<<< ' + str(id) + ' >>>>>>>>>')
except Exception as error:
print(error)
#status_gecko = api_gecko(path,save_file)
| 3.4375 | 3 |
screenpy/actions/select.py | pjbarbatsis/screenpy | 0 | 12789405 | from typing import Union
from selenium.webdriver.support.ui import Select as SelSelect
from ..actor import Actor
from ..pacing import beat, MINOR
from ..target import Target
class Select:
"""
Selects an option from a dropdown menu. This is a superclass that will
create the correct specific Select action that will need to be used,
depending on how the option needs to be selected. Some examples of
invocations:
Select.the_option_named("January").from_the(MONTH_DROPDOWN)
Select.the_option_at_index(0).from_the(MONTH_DROPDOWN)
Select.the_option_with_value("jan").from_the(MONTH_DROPDOWN)
It can then be passed along to the |Actor| to perform the action.
"""
@staticmethod
def the_option_named(text: str) -> "SelectByText":
"""
Instantiate a |SelectByText| class which will select the option
with the given text.
Args:
text (str): The text of the option to select.
Returns:
|SelectByText|
"""
return SelectByText(text)
@staticmethod
def the_option_at_index(index: Union[int, str]) -> "SelectByIndex":
"""
Instantiate a |SelectByIndex| class which will select the option
at the specified index. This index is 0-based.
Args:
index (Union[int, str]): The index (0-based) of the option to
select.
Returns:
|SelectByIndex|
"""
return SelectByIndex(index)
@staticmethod
def the_option_with_value(value: str) -> "SelectByValue":
"""
        Instantiate a |SelectByValue| class which will select the option
        with the given value.
        Args:
            value (str): The value of the option to select.
        Returns:
            |SelectByValue|
"""
return SelectByValue(value)
class SelectByText:
"""
A specialized Select action that chooses the option by text. This
class is meant to be accessed via the Select action's static
|Select.the_option_named| method. A typical invocation might look
like:
Select.the_option_named("January").from_the(MONTH_DROPDOWN)
It can then be passed along to the |Actor| to perform the action.
"""
def from_the(self, target: Target) -> "SelectByText":
"""
Provides the |Target| to select the option from.
Args:
target (Target): The |Target| describing the dropdown element
to select from
Returns:
|SelectByText|
"""
self.target = target
return self
def from_(self, target: Target) -> "SelectByText":
"""Syntactic sugar for |SelectByText.from_the|."""
return self.from_the(target)
@beat("{0} selects the option '{text}' from the {target}.", gravitas=MINOR)
def perform_as(self, the_actor: Actor) -> None:
"""
Asks the actor to attempt to find the dropdown element described
by the stored target, then performs the select action.
Args:
the_actor (Actor): The |Actor| who will perform the action.
Raises:
|UnableToPerformException|: if the actor does not have the
ability to |BrowseTheWeb|.
"""
element = self.target.found_by(the_actor)
select = SelSelect(element)
select.select_by_visible_text(self.text)
def __init__(self, text: str, target: Target = None) -> None:
self.target = target
self.text = text
class SelectByIndex:
"""
A specialized |Select| action that chooses the option by its index.
This class is meant to be accessed via the Select action's static
|Select.the_option_at_index| method. A typical invocation might look
like:
Select.the_option_at_index(0).from_the(MONTH_DROPDOWN)
It can then be passed along to the |Actor| to perform the action.
"""
def from_the(self, target: Target) -> "SelectByIndex":
"""
Provides the |Target| to select the option from.
Args:
target (Target): The |Target| describing the dropdown element
to select from
Returns:
|SelectByIndex|
"""
self.target = target
return self
def from_(self, target: Target) -> "SelectByIndex":
"""Syntactic sugar for |SelectByIndex.from_the|."""
return self.from_the(target)
@beat("{0} selects the option at index {index} from the {target}.", gravitas=MINOR)
def perform_as(self, the_actor: Actor) -> None:
"""
Asks the actor to attempt to find the dropdown element described
by the stored target, then performs the select action.
Args:
the_actor (Actor): The |Actor| who will perform the
action.
Raises:
|UnableToPerformException|: if the actor does not have the
ability to |BrowseTheWeb|.
"""
element = self.target.found_by(the_actor)
select = SelSelect(element)
select.select_by_index(self.index)
def __init__(self, index: Union[int, str], target: Target = None) -> None:
self.target = target
self.index = str(index)
class SelectByValue:
"""
A specialized Select action that chooses the option by its value. This
class is meant to be accessed via the Select action's static
|Select.the_option_with_value| method. A typical invocation might look
like:
Select.the_option_with_value("jan").from_the(MONTH_DROPDOWN)
It can then be passed along to the |Actor| to perform the action.
"""
def from_the(self, target: Target) -> "SelectByValue":
"""
Provides the |Target| to select the option from.
Args:
target (Target): The |Target| describing the dropdown element
to select from
Returns:
|SelectByValue|
"""
self.target = target
return self
def from_(self, target: Target) -> "SelectByValue":
"""Syntactic sugar for |SelectByValue.from_the|."""
return self.from_the(target)
@beat(
"{0} selects the option with value '{value}' from the {target}.", gravitas=MINOR
)
def perform_as(self, the_actor: Actor) -> None:
"""
Asks the actor to attempt to find the dropdown element described
by the stored target, then performs the select action.
Args:
the_actor (Actor): The |Actor| who will perform the action.
Raises:
|UnableToPerformException|: if the actor does not have the
ability to |BrowseTheWeb|.
"""
element = self.target.found_by(the_actor)
select = SelSelect(element)
select.select_by_value(self.value)
def __init__(self, value: Union[int, str], target: Target = None) -> None:
self.target = target
self.value = str(value)
| 3.5625 | 4 |
lec2-1.py | cutz-j/PyMC | 0 | 12789406 | import pymc as pm
import matplotlib.pyplot as plt
import numpy as np
plt.rc('font', family='Malgun Gothic')
lambda_ = pm.Exponential("poisson_param", 1)
data_generator = pm.Poisson("data_generater", lambda_)
data_plus_one = data_generator + 1
print(lambda_.children)
print(data_generator.parents)
# value
print(lambda_.value)
betas = pm.Uniform("betas", 0, 1, size=5)
betas.value
## random
ld1 = pm.Exponential("lambda_1", 1) # prior for the first behavior
ld2 = pm.Exponential("lambda_2", 1) # prior for the second behavior
tau = pm.DiscreteUniform("tau", lower=0, upper=10) # prior for the behavior change point
print("init")
print(ld1.value)
print(ld2.value)
print(tau.value)
print(ld1.random(), ld2.random(), tau.random())
print("random call")
print(ld1.value)
print(ld2.value)
print(tau.value)
n_data_points = 5
@pm.deterministic
def labmda_(tau=tau, lambda_1=ld1, lambda_2=ld2):
out = np.zeros(n_data_points)
out[:tau] = lambda_1
out[tau:] = lambda_2
return out
####################################################
#### Include observations in the model ####
figsize = (12.5, 4)
plt.figure(figsize=figsize)
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['figure.dpi'] = 300
samples = [ld1.random() for i in range(20000)]
plt.hist(samples, bins=70, normed=True, histtype="stepfilled")
plt.xlim(0, 8)
plt.show()
# fixed value
data = np.array([10, 25, 15, 20, 35])
obs = pm.Poisson("obs", lambda_, value=data, observed=True)
obs.value
##################
##### Modeling #####
tau = pm.rdiscrete_uniform(0, 80)
alpha = 1./20.
lambda_1, lambda_2 = pm.rexponential(alpha, 2)
lambda_ = np.r_[lambda_1*np.ones(tau), lambda_2*np.ones(80-tau)]
data = pm.rpoisson(lambda_)
plt.bar(np.arange(80), data, color="#348ABD")
plt.bar(tau-1, data[tau-1], color='r', label='behavior change')
plt.xlabel("time")
plt.ylabel("message")
plt.xlim(0, 80)
plt.legend()
| 2.484375 | 2 |
dicom_review/conf/urls.py | chop-dbhi/django-dicom-review | 6 | 12789407 | import re
from django.conf.urls import url, patterns, include
from django.conf import settings
from django.contrib import admin
from django.views.generic import TemplateView
from dicom_review.views import review
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', review),
url(r'^login/$', 'django.contrib.auth.views.login', name="login"),
# Administrative components
url(r'^admin/', include(admin.site.urls)),
)
# In production, these two locations must be served up statically
urlpatterns += patterns('django.views.static',
url(r'^%s(?P<path>.*)$' % re.escape(settings.MEDIA_URL.lstrip('/')), 'serve', {
'document_root': settings.MEDIA_ROOT
}),
url(r'^%s(?P<path>.*)$' % re.escape(settings.STATIC_URL.lstrip('/')), 'serve', {
'document_root': settings.STATIC_ROOT
}),
)
| 1.875 | 2 |
tests/common/test_op/ascend/fake_quant_with_min_max_args.py | tianjiashuo/akg | 286 | 12789408 | <reponame>tianjiashuo/akg<gh_stars>100-1000
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: fake_quant_with_min_max_args"""
import akg
from akg import tvm, topi
from akg.utils.format_transform import get_shape
import akg.utils as utils
from akg.ops.math.ascend import Floor
def nudge_min_max(min, max, num_bits, narrow_range):
"""
Calculate the maximum and minimum values of the quantization
Args:
min: scalar, input min
max: input max
num_bits: scalar
Defaults to 8. num_bits is the bitwidth of the quantization, range [2,16]
narrow_range: bool
Returns:
nudged_min, nudged_max, scale
"""
quant_max = (2**num_bits) - 1
if narrow_range is False:
quant_min = 0.00
else:
quant_min = 1.00
scale = (max - min) / (float(quant_max) - quant_min)
zero_point_from_min = quant_min - min / scale
# Calculate the maximum and minimum values of the quantization
if zero_point_from_min < quant_min:
nudged_zero_point = quant_min
elif zero_point_from_min > quant_max:
nudged_zero_point = quant_max
else:
nudged_zero_point = (zero_point_from_min + 0.5) // 1
nudged_min = (quant_min - nudged_zero_point) * scale
nudged_max = (quant_max - nudged_zero_point) * scale
return nudged_min, nudged_max, scale
@utils.check_input_type(tvm.tensor.Tensor,
(float, int, type(None)),
(float, int, type(None)),
(int, type(None)),
(bool, type(None)))
def fake_quant_with_min_max_args(input_data, min=-6, max=6, num_bits=8,
narrow_range=False):
"""
Computes Fake-quantize the 'input_data' tensor,
type float32 to 'output_data' tensor of same type
output_data = (floor(clamped_shifted * inv_nudged_scale + 0.5f))) * scale
+ nudged_min
scale = (max-min) / (quant_max-quant_min)
Args:
        input_data (tvm.tensor.Tensor): Tensor of dtype "float32"
        min ([float, int]): scalar, defaults to -6
        max ([float, int]): scalar, defaults to 6. [min; max] define the
        clamping range for the input_data data
        num_bits ([float, int]): Defaults to 8. num_bits is the bitwidth
        of the quantization, between 2 and 16
narrow_range ([bool]):
True, quantized into the quantization range [1; 2^num_bits - 1]
False,quantized into the quantization range [0; 2^num_bits - 1]
Returns:
tvm.tensor.Tensor
"""
shape = get_shape(input_data)
utils.check_shape(shape)
dtype = input_data.dtype
utils.ops_dtype_check(dtype, utils.DtypeForDavinci.FLOAT32)
nudged_min, nudged_max, scale = nudge_min_max(min, max, num_bits,
narrow_range)
zero_tensor = tvm.compute(input_data.shape,
lambda *i: tvm.const(0, dtype="float32"),
name="zero_tensor")
nudged_max_tensor = topi.add(zero_tensor, nudged_max)
nudged_min_tensor = topi.add(zero_tensor, nudged_min)
inv_nudged_scale = 1.00 / scale
# Transform the input between nudged_max and nudged_min
clamped_vmin = topi.minimum(input_data, nudged_max_tensor)
clamped = topi.maximum(clamped_vmin, nudged_min_tensor)
# Calculate the quantized and dequantized results
clamped_shifted = topi.subtract(clamped, nudged_min_tensor)
vmul_shifted = topi.multiply(clamped_shifted, inv_nudged_scale)
vadds_shifted = topi.add(vmul_shifted, 0.5)
floor_vadds_shifted = Floor(vadds_shifted)
floor_cast = akg.lang.ascend.cast_to(floor_vadds_shifted, dtype)
res_scale = topi.multiply(floor_cast, scale)
res = topi.add(res_scale, nudged_min_tensor)
return res
| 2.515625 | 3 |
medial_axis/__init__.py | dipaco/cpma | 2 | 12789409 | <filename>medial_axis/__init__.py<gh_stars>1-10
from .cpma import cpma, cpma_3d
| 1.164063 | 1 |
setup.py | bijulette/ParallelFDTD | 4 | 12789410 | <filename>setup.py<gh_stars>1-10
import setuptools
import os
import shutil
import subprocess
from setuptools import Extension
from setuptools.command.build_py import build_py
from setuptools.command.build_ext import build_ext
from setuptools.command.install_lib import install_lib
PACKAGE_NAME = 'pyParallelFDTD'
class CMakeExtension(Extension):
def __init__(self, name, sources=[], libraries=[]):
super().__init__(name, sources, libraries=libraries)
self.sourcedir = os.path.abspath(".")
class InstallCMakeLibs(install_lib):
def run(self):
self.distribution.data_files = []
# make sure the cmake command is run
self.run_command('build_ext')
self.run_command('build_py')
library_dir = self.distribution.cmake_build_dir
library_dir = os.path.join(library_dir, 'python')
build_dir = os.path.join(self.build_dir, PACKAGE_NAME)
print(build_dir)
print(library_dir)
# check files in the binary directory and copy any libraries
# This is a bit more general than we need, but it solves the problem
# of different file extensions in Unix and Windows
for filename in os.listdir(library_dir):
print(library_dir, filename)
# check if the file is a library
if (os.path.isfile(os.path.join(library_dir, filename)) and
os.path.splitext(filename)[1] in [".dll", ".so"]):
libname = os.path.join(library_dir, filename)
# build_dir seems to be where the package folder goes
dist_path = os.path.join(build_dir)
dist_name = os.path.join(dist_path, filename)
print(libname, dist_path, dist_name)
shutil.copy(libname, dist_path)
self.distribution.data_files.append(dist_name)
super().run()
def get_outputs(self):
return self.distribution.data_files
class CMakeBuild(build_ext):
def run(self):
# Check that Cmake is installed
try:
subprocess.check_output(["cmake", "--version"])
except (OSError, subprocess.SubprocessError):
raise RuntimeError(
"Cannot find Cmake. Please install Cmake before continuing."
)
# Run the build command. The build_ext class contains a list
        # of buildable extensions. There should be only one, but
        # run through the whole list for compatibility
for ext in self.extensions:
self.build_extension(ext)
super().run()
def build_extension(self, ext):
cmake_config = ["-DBUILD_PYTHON=on"]
self.distribution.cmake_build_dir = self.build_temp
# Create the build directory if necessary
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
# Run the Cmake command
subprocess.check_call(
["cmake", f"-H{ext.sourcedir}", f"-B{self.build_temp}"] +
cmake_config
)
# Build and install
subprocess.check_call(
["cmake", "--build", self.build_temp, '-t', 'libPyFDTD', '--']
)
#subprocess.check_call(
# ["cmake", "--install", self.build_temp]
#)
setuptools.setup(
name=PACKAGE_NAME,
version='0.0',
description="Python bindings to ParallelFDTD",
url="https://github.com/AaltoRSE/ParallelFDTD",
license="MIT License",
packages=[
PACKAGE_NAME,
],
package_dir={
PACKAGE_NAME: 'dist/libPyFDTD',
},
package_data={'pyParallelFDTD': ['liblibPyFDTD.so']},
ext_modules=[
CMakeExtension(PACKAGE_NAME+".liblibPyFDTD", libraries='liblibPyFDTD.so'),
],
cmdclass={
'build_ext': CMakeBuild,
'install_lib': InstallCMakeLibs,
},
)
| 2.03125 | 2 |
PythonProjects/TinderLike/main.py | carlosmpr/CodePortafolio | 0 | 12789411 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
chrome_driver_location = "E:\Development\chromedriver.exe"
driver = webdriver.Chrome(executable_path=chrome_driver_location)
driver.get("https://tinder.com/app/recs")
base_window = driver.window_handles[0]
login = driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div/div/header/div/div[2]/div[2]/button')
login.click()
time.sleep(5)
google = driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div[1]/div/div[3]/span/div[1]/div/button')
google.click()
time.sleep(5)
google_login_window = driver.window_handles[1]
driver.switch_to.window(google_login_window)
google_email = driver.find_element_by_xpath('/html/body/div[1]/div[1]/div[2]/div/div[2]/div/div/div[2]/div/div[1]/div/form/span/section/div/div/div[1]/div/div[1]/div/div[1]/input')
google_email.send_keys("<EMAIL>")
next_button_username = driver.find_element_by_xpath('//*[@id="identifierNext"]/div/button')
next_button_username.click()
time.sleep(5)
google_password = driver.find_element_by_xpath('//*[@id="password"]/div[1]/div/div[1]/input')
google_password.send_keys("<PASSWORD>")
next_button_password = driver.find_element_by_xpath('//*[@id="passwordNext"]/div/button')
next_button_password.click()
time.sleep(5)
driver.switch_to.window(base_window)
allow_location_button = driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/button[1]')
allow_location_button.click()
not_interested = driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/button[2]')
not_interested.click()
cookies = driver.find_element_by_xpath('//*[@id="content"]/div/div[2]/div/div/div[1]/button')
cookies.click()
time.sleep(5)
body = driver.find_element_by_xpath('//*[@id="Tinder"]/body')
for i in range(0, 101):
time.sleep(5)
body.send_keys(Keys.ARROW_RIGHT)
| 2.765625 | 3 |
analytics/object/language.py | dpatel257/Smart-City-Sample | 126 | 12789412 | #!/usr/bin/python3
text={
"object-detection": "object-detection",
"svcq-counting": "svcq-counting",
}
| 1.046875 | 1 |
src/bot/core.py | mdcg/youtube-telegram-webhook | 1 | 12789413 | import sys
from src.bot import logger
from src.bot.messages import (
AVAILABLE_COMMANDS_MESSAGE,
GREETINGS_MESSAGE,
NON_INFORMED_CHANNEL_ID_MESSAGE,
SUBSCRIPTION_ERROR_MESSAGE,
SUBSCRIPTION_MESSAGE,
UNKNOWN_MESSAGE,
)
from src.bot.requester import subscribe_in_pubsubhubbub
from src.database.utils import save_channel, save_user, subscribe_user
from src.settings import TELEGRAM_TOKEN
from telegram.ext import (
CommandHandler,
Filters,
MessageHandler,
Updater,
CallbackContext,
)
from telegram import Update
def start_command(update: Update, context: CallbackContext):
"""As soon as the bot is started, the first command that by default the
user sends to it is '/start'. Here we define what will be answered,
which in this case, is a customized message with the name of the user
in question informing how he can interact with the bot.
Parameters
----------
update : Update
This object represents an incoming update.
context : CallbackContext
This is a context object passed to the callback called by
telegram.ext.Handler.
"""
context.bot.send_message(
chat_id=update.effective_chat.id,
text=GREETINGS_MESSAGE.format(update.effective_chat.username),
)
return None
def help_command(update: Update, context: CallbackContext) -> None:
"""To assist the user in teaching how he will use the bot, we have
specified this function that will give all the necessary
instructions to him.
Parameters
----------
update : Update
This object represents an incoming update.
context : CallbackContext
This is a context object passed to the callback called by
telegram.ext.Handler.
"""
context.bot.send_message(
chat_id=update.effective_chat.id, text=AVAILABLE_COMMANDS_MESSAGE
)
return None
def unknown_command(update: Update, context: CallbackContext) -> None:
"""Some users can write commands that are not handled by the bot. In order
not to make him anxious without knowing if something went right or not,
any command that is not mapped by the service will be answered with a
redirect to him using the command '/help'
Parameters
----------
update : Update
This object represents an incoming update.
context : CallbackContext
This is a context object passed to the callback called by
telegram.ext.Handler.
"""
context.bot.send_message(
chat_id=update.effective_chat.id, text=UNKNOWN_MESSAGE
)
return None
def subscribe_command(update: Update, context: CallbackContext) -> None:
"""This function is our "flagship". Basically this is where the user will
be able to subscribe to a channel to receive notifications for new videos.
Parameters
----------
update : Update
This object represents an incoming update.
context : CallbackContext
This is a context object passed to the callback called by
telegram.ext.Handler.
"""
try:
channel_id = context.args[0]
except IndexError:
context.bot.send_message(
chat_id=update.effective_chat.id,
text=NON_INFORMED_CHANNEL_ID_MESSAGE,
)
return None
logger.info("Channel subscription requested. Initializing processing.")
chat_id = update.effective_chat.id
save_user(chat_id)
status = subscribe_in_pubsubhubbub(channel_id)
if status == 202:
logger.info("Request sent successfully.")
save_channel(channel_id)
subscribe_user(channel_id, chat_id)
context.bot.send_message(
chat_id=update.effective_chat.id, text=SUBSCRIPTION_MESSAGE
)
else:
logger.warning(
f"There was a problem sending your subscribe request. Status Code received: {status}"
)
context.bot.send_message(
chat_id=update.effective_chat.id, text=SUBSCRIPTION_ERROR_MESSAGE
)
return None
def main() -> None:
"""This is where the bot will actually start and handle requests with
Telegram users.
"""
updater = Updater(token=TELEGRAM_TOKEN)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler("start", start_command))
dispatcher.add_handler(CommandHandler("help", help_command))
dispatcher.add_handler(CommandHandler("subscribe", subscribe_command))
dispatcher.add_handler(MessageHandler(Filters.command, unknown_command))
updater.start_polling()
updater.idle()
return None
if __name__ == "__main__":
logger.info("Initializing bot...")
try:
main()
except (KeyboardInterrupt, SystemExit):
logger.info("Stopping bot...")
sys.exit(0)
| 2.484375 | 2 |
benchmark/citation/patgcn.py | ecom-research/pytorch_geometric | 1 | 12789414 | <filename>benchmark/citation/patgcn.py<gh_stars>1-10
import argparse
import torch
from torch import nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
import torch_sparse
import MinkowskiEngine as ME
from datasets import get_planetoid_dataset
from train_eval import run, random_planetoid_splits
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='cora')
parser.add_argument('--random_splits', type=bool, default=False)
parser.add_argument('--runs', type=int, default=100)
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--weight_decay', type=float, default=0.0005)
parser.add_argument('--early_stopping', type=int, default=10)
parser.add_argument('--hidden', type=int, default=16)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--normalize_features', type=bool, default=True)
args = parser.parse_args()
class ConvAutoencoder(nn.Module):
def __init__(self, in_channel, out_channel, D):
super(ConvAutoencoder, self).__init__()
self.conv1 = ME.MinkowskiConvolution(in_channel, 16, 3, dimension=D)
self.conv2 = ME.MinkowskiConvolution(16, 4, 3, dimension=D)
self.pool = ME.MinkowskiMaxPooling(2, 2, dimension=D)
self.t_conv1 = ME.MinkowskiConvolutionTranspose(4, 16, 2, stride=2, dimension=D)
self.t_conv2 = ME.MinkowskiConvolutionTranspose(16, out_channel, 2, stride=2, dimension=D)
self.relu = ME.MinkowskiReLU()
self.sigmoid = ME.MinkowskiSigmoid()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.pool(x)
x = self.relu(self.conv2(x))
x = self.pool(x)
x = self.relu(self.t_conv1(x))
x = self.sigmoid(self.t_conv2(x))
return x
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.conv1.weight)
torch.nn.init.xavier_normal_(self.conv2.weight)
torch.nn.init.xavier_normal_(self.t_conv1.weight)
torch.nn.init.xavier_normal_(self.t_conv2.weight)
class Net(torch.nn.Module):
def __init__(self, dataset):
super(Net, self).__init__()
self.size = dataset.data.x.shape[0]
self.att_block = ConvAutoencoder(2, 2, D=1)
self.conv1 = GCNConv(dataset.num_features, args.hidden)
self.conv2 = GCNConv(args.hidden, dataset.num_classes)
def reset_parameters(self):
self.att_block.reset_parameters()
self.conv1.reset_parameters()
self.conv2.reset_parameters()
def path_transform(self, meta_path_edge_indicis, att_block):
device = meta_path_edge_indicis[0].device
coords = meta_path_edge_indicis[0].T
feats = torch.ones((meta_path_edge_indicis[0].shape[1], 2), device=device)
minkow_meta_path_sparse_mat = ME.SparseTensor(coords=coords, feats=feats)
att = att_block(minkow_meta_path_sparse_mat)
minkow_sparse_attended_adjs = minkow_meta_path_sparse_mat * att
coords, feats = minkow_sparse_attended_adjs.coords, minkow_sparse_attended_adjs.feats
sparse_attended_adjs = []
for feat_idx in range(feats.shape[1]):
idx = feats[:, feat_idx] != 0
sparse_attended_adjs.append(torch_sparse.coalesce(coords[idx].long().T, feats[idx, feat_idx], m=3, n=2, op='mean'))
sparse_adapted_adj = torch_sparse.eye(self.size, device=device)
for sparse_attended_adj in sparse_attended_adjs:
sparse_adapted_adj = torch_sparse.spspmm(*sparse_attended_adj, *sparse_attended_adj, m=3, k=3, n=2)
index, val = sparse_adapted_adj
return index, val / len(meta_path_edge_indicis)
def forward(self, data):
x, edge_index = data.x, data.edge_index
meta_path = [data.edge_index, data.edge_index]
meta_edge_index, edge_weight = self.path_transform(meta_path, self.att_block)
sparse_meta_adj_mat = torch.sparse.FloatTensor(
meta_edge_index,
edge_weight,
torch.Size([self.size, self.size])
)
sparse_adj_mat = torch.sparse.FloatTensor(
edge_index,
torch.ones(edge_index.shape[1], device=sparse_meta_adj_mat.device),
torch.Size([x.shape[0], x.shape[0]])
)
sparse_mean_adj_mat = (sparse_meta_adj_mat + sparse_adj_mat) / 2
sparse_mean_adj_mat = sparse_mean_adj_mat.coalesce()
adp_adj_edge_index, adp_adj_edge_weight = sparse_mean_adj_mat.indices(), sparse_mean_adj_mat.values()
x = F.relu(self.conv1(x, adp_adj_edge_index, edge_weight=adp_adj_edge_weight))
x = F.dropout(x, p=args.dropout, training=self.training)
x = self.conv2(x, adp_adj_edge_index, edge_weight=adp_adj_edge_weight)
# x = F.relu(self.conv1(x, edge_index))
# x = F.dropout(x, p=args.dropout, training=self.training)
# x = self.conv2(x, edge_index)
return F.log_softmax(x, dim=1)
dataset = get_planetoid_dataset(args.dataset, args.normalize_features)
Net(dataset)(dataset.data)
permute_masks = random_planetoid_splits if args.random_splits else None
run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay,
args.early_stopping, permute_masks)
| 2.109375 | 2 |
common/HtmlUtils.py | loveflycforever/TypeY | 0 | 12789415 | import datetime
import os
import certifi
import urllib3
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
# Write a file
def write_file(file_path, text_content):
with open(file_path, 'w', encoding='utf-8') as f:
f.write(datetime.datetime.now().strftime('<!-- [store time] %Y-%m-%d %H:%M:%S.%f -->\n'))
f.write(text_content)
# Read a file
def read_file(file_path):
with open(file_path, 'r', encoding='utf-8') as f:
file_content = f.read()
return file_content
# Check a file
def check_file(file_path, file_size=0):
return os.path.exists(file_path) and os.path.isfile(file_path) and os.path.getsize(file_path) / 1024 > file_size
# Build the file name
def __make_up__(directory, name=None):
if name:
path = '%s%s.html' % (directory, name)
else:
path = '%s%s.html' % (directory, datetime.datetime.now().strftime('%Y-%m-%d-%H'))
return path
# Open the URL with a browser
def browser_html(html_uri, storage_directory=None, file_name=None):
if storage_directory and file_name:
file_path = __make_up__(storage_directory, file_name)
if check_file(file_path):
store_html = read_file(file_path)
else:
store_html = __browser__(html_uri)
write_file(file_path, store_html)
else:
store_html = __browser__(html_uri)
return store_html
# Simulate a browser visit
def __browser__(uri):
options = Options()
options.set_headless()
browser = webdriver.Firefox(options=options)
browser.maximize_window()
browser.get(uri)
html_content = browser.page_source
browser.quit()
return html_content
# Open the URL with an HTTP request
def request_html(html_uri, need_https=True, storage_directory=None, file_name=None):
if storage_directory and file_name:
file_path = __make_up__(storage_directory, file_name)
if check_file(file_path):
store_html = read_file(file_path)
else:
store_html = __request__(html_uri, need_https)
write_file(file_path, store_html)
else:
store_html = __request__(html_uri, need_https)
return store_html
# Fetch the URL via HTTP
def __request__(uri, need_https=True):
if need_https:
html_http = urllib3.PoolManager(
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
else:
html_http = urllib3.PoolManager()
html_response = html_http.request('GET', uri)
html_content = html_response.data.decode()
return html_content
| 2.796875 | 3 |
coursera.py | ChutianShen/pointnet_kitti | 0 | 12789416 | <filename>coursera.py
import json
db = []
query_content_dict_index = {}
query_content_dict_index['id'] = 0
query_content_dict_index['last'] = 1
query_content_dict_index['first'] = 2
query_content_dict_index['location'] = 3
query_content_dict_index['active'] = 4
location_dict_index = {}
location_dict_index['city'] = 0
location_dict_index['state'] = 1
location_dict_index['postalCode'] = 2  # third element of the location list
def jsonQuery(order, content): # order is add, get or delete; content is the query
    if order == 'add':  # compare strings with ==, not identity
        store_json(content)
    elif order == 'get':
        return return_json(content)
    elif order == 'delete':
        delete_json(content)
    else:
        return 'Wrong order!!!'
def store_json(content):
db.append(content)
def return_json(content):
res = []
for entry in db:
if match(entry, content):
res.append(entry)
return res
def delete_json(content):
for entry in db:
if match(entry, content):
db.remove(entry)
def match(entry, content):
for condition in content:
key = condition.keys()[0] # .keys()[0] get its key
if key is "location":
#print key
for location_condition in condition[key]:
#print location_condition
location_condition_key = location_condition.keys()[0]
print location_condition_key
print location_dict_index[location_condition_key]
print entry[3]["location"][location_dict_index[location_condition_key]][location_condition_key]
if entry[3]["location"][location_dict_index[location_condition_key]][location_condition_key] != location_condition[location_condition_key]:
return False
else:
if entry[query_content_dict_index[key]][key] != condition[key]:
return False
return True
entry1 = [
{"id": 1}, {"last": "Doe"}, {"first": "John"},
{"location": [{"city": "Oakland"}, {"state": "CA"}, {"postalCode": "94607"}]},
{"active": True}
]
query1 = [{"id": 1}]
query2 = [{"id": 2}]
query3 = [{"location": [{"city": "Oakland"}]}]
query4 = [{"location": [{"city": "LA"}]}]
def run():
jsonQuery("add", entry1)
for entry in db:
print entry
# print match(db[0], query1)
#
# print match(db[0], query3)
#
# print jsonQuery("get", query1)
#
# print jsonQuery("get", query2)
print match(db[0], query3)
print match(db[0], query4)
run() | 3.78125 | 4 |
pomito/plugins/task/trello.py | codito/pomito | 1 | 12789417 | # -*- coding: utf-8 -*-
"""Trello plugin for pomito."""
import logging
from trello import TrelloClient
from pomito.plugins import task
from pomito.task import Task
__all__ = ['TrelloTask']
logger = logging.getLogger('pomito.plugins.task.trello')
def _create_trello_client(api_key, api_secret):
"""Create default TrelloClient instance."""
return TrelloClient(api_key=api_key, api_secret=api_secret)
class TrelloTask(task.TaskPlugin):
"""Trello task plugin for pomito."""
def __init__(self, pomodoro_service, get_trello_api=_create_trello_client):
"""Create an instance of TrelloTask."""
if pomodoro_service is None:
raise ValueError("pomodoro_service must not be None.")
self._get_trello_client = get_trello_api
self._pomodoro_service = pomodoro_service
self.trello_api = None
self.trello_board = None
self.trello_list = None
def initialize(self):
"""Initialize the trello task plugin."""
def _get_config(config):
return self._pomodoro_service.get_config("task.trello", config)
api_key = _get_config("api_key")
api_secret = _get_config("api_secret")
self.trello_board = _get_config("board")
self.trello_list = _get_config("list")
self.trello_api = self._get_trello_client(api_key, api_secret)
if api_key is None or api_secret is None\
or self.trello_board is None or self.trello_list is None:
logger.error("Error initializing plugin: invalid configuration")
def get_tasks(self):
"""Get all incomplete tasks assigned to the user."""
# TODO support for dueDates
try:
def create_task(card):
"""Create a `Task` object from a trello dict."""
return Task(uid=card.id,
estimate=0,
actual=0,
tags=card.labels,
description=card.name)
for b in self.trello_api.list_boards():
if self.trello_board is not None and b.name != self.trello_board:
continue
if self.trello_list is not None:
lists = [lo for lo in b.list_lists() if lo.name == self.trello_list]
else:
lists = b.list_lists()
for l in lists:
yield from map(create_task, l.list_cards())
except AttributeError as attrib_error:
logger.error("Error getting tasklist: {0}".format(attrib_error))
| 2.5 | 2 |
binding.gyp | royalpinto/node-cares | 10 | 12789418 | <reponame>royalpinto/node-cares
{
"targets": [{
"variables": {
# Define `gcc_version` if it's not defined already
# as it is getting used below.
"gcc_version%": "unknown",
},
"target_name": "cares_wrap",
"include_dirs": [
"<!(node -e \"require('nan')\")",
"deps/cares/include",
"deps/cares/src",
"deps/utils"
],
"sources": [
"src/cares_wrap.cc"
],
"dependencies": [ "deps/cares/cares.gyp:cares" ],
# Exclude `-fno-tree-sink` by default as some older compiler versions
# does not support this flag.
# This flag is conditionally getting added again below.
'cflags!': [ '-fno-tree-sink' ],
"conditions": [
["OS!='win'", {
"libraries": [ "-Wl,-rpath,<!(pwd)/build/Release/" ],
}
],
# Conditionally add `-fno-tree-sink` only for supported versions.
['clang == 0 and gcc_version <= 44', {
'cflags': [ '-fno-tree-sink' ], # Work around compiler bug.
}],
]
}]
}
| 1.398438 | 1 |
Source/RenderPasses/TemporalDelayPass/Testing/testTemporalDelayPass.py | jeongsoopark/Falcor | 2 | 12789419 | <reponame>jeongsoopark/Falcor
def test_temporal_delay():
imageLoader = RenderPass("ImageLoader", {'filename': 'smoke-puff.png', 'mips': False, 'srgb': True})
depthPass = RenderPass("DepthPass")
forwardLightingPass = RenderPass("ForwardLightingPass")
temporalDelayPass = RenderPass("TemporalDelayPass", {"delay": 16})
graph = RenderGraph("Temporal Delay Graph")
graph.addPass(imageLoader, "ImageLoader")
graph.addPass(depthPass, "DepthPass")
graph.addPass(forwardLightingPass, "ForwardLightingPass")
graph.addPass(temporalDelayPass, "TemporalDelayPass")
graph.addEdge("ImageLoader.dst", "ForwardLightingPass.color")
graph.addEdge("DepthPass.depth", "ForwardLightingPass.depth")
graph.addEdge("ForwardLightingPass.color", "TemporalDelayPass.src")
graph.markOutput("TemporalDelayPass.maxDelay")
return graph
temporal_delay_graph = test_temporal_delay()
m.addGraph(temporal_delay_graph)
| 2.328125 | 2 |
Leetcode/Python/_1534.py | Xrenya/algorithms | 1 | 12789420 | class Solution:
def countGoodTriplets(self, arr: List[int], a: int, b: int, c: int) -> int:
# Brute force solution
counter = 0
        length = len(arr)
        for i in range(length - 2):
            for j in range(length - 1):
                for k in range(length):
                    if i < j and j < k:
                        if abs(arr[i] - arr[j]) <= a and abs(arr[j] - arr[k]) <= b and abs(arr[i] - arr[k]) <= c:
                            counter += 1
        return counter
class Solution:
def countGoodTriplets(self, arr: List[int], a: int, b: int, c: int) -> int:
size = len(arr)
counter = 0
for i in range(size-2):
for j in range(i+1, size-1):
for k in range(j+1, size):
ok_a = abs(arr[i] - arr[j]) <= a
ok_b = abs(arr[j] - arr[k]) <= b
ok_c = abs(arr[i] - arr[k]) <= c
if all((ok_a, ok_b, ok_c)):
counter += 1
return counter
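# Illustrative check (comments only): for the LeetCode example
# arr = [3, 0, 1, 1, 9, 7], a = 7, b = 2, c = 3, both implementations above
# return 4, the expected answer.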
| 3 | 3 |
nadl/core/tensor.py | heytanay/nadl | 13 | 12789421 | <filename>nadl/core/tensor.py
import numpy as np
from ..core.ops import HiddenOps
from ..other.utils import Utils
class Tensor:
"""
Core Tensor Class
This class will be responsible for all the heavy lifting.
"""
def __init__(self, data: np.ndarray, requires_grad: bool=True, _children: tuple=(), _op: str=''):
if not isinstance(data, np.ndarray):
raise TypeError("Only Numpy arrays are supported for now.")
self.data = data
self.requires_grad = requires_grad
self.grad = np.zeros_like(self.data)
self._prev = set(_children)
        self._op = _op
        # default no-op backward; ops that build a graph override this
        self._backward = lambda: None
def experimental_set_data(self, new_data):
"""
Explicitly change the data of an already initialized Tensor.
This will reset all the gradients back to 0
"""
self.data = new_data
self.grad = np.zeros_like(self.data)
@property
def shape(self):
"""
Returns the shape of the Tensor
"""
return self.data.shape
@property
def dtype(self):
"""
        Returns the data type (dtype) of the Tensor
"""
return self.data.dtype
@property
def numpy(self):
"""
Returns the data of the tensor in a numpy array.
Use this method to retrieve the data
"""
return np.array(self.data)
@classmethod
def ones_like(cls, tensor):
"""
Returns a Tensor full of Ones with the same shape as the provided tensor
Just like np.ones_like(...) function
"""
return cls(data=np.ones(shape=tensor.shape, dtype=tensor.dtype))
@classmethod
def zeros_like(cls, tensor):
"""
Returns a Tensor full of Zeros with the same shape as the provided tensor
Just like np.zeros_like(...) function
"""
return cls(data=np.zeros(shape=tensor.shape, dtype=tensor.dtype))
@classmethod
def random_like(cls, tensor):
"""
Returns a Tensor full of Random Numbers with the same shape as the provided tensor
Just like np.ones_like(...) function but instead of ones, it generates random numbers
"""
return cls(data=np.random.rand(*tensor.shape))
def zero_grad(self):
"""
        Zeros the current gradients of the tensor.
"""
self.grad = np.zeros_like(self.data)
def __repr__(self):
"""
Returns the string representation of a Tensor
"""
return f"Tensor<shape={self.shape}, dtype={self.dtype}>"
def __add__(self, tensor):
"""
Overloaded function for "add" operator.
Use na_ops.add() instead
"""
if not isinstance(tensor, Tensor):
tensor = Tensor(data=tensor)
output = Tensor(data=self.data+tensor.data, _children=(self, tensor), _op='+')
def _backward():
__grad_check = Utils.checkGradDep(self, tensor)
if not __grad_check: raise RuntimeError("Cannot perform backward propagation on a Static Tensor")
self.grad += output.grad
tensor.grad += output.grad
output._backward = _backward
return output
def __mul__(self, scalar):
"""
Multiplication using the " * " operator is only supported for a Tensor and a scalar value
To multiply a Tensor with a Tensor, use the "tensor1.dot(tensor2)" method.
Use na_ops.smul()
"""
assert isinstance(scalar, (int, float, bool)), "Only multiplication with a scalar value is supported using '*' operator.\nFor Multiplication with a vector, use the '.dot()' function."
output = Tensor(data=self.data * scalar, _children=(self,), _op='*')
return output
def __neg__(self):
"""
Multiplies every element in the Tensor with -1
More fancy term: "Inverts all values in a Tensor"
"""
return self * -1
def __div__(self, tensor):
raise NotImplementedError("Division Operation is currently not implemented.")
def __pow__(self, scalar):
"""
Only raise to scalar powers
"""
output = HiddenOps.power(tensor=self, power=scalar, TensorDataTypeWrapper=Tensor)
return output
def __sub__(self, tensor):
"""
Subtraction between 2 tensors
"""
output = HiddenOps.subtract(tensor1=self, tensor2=tensor, TensorDataTypeWrapper=Tensor)
return output
def relu(self):
"""
Upper-level abstraction for ReLU
Use na_ops.activations.relu() instead
"""
output = HiddenOps.relu(tensor1=self, TensorDataTypeWrapper=Tensor)
return output
def matmul(self, tensor):
"""
Upper-level abstraction for the matrix multiplication function
Use na_ops.matmul() instead
"""
output = HiddenOps.matmul(tensor1=self, tensor2=tensor, TensorDataTypeWrapper=Tensor)
return output
def sum(self):
"""
Upper-level abstraction for Tensor sum function
"""
output = HiddenOps.tensor_sum(tensor=self, TensorDataTypeWrapper=Tensor)
return output
def backward(self):
"""
This function will perform the backward propagation
Recursively visit all the nodes in the graph and then call the backward function on
the nodes.
Topological Sort.
"""
topology = []
visited = set()
def build_topo(v):
if v not in visited:
visited.add(v)
for child in v._prev:
                    build_topo(child)
topology.append(v)
build_topo(self)
self.grad = np.ones_like(self.data)
for v in reversed(topology):
v._backward() | 2.578125 | 3 |
041.py | joserc87/project-euler | 0 | 12789422 | <filename>041.py
"""
Project Euler Problem 41
========================
We shall say that an n-digit number is pandigital if it makes use of all
the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital
and is also prime.
What is the largest n-digit pandigital prime that exists?
"""
# THOUGHTS:
#
# There is no 9-digit pandigital prime, because 1+2+3+..+9 = 45 and 45%3 = 0,
# so all the 9-digit pandigitals are also divisible by 3
# Similarly 1+2+..+8 = 36 and 36 % 3 = 0, so 8-digit pandigitals are divisible by 3 too.
# 1+2+..+7 = 28 is not divisible by 3, so the search starts from 7-digit pandigitals.
from itertools import permutations
from util import PrimeFactory
def main():
factory = PrimeFactory()
for num_digits in range(7, 1, -1):
for num_list in permutations(range(num_digits, 0, -1)):
num = int(''.join(map(str, num_list)))
if factory.is_prime(num):
return num
return 'No pandigital prime found??'
if __name__ == "__main__":
print(main())
| 4.15625 | 4 |
src/main.py | Will2065/uestc-temperature | 0 | 12789423 | <reponame>Will2065/uestc-temperature
import os
import time
from reporter import Reporter
if __name__ == "__main__":
print(f'[{time.strftime("%F %H:%M:%S")}]', end=" ")
cookies = os.environ.get("COOKIES")
    if cookies is None:
raise Exception("session id not provided")
else:
cookies = cookies.split("#")
results = []
for index, cookie in enumerate(cookies):
reporter = Reporter(cookie)
result, message = reporter.run()
results.append(result)
print(f"Student {index+1}: {message}")
if not all(results):
exit(-1)
| 2.53125 | 3 |
rollbar/contrib/django/tests.py | arthurio/pyrollbar | 177 | 12789424 | """
Unit tests
"""
from django.test import TestCase
from django.conf import settings
class BasicTests(TestCase):
def test_configuration(self):
"""
Test that the configuration is sane.
"""
self.assertTrue('ROLLBAR' in dir(settings),
msg='The ROLLBAR setting is not present.')
self.assertTrue(settings.ROLLBAR.get('access_token'),
msg='The ROLLBAR["access_token"] setting is blank.')
| 2.6875 | 3 |
util.py | tamamiyasita/Roguelike-Tutorial-2020 | 0 | 12789425 | <reponame>tamamiyasita/Roguelike-Tutorial-2020<filename>util.py
import arcade
from random import randint
from constants import *
# from data import IMAGE_ID
from functools import wraps
import time
def exp_calc(x=58, max_level=50, ratio=1.2, constant=40):
result = {}
result[1] = 0
for i in range(2,max_level+1):
result[i] = int(x)
x = x * ratio + constant
return result
print(exp_calc())
def Bresenham(start, end):
x1, y1 = start
x2, y2 = end
dx = x2 - x1
dy = y2 - y1
# Determine how steep the line is
is_steep = abs(dy) > abs(dx)
# Rotate line
if is_steep:
x1, y1 = y1, x1
x2, y2 = y2, x2
# Swap start and end points if necessary and store swap state
swapped = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
swapped = True
# Recalculate differentials
dx = x2 - x1
dy = y2 - y1
# Calculate error
error = int(dx / 2.0)
ystep = 1 if y1 < y2 else -1
# Iterate over bounding box generating points between start and end
y = y1
points = []
for x in range(x1, x2 + 1):
coord = (y, x) if is_steep else (x, y)
# coord = grid_to_pixel(coord)
points.append(coord)
error -= abs(dy)
if error < 0:
y += ystep
error += dx
# Reverse the list if the coordinates were swapped
if swapped:
points.reverse()
return points
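# Example (illustrative): Bresenham((0, 0), (3, 1)) -> [(0, 0), (1, 0), (2, 1), (3, 1)]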
def grid_to_pixel(x, y):
"""tilepositionからsprite_sizeに変換する"""
px = x * SPRITE_SIZE * SPRITE_SCALE + SPRITE_SIZE / 2 * SPRITE_SCALE
py = (y * SPRITE_SIZE * SPRITE_SCALE + SPRITE_SIZE /
2 * SPRITE_SCALE) + STATES_PANEL_HEIGHT
return px, py
def pixel_to_grid(x, y):
"""sprite_sizeからtile_pixel_to_gridへの位置を指定する"""
px = x - SPRITE_SIZE / 2 * SPRITE_SCALE
px = round(px / GRID_SIZE)
py = y - SPRITE_SIZE / 2 * SPRITE_SCALE - STATES_PANEL_HEIGHT
py = round(py / GRID_SIZE)
return px, py
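# Example (illustrative, assuming SPRITE_SIZE = 32, SPRITE_SCALE = 1,
# STATES_PANEL_HEIGHT = 80 and GRID_SIZE = 32 in constants):
# grid_to_pixel(2, 3) -> (80.0, 192.0) and pixel_to_grid(80.0, 192.0) -> (2, 3).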
def dice(D, max_d, plus):
return D * randint(1, int(max_d)) + plus
def get_entity(x, y, sprite_lists):
px, py = grid_to_pixel(x, y)
get_sprite = arcade.SpriteList()
for sprite_list in sprite_lists:
s_list = arcade.get_sprites_at_exact_point((px, py), sprite_list)
for sprite in s_list:
if sprite.blocks:
                get_sprite.append(sprite)
    return get_sprite
def get_blocking_entity(x, y, sprite_lists):
px, py = grid_to_pixel(x, y)
for sprite_list in sprite_lists:
s_list = arcade.get_sprites_at_exact_point((px, py), sprite_list)
for sprite in s_list:
if sprite.blocks:
return sprite
return None
def result_add(value=0):
"""関数やメソッドの出力に任意の値やリストの合計を加えるデコレータ
プロパティに使用する場合の例
@property
@result_add([i for i in range(10)])
def x(self):
x = 5
return x
"""
def _result_add(func):
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if hasattr(value, "__iter__"):
result += sum(value)
return result
else:
result += value
return result
return wrapper
return _result_add
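# Example (illustrative) on a plain function:
#
#     @result_add(10)
#     def base():
#         return 5
#
#     base()  # -> 15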
def stop_watch(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
elapsed_time = time.time() - start
print(f"{func.__name__}は{elapsed_time}秒かかりました")
return result
return wrapper
def get_tile_set(img, tile_size):
"""
    Convert a loaded tile-set image into individual texture objects, one tile_size x tile_size square at a time.
    Blank tiles in the tile-set image are filtered out (best effort).
"""
tile_img = arcade.load_texture(img)
# print(f"タイルセットの画像サイズ, {tile_img.width} x {tile_img.height}")
tile_column = tile_img.width // tile_size
# print("列のタイル数", tile_column)
tile_count = (tile_img.height // tile_size) * tile_column
# print("暫定タイルの数", tile_count)
textures = arcade.load_spritesheet(
img, tile_size, tile_size, tile_column, tile_count)
    # drop blank tiles
textures = [i for i in textures if i.image.getbbox()]
# print("タイル総数:", len(textures), type(textures[0]), textures[0].width)
return textures
class TileImageTest(arcade.Window):
"""
get_tile_setで作られる画像オブジェクトのテスト
ついでに番号をつけた
"""
def __init__(self, width=1100, height=600, title="tile_test", textures=None, tile_size=32):
super().__init__(width, height, title)
self.tile_size = tile_size
arcade.set_background_color(arcade.color.AERO_BLUE)
self.textures = get_tile_set(textures, tile_size)
def on_draw(self):
arcade.start_render()
I = 0
c = 25
for i, v in enumerate(self.textures):
v.draw_scaled(center_x=I * self.tile_size + 25, center_y=c)
arcade.draw_text(str(i), start_x=I * self.tile_size + 25,
start_y=c+15, color=arcade.color.BLACK_BEAN, font_size=9, anchor_x="center")
if i >= 0:
I += 1
if i >= 2 and i % 50 == 0:
c += 40
I = 0
def on_key_press(self, symbol, modifiers):
if symbol == arcade.key.ESCAPE:
arcade.close_window()
def main():
t = r"image/bs_walls.png"
tst = TileImageTest(textures=t)
arcade.run()
if __name__ == "__main__":
main()
| 3.359375 | 3 |
main.py | JAkkerman/EvolutionaryComputing | 1 | 12789426 | <reponame>JAkkerman/EvolutionaryComputing
import sys, os
sys.path.insert(0, 'evoman')
from environment import Environment
# from demo_controller import player_controller
# from controller import Controller
from test_controller import test_controller
import numpy as np
def init():
"""
Initialise game environment
"""
# create map for outputs
experiment_name = 'testrun'
if not os.path.exists(experiment_name):
os.makedirs(experiment_name)
n_hidden_neurons = 0
# init environment class
# cont = Controller()
env = Environment(experiment_name=experiment_name,
speed='normal')
env.player_controller = test_controller
return env
if __name__ == '__main__':
env = init()
# use first EA
# TODO:
# use second EA
# TODO:
env.play()
yeet = env.player.sensors.get(env)
print(yeet)
print(env.logs)
| 2.71875 | 3 |
server/src/test/deployments/integration_tests/case01_direct/lab_config.py | zstars/weblabdeusto | 0 | 12789427 | <gh_stars>0
##################################
# Laboratory Server configuration #
##################################
laboratory_assigned_experiments = {
'exp1:dummy1@Dummy experiments':
{
'coord_address': 'experiment_dummy1:myprocess@myhost',
'checkers': ()
},
'exp1:dummy2@Dummy experiments':
{
'coord_address': 'experiment_dummy2:myprocess@myhost',
'checkers': ()
},
}
| 1.4375 | 1 |
tests/test_pydantic_config.py | Validus-Risk-Management/aws-appconfig-pydantic | 1 | 12789428 | import io
import json
import socket
from typing import Dict, Tuple, Union
import boto3
import pytest
import yaml
from botocore.client import BaseClient
from botocore.response import StreamingBody
from botocore.session import Session
from botocore.stub import Stubber
from pydantic import BaseModel, ValidationError
from pytest_mock import MockerFixture
from pydantic_appconfig import AppConfigHelper
class TestConfig(BaseModel):
"""Test pydantic parsing."""
__test__ = False
test_field_string: str
test_field_int: int
class Config:
"""The config, including title for the JSON schema."""
title = "TestConfig"
def test_config_returned_as_model(
appconfig_stub: Tuple[BaseClient, Stubber, Session],
mocker: MockerFixture,
) -> None:
"""Tests the config gets updated."""
client, stub, _ = appconfig_stub
stub.add_response(
"get_configuration",
_build_response(
{
"test_field_string": "testing_string",
"test_field_int": 42,
},
"1",
"application/json",
),
_build_request(),
)
mocker.patch.object(boto3, "client", return_value=client)
a: AppConfigHelper[TestConfig] = AppConfigHelper(
"AppConfig-App",
"AppConfig-Env",
"AppConfig-Profile",
15,
config_schema_model=TestConfig,
)
result = a.update_config()
assert result
assert a.config.test_field_string == "testing_string"
assert a.config.test_field_int == 42
assert a.config_version == "1"
def test_yaml_config_returned_as_model(
appconfig_stub: Tuple[BaseClient, Stubber, Session],
mocker: MockerFixture,
) -> None:
"""Tests the config gets updated."""
client, stub, _ = appconfig_stub
stub.add_response(
"get_configuration",
_build_response(
{
"test_field_string": "testing_string",
"test_field_int": 42,
},
"1",
"application/x-yaml",
),
_build_request(),
)
mocker.patch.object(boto3, "client", return_value=client)
a: AppConfigHelper[TestConfig] = AppConfigHelper(
"AppConfig-App",
"AppConfig-Env",
"AppConfig-Profile",
15,
config_schema_model=TestConfig,
)
result = a.update_config()
assert result
assert a.config.test_field_string == "testing_string"
assert a.config.test_field_int == 42
assert a.config_version == "1"
def test_config_model_parse_error(
appconfig_stub: Tuple[BaseClient, Stubber, Session], mocker: MockerFixture
) -> None:
"""Tests the config rejected."""
client, stub, _ = appconfig_stub
stub.add_response(
"get_configuration",
_build_response(
{
"xxx": "testing_string",
},
"1",
"application/json",
),
_build_request(),
)
mocker.patch.object(boto3, "client", return_value=client)
a: AppConfigHelper[TestConfig] = AppConfigHelper(
"AppConfig-App",
"AppConfig-Env",
"AppConfig-Profile",
15,
config_schema_model=TestConfig,
)
result = a.update_config()
assert result
with pytest.raises(ValidationError):
assert a.config.test_field_string
def _build_request(
app: str = "AppConfig-App",
env: str = "AppConfig-Env",
profile: str = "AppConfig-Profile",
client_id: str = None,
version: str = "null",
) -> Dict[str, str]:
if client_id is None:
client_id = socket.gethostname()
return {
"Application": app,
"ClientConfigurationVersion": str(version),
"ClientId": client_id,
"Configuration": profile,
"Environment": env,
}
def _build_response(
content: Union[Dict, str], version: str, content_type: str
) -> Dict[str, Union[str, StreamingBody]]:
if content_type == "application/json":
content_text = json.dumps(content).encode("utf-8")
elif content_type == "application/x-yaml":
content_text = str(yaml.dump(content)).encode("utf-8")
elif not isinstance(content, str):
raise ValueError("Unrecognised content.")
else:
content_text = content.encode("utf-8")
return {
"Content": StreamingBody(io.BytesIO(bytes(content_text)), len(content_text)),
"ConfigurationVersion": version,
"ContentType": content_type,
}
| 2.09375 | 2 |
pyramid_translogger/compat.py | podhmo/pyramid_translogger | 0 | 12789429 | <gh_stars>0
# -*- coding:utf-8 -*-
import sys
import types
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3: # pragma: no cover
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
long = int
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
long = long
if PY3:
from urllib.request import quote as url_quote
else:
from urllib import quote as url_quote
| 2.015625 | 2 |
matchsticks/game_types.py | nikihowe/matchsticks | 0 | 12789430 | <filename>matchsticks/game_types.py
# (c) <NAME> 2021
from typing import Optional
Point = tuple[float, float]
Line = tuple[Point, Point]
Move = Optional[tuple[int, int, int]]
| 2 | 2 |
Planning/Jpg2TFRecords.py | whong92/3D_DL | 35 | 12789431 | <filename>Planning/Jpg2TFRecords.py
import time
import tensorflow as tf
from six.moves import cPickle as pickle
import numpy as np
import matplotlib.pyplot as plt
import skimage.transform as skt
import skimage.io as io
import os
import imageio
root_folder = 'D:\\PycharmProjects\\product-image-dataset-v0.1' \
'\\5010171005204_2017-11-30_17.20.19'
label = 5010171005204
folder = os.path.join(root_folder, str(label))
IMAGE_WIDTH =1280
IMAGE_HEIGHT =1024
IMAGE_DEPTH = 3
image_files = []
for (dirpath, dirnames, filenames) in os.walk(folder):
image_files.extend(filenames)
break
image_data = np.ndarray(shape=(len(image_files), IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH),
dtype=np.uint8)
n_image_read = 0
for file in image_files:
image_file = os.path.join(folder, file)
image_data[n_image_read,:,:,:] = imageio.imread(image_file)
n_image_read += 1
print('Loaded all images into memory')
tfrecords_filename = str(label) + '.tfrecords'
'''writing into TFRecords format'''
writer = tf.python_io.TFRecordWriter(tfrecords_filename)
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
for n in range(n_image_read):
img = image_data[n]
    img_raw = img.tostring()  # serialize the image array to raw bytes
label_raw = np.int64(label)
# tf.train.Example is the default format for storing in TFRecords
# construct the Example proto object
example = tf.train.Example(features=tf.train.Features(feature={
        'height': _int64_feature(IMAGE_HEIGHT),
        'width': _int64_feature(IMAGE_WIDTH),
'image_raw': _bytes_feature(img_raw),
'label': _int64_feature(label_raw)}))
writer.write(example.SerializeToString())
writer.close()
"""
'''assessing TF record format'''
record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename)
n = 0
num_diff = 0
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
img_string = (example.features.feature['image_raw']
.bytes_list
.value[0])
reconstructed_label = int(example.features.feature['label']
.int64_list
.value[0])
img_1d = np.fromstring(img_string, dtype=np.uint8)
reconstructed_img = img_1d.reshape((IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH))
original_img = image_data[n]
original_label = label
if(not(np.allclose(original_img, reconstructed_img)) or
not(np.allclose(original_label, reconstructed_label))):
num_diff += 1
n += 1
print('Number of mismatches: ' + str(num_diff))
""" | 2.4375 | 2 |
ion/services/dm/utility/provenance.py | ooici/coi-services | 3 | 12789432 | #!/usr/bin/env python
'''
@file ion/services/dm/utility/provenance.py
Contains an assortment of utilities for determining provenance
'''
from coverage_model import ParameterFunctionType
'''
An example of using graph()
Here's the output of the CTDMO data product's parameter dictionary
><> graph(pdict, 'seawater_density')
-->
{'cc_lat': {},
'cc_lon': {},
'sci_water_pracsal': {'seawater_conductivity': {'conductivity': {}},
'seawater_pressure': {'cc_p_range': {}, 'pressure': {}},
'seawater_temperature': {'temperature': {}}},
'seawater_pressure': {'cc_p_range': {}, 'pressure': {}},
'seawater_temperature': {'temperature': {}}}
'''
def graph(pdict, param_name):
'''
Essentially a depth-first-search of the dependency
tree for a particular parameter.
Returns a dictionary where the key is the named parameter
and the nested values are the dependencies. If a named
parameter does not contain a value, it is not a function.
'''
    # if param_name is not a string (e.g. a numeric constant), it has no dependencies
if not isinstance(param_name, basestring):
return {}
# get the parameter context
ctx = pdict[param_name]
# we only care about parameter functions
if not isinstance(ctx.param_type, ParameterFunctionType):
return {}
# the parameter map describes what the function needs
pmap = ctx.param_type.function.param_map
retval = {}
deps = pmap.values()
# Recursively determine the graph for each dependency
for d in deps:
retval[d] = graph(pdict, d)
return retval
| 2.8125 | 3 |
classes/modules/multiframe/ctccnet2/ModelCTCCNet2.py | matteo-rizzo/cctcc | 0 | 12789433 | <reponame>matteo-rizzo/cctcc<gh_stars>0
from typing import Tuple, List
import torch
from auxiliary.settings import NUM_STAGES
from classes.modules.common.BaseModel import BaseModel
from classes.modules.multiframe.ctccnet2.CTCCNet2 import CTCCNet2
class ModelCTCCNet2(BaseModel):
def __init__(self):
super().__init__()
self._network = CTCCNet2().float().to(self._device)
def predict(self, seq_temp: torch.Tensor, seq_shot: torch.Tensor = None, return_preds: bool = False) -> List:
return self._network(seq_temp, seq_shot, return_preds)
@staticmethod
def get_multiply_accumulated_loss(l1: torch.Tensor,
l2: torch.Tensor,
l3: torch.Tensor,
a1: float = 0.33,
a2: float = 0.33) -> torch.Tensor:
return a1 * l1 + a2 * l2 + (1.0 - a1 - a2) * l3
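        # Example: with the defaults a1 = a2 = 0.33 the blend is
        # 0.33*l1 + 0.33*l2 + 0.34*l3, so the three weights always sum to 1.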
def compute_loss(self, o: List, y: torch.Tensor) -> Tuple:
self.reset_gradient()
stages_loss, mal = self.get_loss(o, y)
mal.backward()
self.optimize()
return stages_loss, mal
def get_loss(self, o: List, y: torch.Tensor) -> Tuple:
stage_out, stages_loss = None, []
for stage in range(NUM_STAGES):
stage_out = torch.mul(stage_out, o[stage]) if stage - 1 > 0 else o[stage]
stages_loss.append(self.get_angular_loss(stage_out, y))
mal = sum(stages_loss)
return stages_loss, mal
def compute_corr_loss(self, o: List, y: torch.Tensor) -> Tuple:
self.reset_gradient()
cas_loss, cas_mal, cor_loss, cor_mal = self.get_corr_loss(o, y)
mal = cas_mal + cor_mal
mal.backward()
self.optimize()
return cas_loss, cas_mal, cor_loss, cor_mal
def get_corr_loss(self, o: List, y: torch.Tensor) -> Tuple:
outputs, preds = zip(*o)
cas_out, cor_out, cas_loss, cor_loss = None, None, [], []
for stage in range(NUM_STAGES):
cas_out = torch.mul(cas_out, outputs[stage]) if stage - 1 > 0 else outputs[stage]
cas_loss.append(self.get_angular_loss(cas_out, y[:, -1, :]))
cor_out = torch.mul(cor_out, preds[stage]) if stage - 1 > 0 else preds[stage]
cor_loss.append(self.get_angular_loss(cor_out.permute(1, 0, 2), y))
cas_mal, cor_mal = sum(cas_loss), sum(cor_loss)
return cas_loss, cas_mal, cor_loss, cor_mal
| 2.328125 | 2 |
tests/test_dimensions.py | man-of-eel/dpgv4 | 3 | 12789434 | <filename>tests/test_dimensions.py
# pylint: disable=missing-docstring
from dpgv4 import calculate_dimensions
from .util import sample_filename
def test_dimensions() -> None:
input_filename = sample_filename("World - 2.mp4")
assert calculate_dimensions(input_filename) == (256, 144)
| 2.109375 | 2 |
flight_booking_app/urls.py | Celoka/flight_booking_system | 3 | 12789435 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import FlightViewSet,TicketViewSet
router = DefaultRouter()
router.register('ticket', TicketViewSet)
router.register('flight', FlightViewSet)
urlpatterns = [
path('', include(router.urls))
]
| 1.84375 | 2 |
18Host/TMA4120/latex/PyplotTesting/Scripts/main.py | MarcusTL12/School | 0 | 12789436 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib2tikz.save as tikz_save
import math
def derivative(y, h, n: int=1):
if n == 1:
return lambda x: (y(x + h) - y(x - h)) / (2 * h)
else:
return derivative(derivative(y, h, n - 1), h, 1)
def integral(y, h, a, b):
ret = 0
sgn = 1
if a > b:
sgn = -1
a, b = b, a
if abs(b - a) < h:
h *= abs(b - a)
for i in np.arange(a, b, h):
ret += y(i) * h
return ret * sgn
def fourier(y, h, n, a, b):
L = (b - a) / 2
a_0 = integral(y, h, a, b) / (2 * L)
a_n = [0] * n
b_n = [0] * n
for i in range(1, n + 1):
a_n[i - 1] = (1 / L) * integral(lambda x: y(x) * np.cos(i * np.pi * x / L), h, a, b)
b_n[i - 1] = (1 / L) * integral(lambda x: y(x) * np.sin(i * np.pi * x / L), h, a, b)
return lambda x: fouriereval(x, a_0, a_n, b_n, L)
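# Example (illustrative): fourier(np.sin, 0.01, 3, -np.pi, np.pi) returns a callable
# that approximates sin(x) on [-pi, pi] using the first 3 harmonics.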
def fouriereval(x, a_0, a_n, b_n, l):
ret = a_0
for i in range(1, len(a_n) + 1):
ret += a_n[i - 1] * np.cos(i * np.pi * x / l)
ret += b_n[i - 1] * np.sin(i * np.pi * x / l)
return ret
# def f(x):
# if x > 2:
# return f(x - 4)
# if x < -2:
# return f(x + 4)
# return ((x**3) - 4 * x) / 4
# def f(x):
# if x < -1:
# return f(x + 2)
# if x > 1:
# return f(x - 2)
# return -1 if x < 0 else 1
def fx(x, n):
if n == 1:
return np.sin(x)
return fx(np.sin(x) * np.pi / 2, n - 1)
# def f(x):
# return np.cos(np.tan(np.sin(x)))
def sirc(x):
return np.sqrt(1 - x**2)
def f(x):
if x < -2:
return f(x + 4)
if x > 2:
return f(x - 4)
if x < 0:
return -sirc(x + 1)
else:
return sirc(x - 1)
h = 0.001
x = np.arange(-4, 4, 0.01)
# kr = lambda x: derivative(f, h, 2)(x) / ((1 + derivative(f, h)(x)**2)**(3 / 2))
# dkr = derivative(kr, h)
# dy = derivative(f, h)
fr = fourier(f, h, 101, -2, 2)
plt.plot(x, np.vectorize(f)(x))
# plt.plot(x, np.vectorize(kr)(x))
# plt.plot(x, np.vectorize(dkr)(x))
# plt.plot(x, np.vectorize(dy)(x))
plt.plot(x, np.vectorize(fr)(x))
plt.axis([-4, 4, -5, 5])
plt.title("$f(x)$")
plt.grid(True)
tikz_save("PyPlotTesting/Figurer/" + "f" + str(1) + ".tikz", figureheight='\\figureheight', figurewidth='\\figurewidth')
| 3.078125 | 3 |
magic_proxy/__init__.py | restran/magic-proxy | 1 | 12789437 | <filename>magic_proxy/__init__.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Created by restran on 2017/9/15
from __future__ import unicode_literals, absolute_import
__version__ = '0.1.0'
| 1.140625 | 1 |
jnpr/openclos/tests/unit/test_ztp.py | sysbot/OpenClos | 1 | 12789438 | <filename>jnpr/openclos/tests/unit/test_ztp.py<gh_stars>1-10
'''
Created on Sep 11, 2014
@author: moloyc
'''
import unittest
from flexmock import flexmock
from jnpr.openclos.ztp import ZtpServer
from test_model import createPod, createDevice, createPodDevice
class TestZtp(unittest.TestCase):
def setUp(self):
'''Creates with in-memory DB'''
self.conf = {}
self.conf['dbUrl'] = 'sqlite:///'
self.conf['httpServer'] = {'ipAddr': '127.0.0.1'}
self.ztpServer = ZtpServer(self.conf)
self.session = self.ztpServer.dao.Session()
def tearDown(self):
pass
def testGenerateDhcpConfWithNoPodDevice(self):
from jnpr.openclos.l3Clos import util
flexmock(util, isPlatformUbuntu = True)
dhcpConf = self.ztpServer.generateSingleDhcpConf()
self.assertFalse('{{' in dhcpConf)
self.assertFalse('}}' in dhcpConf)
self.assertEquals(1, dhcpConf.count('host-name')) # 1 global + 0 device
def testGenerateSingleDhcpConf(self):
from jnpr.openclos.l3Clos import util
flexmock(util, isPlatformUbuntu = True)
createDevice(self.session, 'dev1')
createDevice(self.session, 'dev2')
dhcpConf = self.ztpServer.generateSingleDhcpConf()
self.assertFalse('{{' in dhcpConf)
self.assertFalse('}}' in dhcpConf)
self.assertEquals(3, dhcpConf.count('host-name')) # 1 global + 2 device
def testGeneratePodSpecificDhcpConf(self):
from jnpr.openclos.l3Clos import util
flexmock(util, isPlatformUbuntu = True)
pod = createPod('pod1', self.session)
pod.spineJunosImage = 'testSpineImage'
pod.leafJunosImage = 'testLeafImage'
createPodDevice(self.session, 'dev1', pod)
dev2 = createPodDevice(self.session, 'dev2', pod)
dev3 = createPodDevice(self.session, 'dev3', pod)
dev3.role = 'leaf'
dev4 = createPodDevice(self.session, 'dev4', pod)
dev4.role = 'unknown'
dhcpConf = self.ztpServer.generatePodSpecificDhcpConf('pod1')
self.assertEquals(2, dhcpConf.count('testSpineImage'))
self.assertEquals(1, dhcpConf.count('testLeafImage'))
self.assertFalse('{{' in dhcpConf)
self.assertFalse('}}' in dhcpConf)
self.assertEquals(5, dhcpConf.count('host-name')) # 1 global + 4 device
def testPopulateDhcpGlobalSettings(self):
from jnpr.openclos.l3Clos import util
globalZtpConf = {'ztp': {'dhcpSubnet': '10.20.30.0/25', 'dhcpOptionRoute': '10.20.30.254', 'dhcpOptionRangeStart': '10.20.30.15','dhcpOptionRangeEnd': '10.20.30.20'}}
flexmock(util, loadClosDefinition = globalZtpConf)
globalSetting = self.ztpServer.populateDhcpGlobalSettings()
self.assertEquals('10.20.30.0', globalSetting['network'])
self.assertEquals('255.255.255.128', globalSetting['netmask'])
self.assertEquals('10.20.30.254', globalSetting['defaultRoute'])
self.assertEquals('10.20.30.15', globalSetting['rangeStart'])
self.assertEquals('10.20.30.20', globalSetting['rangeEnd'])
globalZtpConf = {'ztp': {'dhcpSubnet': '10.20.30.0/25'}}
flexmock(util, loadClosDefinition = globalZtpConf)
globalSetting = self.ztpServer.populateDhcpGlobalSettings()
self.assertEquals('10.20.30.1', globalSetting['defaultRoute'])
self.assertEquals('10.20.30.2', globalSetting['rangeStart'])
self.assertEquals('10.20.30.126', globalSetting['rangeEnd'])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | 2.09375 | 2 |
scripts/verify_items.py | ramalingam-cb/testrunner | 0 | 12789439 | #!/usr/bin/env python
"""Verify items exist, or were deleted from couchbase using an on disk kvstore"""
import time
import sys
import getopt
import pickle
import re
import mc_bin_client
import exceptions
import socket
from memcached.helper.data_helper import VBucketAwareMemcached
from membase.api.rest_client import RestConnection
def usage(err=None):
err_code = 0
if err:
err_code = 1
print "Error:", err
print
print "./verify_.py -m <master> -f <file>"
print ""
print " master the master node rest interface"
print " file file to write out the kvstore to"
print ""
print "./verify_items -m Administrator:[email protected]:8091 -f kvstore"
sys.exit(err_code)
class KVStore(object):
"""Simple key value store that handles sets and deletes, has the ability to read and write from a file"""
def __init__(self, filename=None):
if filename:
with open(filename) as f:
self.data = pickle.load(f)
else:
self.data = {}
self.index = 0
def save(self, filename):
"""Write out the current kvstore to a pickle file"""
with open(filename, 'w') as f:
pickle.dump(self.data, f)
def set(self, key, exp, flags, val):
"""Memcached set"""
if exp and exp <= 2592000:
exp += int(time.time())
self.data[key] = (exp, flags, val, True)
def delete(self, key):
"""Delete an item, but don't remove it from the kvstore completely"""
if key in self.data:
self.data[key] = (0, 0, "", False)
def __iter__(self):
return self.data.__iter__()
def iteritems(self):
return self.data.iteritems()
class Config(object):
def __init__(self, argv):
# defaults
self.master = None
self.filename = None
self.bucket = 'default'
try:
(opts, args) = getopt.getopt(argv, 'hm:f:b:', ['help', 'master=', 'file=', 'bucket='])
except IndexError:
usage()
except getopt.GetoptError, err:
usage(err)
for o, a in opts:
if o == "-h" or o == "--help":
usage()
if o == "-m" or o == "--master":
master_list = re.split('[:@]', a)
self.master = {}
self.master["username"] = master_list[0]
self.master["password"] = master_list[1]
self.master["ip"] = master_list[2]
self.master["port"] = master_list[3]
if o == "-f" or o == "--file":
self.filename = a
if o == "-b" or o == "--bucket":
self.bucket = a
if not self.master:
usage("missing master")
if not self.filename:
usage("missing file")
def get_aware(awareness, rest, key):
timeout = 60 + time.time()
passed = False
while time.time() < timeout and not passed:
try:
val = awareness.memcached(key).get(key)
passed = True
except mc_bin_client.MemcachedError as e:
if e.status == 7:
awareness.reset_vbuckets(rest, key)
else:
raise e
except exceptions.EOFError:
awareness.reset(rest)
except socket.error:
awareness.reset(rest)
if not passed:
raise Exception("failed get after 60 seconds")
return val
if __name__ == "__main__":
config = Config(sys.argv[1:])
kv = KVStore(config.filename)
rest = RestConnection(config.master)
awareness = VBucketAwareMemcached(rest, config.bucket)
undeleted = 0
missing = 0
badval = 0
for key, val_expected in kv.iteritems():
if val_expected[3]:
try:
val = get_aware(awareness, rest, key)
if val[2] != val_expected[2]:
badval += 1
except mc_bin_client.MemcachedError as e:
if e.status == 1:
missing += 1
else:
raise e
else:
try:
val = get_aware(awareness, rest, key)
undeleted += 1
except mc_bin_client.MemcachedError as e:
if e.status != 1:
raise e
awareness.done()
print "undeleted:", undeleted
print "missing:", missing
print "badval:", badval
| 2.453125 | 2 |
components/collector/tests/source_collectors/trello/test_issues.py | kargaranamir/quality-time | 33 | 12789440 | """Unit tests for the Trello issues collector."""
from datetime import datetime
from .base import TrelloTestCase
class TrelloIssuesTest(TrelloTestCase):
"""Unit tests for the Trello issues collector."""
METRIC_TYPE = "issues"
async def test_issues(self):
"""Test that the number of issues and the individual issues are returned."""
response = await self.collect(get_request_json_side_effect=self.json)
self.assert_measurement(response, value="2", entities=self.entities)
async def test_issues_with_ignored_list(self):
"""Test that lists can be ignored when counting issues."""
self.set_source_parameter("lists_to_ignore", ["list1"])
response = await self.collect(get_request_json_side_effect=self.json)
self.assert_measurement(response, value="1", entities=[self.entities[1]])
async def test_overdue_issues(self):
"""Test overdue issues."""
self.set_source_parameter("cards_to_count", ["overdue"])
response = await self.collect(get_request_json_side_effect=self.json)
self.assert_measurement(response, value="1", entities=[self.entities[1]])
async def test_inactive_issues(self):
"""Test inactive issues."""
self.set_source_parameter("cards_to_count", ["inactive"])
self.cards["cards"][0]["dateLastActivity"] = datetime.now().isoformat()
response = await self.collect(get_request_json_side_effect=self.json)
self.assert_measurement(response, value="1", entities=[self.entities[1]])
| 2.953125 | 3 |
tests/cpfcnpj_tests.py | vindi/pycpfcnpj | 0 | 12789441 | <reponame>vindi/pycpfcnpj
import unittest
from pycpfcnpj import cpfcnpj
class CPFCNPJTests(unittest.TestCase):
"""docstring for CPFCNPJTests"""
def setUp(self):
self.valid_cpf = '11144477735'
self.invalid_cpf = '11144477736'
self.invalid_cpf_size = '111444777'
self.valid_cnpj = '11444777000161'
self.invalid_cnpj = '11444777000162'
self.invalid_cnpj_size = '114447770001'
self.mascared_valid_cpf = '111.444.777-35'
self.mascared_invalid_cpf = '111.444.777-36'
self.mascared_invalid_cpf_size = '111.444.777'
self.mascared_valid_cnpj = '11.444.777/0001-61'
self.mascared_invalid_cnpj = '11.444.777/0001-62'
self.mascared_invalid_cnpj_size = '114.447/7700-01'
def test_validate_cpf_true(self):
self.assertTrue(cpfcnpj.validate(self.valid_cpf))
def test_validate_cpf_false(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cpf))
def test_validate_cnpj_true(self):
self.assertTrue(cpfcnpj.validate(self.valid_cnpj))
def test_validate_cnpj_false(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cnpj))
def test_wrong_cpf_size(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cpf_size))
def test_wrong_cnpj_size(self):
self.assertFalse(cpfcnpj.validate(self.invalid_cnpj_size))
def mascared_test_validate_cpf_true(self):
self.assertTrue(cpfcnpj.validate(self.mascared_valid_cpf))
def mascared_test_validate_cpf_false(self):
self.assertFalse(cpfcnpj.validate(self.mascared_invalid_cpf))
def mascared_test_validate_cnpj_true(self):
self.assertTrue(cpfcnpj.validate(self.mascared_valid_cnpj))
def mascared_test_validate_cnpj_false(self):
self.assertFalse(cpfcnpj.validate(self.mascared_invalid_cnpj))
def mascared_test_wrong_cpf_size(self):
self.assertFalse(cpfcnpj.validate(self.mascared_invalid_cpf_size))
def mascared_test_wrong_cnpj_size(self):
self.assertFalse(cpfcnpj.validate(self.mascared_invalid_cnpj_size))
if __name__ == '__main__':
unittest.main(verbosity=2)
| 2.578125 | 3 |
ml-agents/mlagents/trainers/bc_custom/trainer.py | icaro56/ml-agents | 0 | 12789442 | <reponame>icaro56/ml-agents
# # Unity ML-Agents Toolkit
# ## ML-Agent Learning (Imitation)
# Contains an implementation of Behavioral Cloning Algorithm
import logging
import os
import numpy as np
import tensorflow as tf
import time
from collections import deque
from mlagents.envs import AllBrainInfo, BrainInfo
from mlagents.trainers.bc_custom.policy import BCCustomPolicy
from mlagents.trainers.buffer import Buffer
from mlagents.trainers.trainer import UnityTrainerException, Trainer
logger = logging.getLogger("mlagents.envs")
class BehavioralCloningCustomTrainer(Trainer):
"""The ImitationTrainer is an implementation of the imitation learning."""
def __init__(self, sess, brain, trainer_parameters, training, seed, run_id):
"""
Responsible for collecting experiences and training PPO model.
:param sess: Tensorflow session.
:param trainer_parameters: The parameters for the trainer (dictionary).
:param training: Whether the trainer is set for training.
"""
super(BehavioralCloningCustomTrainer, self).__init__(sess, brain, trainer_parameters, training, run_id)
self.param_keys = ['brain_to_imitate', 'batch_size', 'time_horizon',
'graph_scope', 'summary_freq', 'max_steps',
'batches_per_epoch', 'use_recurrent',
'hidden_units','learning_rate', 'num_layers',
'sequence_length', 'memory_size', 'epsilon', 'beta']
for k in self.param_keys:
if k not in trainer_parameters:
raise UnityTrainerException("The hyperparameter {0} could not be found for the Imitation trainer of "
"brain {1}.".format(k, brain.brain_name))
self.step = 0
self.policy = BCCustomPolicy(seed, brain, trainer_parameters, sess)
self.brain_name = brain.brain_name
self.brain_to_imitate = trainer_parameters['brain_to_imitate']
self.batches_per_epoch = trainer_parameters['batches_per_epoch']
self.n_sequences = max(int(trainer_parameters['batch_size'] / self.policy.sequence_length), 1)
self.cumulative_rewards = {}
        # used by curriculum learning
self._reward_buffer = deque(maxlen=1000)
self.episode_steps = {}
self.stats = {'losses': [], 'episode_length': [], 'cumulative_reward': [], 'value_estimate': [], 'entropy': [],
'value_loss': [], 'policy_loss': [], 'learning_rate': []}
self.training_buffer = Buffer()
self.training_buffer_ppo = Buffer()
self.summary_path = trainer_parameters['summary_path']
if not os.path.exists(self.summary_path):
os.makedirs(self.summary_path)
self.summary_writer = tf.summary.FileWriter(self.summary_path)
#criando arquivo
self.actionProbList = []
self.trueActionList = []
self.use_curiosity = False
def createTempFolder(self, foldername):
try:
if not os.path.exists(foldername):
os.makedirs(foldername)
except Exception:
print("Arquivo temporario nao foi criado: " + foldername)
def saveTemps(self):
timestr = time.strftime("%Y%m%d-%H%M%S")
folderName1 = './tmp/' + timestr + "/"
self.createTempFolder(folderName1)
arq = open(folderName1 + 'actionProb_' + self.brain_name + ".txt", 'w')
arq.writelines(self.actionProbList)
arq.close()
arq = open(folderName1 + 'trueAction_' + self.brain_name + ".txt", 'w')
arq.writelines(self.trueActionList)
arq.close()
def addStepInTempList(self, gStep):
self.actionProbList.append("Step: " + str(gStep) + "\n");
self.trueActionList.append("Step: " + str(gStep) + "\n");
def __str__(self):
return '''Hyperparameters for the Imitation Trainer of brain {0}: \n{1}'''.format(
self.brain_name, '\n'.join(['\t{0}:\t{1}'.format(x, self.trainer_parameters[x]) for x in self.param_keys]))
@property
def parameters(self):
"""
Returns the trainer parameters of the trainer.
"""
return self.trainer_parameters
@property
def get_max_steps(self):
"""
Returns the maximum number of steps. Is used to know when the trainer should be stopped.
:return: The maximum number of steps of the trainer
"""
return float(self.trainer_parameters['max_steps'])
@property
def get_step(self):
"""
Returns the number of steps the trainer has performed
:return: the step count of the trainer
"""
# return self.policy.get_current_step()
return self.step
@property
def reward_buffer(self):
"""
Returns the reward buffer. The reward buffer contains the cumulative
rewards of the most recent episodes completed by agents using this
trainer.
:return: the reward buffer.
"""
return self._reward_buffer
@property
def get_last_reward(self):
"""
Returns the last reward the trainer has had
:return: the new last reward
"""
if len(self.stats['cumulative_reward']) > 0:
return np.mean(self.stats['cumulative_reward'])
else:
return 0
def increment_step_and_update_last_reward(self):
"""
Increment the step count of the trainer and Updates the last reward
"""
if len(self.stats['cumulative_reward']) > 0:
mean_reward = np.mean(self.stats['cumulative_reward'])
self.policy.update_reward(mean_reward)
self.policy.increment_step()
self.step = self.policy.get_current_step()
def take_action(self, all_brain_info: AllBrainInfo):
"""
Decides actions using policy given current brain info.
:param all_brain_info: AllBrainInfo from environment.
:return: a tuple containing action, memories, values and an object
to be passed to add experiences
"""
if len(all_brain_info[self.brain_name].agents) == 0:
return [], [], [], None, None
agent_brain = all_brain_info[self.brain_name]
run_out = self.policy.evaluate(agent_brain)
self.stats['value_estimate'].append(run_out['value'].mean())
self.stats['entropy'].append(run_out['entropy'].mean())
self.stats['learning_rate'].append(run_out['learning_rate'])
if self.policy.use_recurrent:
return run_out['action'], run_out['memory_out'], None, None, None
else:
return run_out['action'], None, None, run_out['value'], run_out
def add_experiences(self, curr_info: AllBrainInfo, next_info: AllBrainInfo, take_action_outputs):
"""
Adds experiences to each agent's experience history.
:param curr_info: Current AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param next_info: Next AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param take_action_outputs: The outputs of the take action method.
"""
# Used to collect teacher experience into training buffer
info_teacher = curr_info[self.brain_to_imitate]
next_info_teacher = next_info[self.brain_to_imitate]
for agent_id in info_teacher.agents:
self.training_buffer[agent_id].last_brain_info = info_teacher
for agent_id in next_info_teacher.agents:
stored_info_teacher = self.training_buffer[agent_id].last_brain_info
if stored_info_teacher is None:
continue
else:
idx = stored_info_teacher.agents.index(agent_id)
next_idx = next_info_teacher.agents.index(agent_id)
if stored_info_teacher.text_observations[idx] != "":
info_teacher_record, info_teacher_reset = \
stored_info_teacher.text_observations[idx].lower().split(",")
next_info_teacher_record, next_info_teacher_reset = next_info_teacher.text_observations[idx].\
lower().split(",")
if next_info_teacher_reset == "true":
self.training_buffer.reset_update_buffer()
else:
info_teacher_record, next_info_teacher_record = "true", "true"
if info_teacher_record == "true" and next_info_teacher_record == "true":
if not stored_info_teacher.local_done[idx]:
for i in range(self.policy.vis_obs_size):
self.training_buffer[agent_id]['visual_obs%d' % i]\
.append(stored_info_teacher.visual_observations[i][idx])
if self.policy.use_vec_obs:
self.training_buffer[agent_id]['vector_obs']\
.append(stored_info_teacher.vector_observations[idx])
if self.policy.use_recurrent:
if stored_info_teacher.memories.shape[1] == 0:
stored_info_teacher.memories = np.zeros((len(stored_info_teacher.agents),
self.policy.m_size))
self.training_buffer[agent_id]['memory'].append(stored_info_teacher.memories[idx])
self.training_buffer[agent_id]['actions'].append(next_info_teacher.
previous_vector_actions[next_idx])
info_student = curr_info[self.brain_name]
next_info_student = next_info[self.brain_name]
for agent_id in info_student.agents:
self.training_buffer_ppo[agent_id].last_brain_info = info_student
self.training_buffer_ppo[agent_id].last_take_action_outputs = take_action_outputs
        # only the curiosity module uses this, so nothing needs to be added here
# if info_student.agents != next_info_student.agents:
# curr_to_use = self.construct_curr_info(next_info_student)
# else:
# curr_to_use = info_student
# intrinsic_rewards = self.policy.get_intrinsic_rewards(curr_to_use, next_info)
for agent_id in next_info_student.agents:
stored_info = self.training_buffer_ppo[agent_id].last_brain_info
stored_take_action_outputs = self.training_buffer_ppo[agent_id].last_take_action_outputs
if stored_info is not None:
idx = stored_info.agents.index(agent_id)
next_idx = next_info_student.agents.index(agent_id)
if not stored_info.local_done[idx]:
for i, _ in enumerate(stored_info.visual_observations):
self.training_buffer_ppo[agent_id]['visual_obs%d' % i].append(
stored_info.visual_observations[i][idx])
self.training_buffer_ppo[agent_id]['next_visual_obs%d' % i].append(
next_info_student.visual_observations[i][next_idx])
if self.policy.use_vec_obs:
self.training_buffer_ppo[agent_id]['vector_obs'].append(stored_info.vector_observations[idx])
self.training_buffer_ppo[agent_id]['next_vector_in'].append(
next_info_student.vector_observations[next_idx])
if self.policy.use_recurrent:
if stored_info.memories.shape[1] == 0:
stored_info.memories = np.zeros((len(stored_info.agents), self.policy.m_size))
self.training_buffer_ppo[agent_id]['memory'].append(stored_info.memories[idx])
actions = stored_take_action_outputs['action']
                    # we do not use continuous actions here
if not self.policy.use_continuous_act:
self.training_buffer_ppo[agent_id]['action_mask'].append(stored_info.action_masks[idx])
a_dist = stored_take_action_outputs['log_probs']
value = stored_take_action_outputs['value']
self.training_buffer_ppo[agent_id]['actions'].append(actions[idx])
self.training_buffer_ppo[agent_id]['prev_action'].append(stored_info.previous_vector_actions[idx])
self.training_buffer_ppo[agent_id]['masks'].append(1.0)
if not self.use_curiosity:
self.training_buffer_ppo[agent_id]['rewards'].append(next_info_student.rewards[next_idx])
self.training_buffer_ppo[agent_id]['action_probs'].append(a_dist[idx])
self.training_buffer_ppo[agent_id]['value_estimates'].append(value[idx][0])
if agent_id not in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
self.cumulative_rewards[agent_id] += next_info_student.rewards[next_idx]
if not next_info_student.local_done[next_idx]:
if agent_id not in self.episode_steps:
self.episode_steps[agent_id] = 0
self.episode_steps[agent_id] += 1
def process_experiences(self, current_info: AllBrainInfo, next_info: AllBrainInfo):
"""
Checks agent histories for processing condition, and processes them as necessary.
Processing involves calculating value and advantage targets for model updating step.
:param current_info: Current AllBrainInfo
:param next_info: Next AllBrainInfo
"""
info_teacher = next_info[self.brain_to_imitate]
for l in range(len(info_teacher.agents)):
teacher_action_list = len(self.training_buffer[info_teacher.agents[l]]['actions'])
horizon_reached = teacher_action_list > self.trainer_parameters['time_horizon']
teacher_filled = len(self.training_buffer[info_teacher.agents[l]]['actions']) > 0
if ((info_teacher.local_done[l] or horizon_reached) and teacher_filled):
agent_id = info_teacher.agents[l]
self.training_buffer.append_update_buffer(agent_id, batch_size=None, training_length=self.policy.sequence_length)
self.training_buffer[agent_id].reset_agent()
info_student = next_info[self.brain_name]
for l in range(len(info_student.agents)):
agent_actions = self.training_buffer_ppo[info_student.agents[l]]['actions']
if ((info_student.local_done[l] or len(agent_actions) > self.trainer_parameters['time_horizon']) and len(agent_actions) > 0):
agent_id = info_student.agents[l]
if info_student.local_done[l] and not info_student.max_reached[l]:
value_next = 0.0
else:
if info_student.max_reached[l]:
bootstrapping_info = self.training_buffer_ppo[agent_id].last_brain_info
idx = bootstrapping_info.agents.index(agent_id)
else:
bootstrapping_info = info_student
idx = l
value_next = self.policy.get_value_estimate(bootstrapping_info, idx)
self.training_buffer_ppo[agent_id]['advantages'].set(
get_gae(
rewards=self.training_buffer_ppo[agent_id]['rewards'].get_batch(),
value_estimates=self.training_buffer_ppo[agent_id]['value_estimates'].get_batch(),
value_next=value_next,
gamma=self.trainer_parameters['gamma'],
lambd=self.trainer_parameters['lambd']))
self.training_buffer_ppo[agent_id]['discounted_returns'].set(
self.training_buffer_ppo[agent_id]['advantages'].get_batch()
+ self.training_buffer_ppo[agent_id]['value_estimates'].get_batch())
self.training_buffer_ppo.append_update_buffer(agent_id, batch_size=None, training_length=self.policy.sequence_length)
self.training_buffer_ppo[agent_id].reset_agent()
if info_student.local_done[l]:
self.stats['cumulative_reward'].append(
self.cumulative_rewards.get(agent_id, 0))
self.reward_buffer.appendleft(self.cumulative_rewards.get(agent_id, 0))
self.stats['episode_length'].append(
self.episode_steps.get(agent_id, 0))
self.cumulative_rewards[agent_id] = 0
self.episode_steps[agent_id] = 0
def end_episode(self):
"""
A signal that the Episode has ended. The buffer must be reset.
Get only called when the academy resets.
"""
self.training_buffer.reset_all()
self.training_buffer_ppo.reset_all()
for agent_id in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
for agent_id in self.episode_steps:
self.episode_steps[agent_id] = 0
def is_ready_update(self):
"""
Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not update_model() can be run
"""
cond_1 = len(self.training_buffer.update_buffer['actions']) > self.n_sequences
cond_2 = len(self.training_buffer_ppo.update_buffer['actions']) > self.n_sequences
return cond_1 or cond_2
def is_ready_update_bc(self):
"""
Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not update_model() can be run
"""
cond_1 = len(self.training_buffer.update_buffer['actions']) > self.n_sequences
return cond_1
def is_ready_update_ppo(self):
"""
Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not update_model() can be run
"""
cond_2 = len(self.training_buffer_ppo.update_buffer['actions']) > self.n_sequences
return cond_2
def update_policy(self):
"""
Updates the policy.
"""
self.n_sequences = max(int(self.trainer_parameters['batch_size'] / self.policy.sequence_length), 1)
num_epoch = self.trainer_parameters['num_epoch']
if self.is_ready_update_bc():
batch_losses = []
for k in range(num_epoch):
self.training_buffer.update_buffer.shuffle()
buffer = self.training_buffer.update_buffer
num_batches = min(len(self.training_buffer.update_buffer['actions']) // self.n_sequences,
self.batches_per_epoch)
for i in range(num_batches):
start = i * self.n_sequences
end = (i + 1) * self.n_sequences
mini_batch = buffer.make_mini_batch(start, end)
run_out = self.policy.update(mini_batch, self.n_sequences)
self.actionProbList.append(
"epoca: " + str(i) + "\n" + np.array2string(run_out['action_probs'], precision=2) + "\n")
self.trueActionList.append("epoca: " + str(i) + "\n" + str(run_out['action_oh']) + "\n")
loss = run_out['loss']
batch_losses.append(loss)
if len(batch_losses) > 0:
self.stats['losses'].append(np.mean(batch_losses))
else:
self.stats['losses'].append(0)
if self.is_ready_update_ppo():
value_total, policy_total, forward_total, inverse_total = [], [], [], []
advantages = self.training_buffer_ppo.update_buffer['advantages'].get_batch()
self.training_buffer_ppo.update_buffer['advantages'].set(
(advantages - advantages.mean()) / (advantages.std() + 1e-10))
for k in range(num_epoch):
self.training_buffer_ppo.update_buffer.shuffle()
buffer_ppo = self.training_buffer_ppo.update_buffer
num_batches_ppo = min(len(self.training_buffer_ppo.update_buffer['actions']) // self.n_sequences,
self.batches_per_epoch)
for i in range(num_batches_ppo):
start = i * self.n_sequences
end = (i + 1) * self.n_sequences
mini_batch = buffer_ppo.make_mini_batch(start, end)
run_out = self.policy.update_ppo(mini_batch, self.n_sequences)
value_total.append(run_out['value_loss'])
policy_total.append(np.abs(run_out['policy_loss']))
self.stats['value_loss'].append(np.mean(value_total))
self.stats['policy_loss'].append(np.mean(policy_total))
self.training_buffer_ppo.reset_update_buffer()
def discount_rewards(r, gamma=0.99, value_next=0.0):
"""
Computes discounted sum of future rewards for use in updating value estimate.
:param r: List of rewards.
:param gamma: Discount factor.
:param value_next: T+1 value estimate for returns calculation.
:return: discounted sum of future rewards as list.
"""
discounted_r = np.zeros_like(r)
running_add = value_next
for t in reversed(range(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
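# A small worked example of the recursion above, assuming gamma=0.99 and no
# bootstrap value: rewards [1.0, 1.0, 1.0] produce discounted returns
# [1 + 0.99*(1 + 0.99*1), 1 + 0.99*1, 1] = [2.9701, 1.99, 1.0].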
def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):
"""
Computes generalized advantage estimate for use in updating policy.
:param rewards: list of rewards for time-steps t to T.
:param value_next: Value estimate for time-step T+1.
:param value_estimates: list of value estimates for time-steps t to T.
:param gamma: Discount factor.
    :param lambd: GAE weighting factor.
:return: list of advantage estimates for time-steps t to T.
"""
value_estimates = np.asarray(value_estimates.tolist() + [value_next])
delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]
advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)
return advantage
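# A minimal runnable sketch of the two helpers above; the reward and value
# numbers are made-up sample values for a three-step episode with a zero
# bootstrap value.
if __name__ == "__main__":
    _rewards = np.array([1.0, 1.0, 1.0])
    _values = np.array([0.5, 0.5, 0.5])
    _returns = discount_rewards(_rewards, gamma=0.99)                 # [2.9701, 1.99, 1.0]
    _advantages = get_gae(_rewards, _values, gamma=0.99, lambd=0.95)  # what update_policy() normalizes
    print(_returns, _advantages)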
| 2.1875 | 2 |
backend/kesaseteli/common/tests/factories.py | jannetasa/yjdh | 0 | 12789443 | <gh_stars>0
import random
from datetime import date, timedelta
import factory
from shared.common.tests.factories import UserFactory
from applications.enums import (
ApplicationStatus,
ATTACHMENT_CONTENT_TYPE_CHOICES,
AttachmentType,
HiredWithoutVoucherAssessment,
SummerVoucherExceptionReason,
)
from applications.models import Application, Attachment, SummerVoucher
from companies.models import Company
class CompanyFactory(factory.django.DjangoModelFactory):
name = factory.Faker("company")
business_id = factory.Faker("numerify", text="#######-#")
company_form = "oy"
industry = factory.Faker("job")
street_address = factory.Faker("street_address")
postcode = factory.Faker("postcode")
city = factory.Faker("city")
ytj_json = factory.Faker("json")
class Meta:
model = Company
class AttachmentFactory(factory.django.DjangoModelFactory):
attachment_type = factory.Faker("random_element", elements=AttachmentType.values)
content_type = factory.Faker(
"random_element", elements=[val[1] for val in ATTACHMENT_CONTENT_TYPE_CHOICES]
)
attachment_file = factory.django.FileField(filename="file.pdf")
class Meta:
model = Attachment
class SummerVoucherFactory(factory.django.DjangoModelFactory):
summer_voucher_serial_number = factory.Faker("md5")
summer_voucher_exception_reason = factory.Faker(
"random_element", elements=SummerVoucherExceptionReason.values
)
employee_name = factory.Faker("name")
employee_school = factory.Faker("lexify", text="????? School")
employee_ssn = factory.Faker("bothify", text="######-###?")
employee_phone_number = factory.Faker("phone_number")
employee_home_city = factory.Faker("city")
employee_postcode = factory.Faker("postcode")
employment_postcode = factory.Faker("postcode")
employment_start_date = factory.Faker(
"date_between_dates",
date_start=date(date.today().year, 1, 1),
date_end=date.today() + timedelta(days=100),
)
employment_end_date = factory.LazyAttribute(
lambda o: o.employment_start_date + timedelta(days=random.randint(31, 364))
)
employment_work_hours = factory.Faker(
"pydecimal", left_digits=2, right_digits=1, min_value=1
)
employment_salary_paid = factory.Faker(
"pydecimal", left_digits=4, right_digits=2, min_value=1
)
employment_description = factory.Faker("sentence")
hired_without_voucher_assessment = factory.Faker(
"random_element", elements=HiredWithoutVoucherAssessment.values
)
class Meta:
model = SummerVoucher
class ApplicationFactory(factory.django.DjangoModelFactory):
company = factory.SubFactory(CompanyFactory)
user = factory.SubFactory(UserFactory)
status = factory.Faker("random_element", elements=ApplicationStatus.values)
street_address = factory.Faker("street_address")
contact_person_name = factory.Faker("name")
contact_person_email = factory.Faker("email")
contact_person_phone_number = factory.Faker("phone_number")
is_separate_invoicer = factory.Faker("boolean")
invoicer_name = factory.Faker("name")
invoicer_email = factory.Faker("email")
invoicer_phone_number = factory.Faker("phone_number")
class Meta:
model = Application
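# A minimal usage sketch, assuming a test run with database access (for example
# pytest's django_db marker); the field overrides are arbitrary example values.
#
#   @pytest.mark.django_db
#   def test_factories_build_related_rows():
#       company = CompanyFactory(city="Helsinki")
#       application = ApplicationFactory(company=company)
#       assert application.company.business_id
#       assert application.user is not None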
| 2.140625 | 2 |
bench.py | deseretdigital/statsite-proxy | 2 | 12789444 | <gh_stars>1-10
import socket
import time
import random
NUM = 1024 * 1024
KEYS = ["test", "foobar", "zipzap"]
VALS = [32, 100, 82, 101, 5, 6, 42, 73]
METS = []
for x in range(NUM):
key = random.choice(KEYS)
val = random.choice(VALS)
METS.append("%s:%f|c|@123\n" % (key, val))
s = socket.socket()
s.connect(("localhost", 8125))
start = time.time()
current = 0
while current < len(METS):
msg = "".join(METS[current:current + 1024])
current += 1024
s.sendall(msg)
s.close()
end = time.time()
print(NUM / (end - start), "ops/sec", (end - start), "sec")
| 2.375 | 2 |
backend/project_requests/apps.py | mnieber/taskboard | 0 | 12789445 | <reponame>mnieber/taskboard<filename>backend/project_requests/apps.py<gh_stars>0
from django.apps import AppConfig
class ProjectRequestsConfig(AppConfig):
name = "project_requests"
| 1.203125 | 1 |
ace/system/database/request_tracking.py | ace-ecosystem/ace2-core | 0 | 12789446 | <reponame>ace-ecosystem/ace2-core<gh_stars>0
# vim: ts=4:sw=4:et:cc=120
import json
import datetime
from operator import itemgetter
from typing import Optional, Union
import ace
from ace.analysis import Observable, AnalysisModuleType
from ace.system.base import AnalysisRequestTrackingBaseInterface
from ace.system.database.schema import AnalysisRequestTracking, analysis_request_links
from ace.constants import TRACKING_STATUS_ANALYZING, EVENT_AR_EXPIRED
from ace.system.requests import AnalysisRequest
from ace.system.caching import generate_cache_key
from ace.exceptions import UnknownAnalysisModuleTypeError
from sqlalchemy import and_, text
from sqlalchemy.sql import delete, update, select
from sqlalchemy.orm import selectinload
class DatabaseAnalysisRequestTrackingInterface(AnalysisRequestTrackingBaseInterface):
# if we switched to TRACKING_STATUS_ANALYZING then we start the expiration timer
async def i_track_analysis_request(self, request: AnalysisRequest):
# XXX we're using server-side time instead of database time
expiration_date = None
if request.status == TRACKING_STATUS_ANALYZING:
expiration_date = datetime.datetime.now() + datetime.timedelta(request.type.timeout)
db_request = AnalysisRequestTracking(
id=request.id,
expiration_date=expiration_date,
analysis_module_type=request.type.name if request.type else None,
cache_key=request.cache_key,
root_uuid=request.root.uuid,
json_data=request.to_json(),
)
async with self.get_db() as db:
await db.merge(db_request)
await db.commit()
async def i_link_analysis_requests(self, source: AnalysisRequest, dest: AnalysisRequest) -> bool:
from sqlalchemy import select, bindparam, String, and_
# when we process an analysis request we "lock" it by setting the lock field
# so if we try to link against an analysis request that is "locked" it fails
# INSERT INTO analysis_request_links ( source_id, dest_id )
# SELECT :source.id, :dest.id FROM analysis_tracking_request
# WHERE source.id = :source.id AND lock IS NULL
sel = select(
bindparam("s", type_=String).label("source_id"),
bindparam("d", type_=String).label("dest_id"),
).where(
and_(AnalysisRequestTracking.id == source.id, AnalysisRequestTracking.lock == None)
) # noqa:E711
update = analysis_request_links.insert().from_select(["source_id", "dest_id"], sel)
async with self.get_db() as db:
count = (await db.execute(update, {"s": source.id, "d": dest.id})).rowcount
await db.commit()
return count == 1
async def i_get_linked_analysis_requests(self, source: AnalysisRequest) -> list[AnalysisRequest]:
async with self.get_db() as db:
source_request = (
# NOTE you cannot do lazy loading with async in sqlalchemy 1.4
(
await db.execute(
select(AnalysisRequestTracking)
.options(selectinload(AnalysisRequestTracking.linked_requests))
.where(AnalysisRequestTracking.id == source.id)
)
).one_or_none()
)
if source_request is None:
return None
# I think this is where you have to be careful with async
return [AnalysisRequest.from_dict(json.loads(_.json_data), self) for _ in source_request[0].linked_requests]
async def i_lock_analysis_request(self, request: AnalysisRequest) -> bool:
async with self.get_db() as db:
count = (
await db.execute(
update(AnalysisRequestTracking)
.where(
and_(AnalysisRequestTracking.id == request.id, AnalysisRequestTracking.lock == None)
) # noqa:E711
.values(lock=text("CURRENT_TIMESTAMP"))
)
).rowcount
await db.commit()
return count == 1
async def i_unlock_analysis_request(self, request: AnalysisRequest) -> bool:
async with self.get_db() as db:
count = (
await db.execute(
update(AnalysisRequestTracking)
.where(
and_(AnalysisRequestTracking.id == request.id, AnalysisRequestTracking.lock != None)
) # noqa:E711
.values(lock=None)
)
).rowcount
await db.commit()
return count == 1
async def i_delete_analysis_request(self, key: str) -> bool:
async with self.get_db() as db:
count = (
await db.execute(delete(AnalysisRequestTracking).where(AnalysisRequestTracking.id == key))
).rowcount
await db.commit()
return count == 1
async def i_get_expired_analysis_requests(self) -> list[AnalysisRequest]:
async with self.get_db() as db:
result = (
await db.execute(
select(AnalysisRequestTracking).where(
datetime.datetime.now() > AnalysisRequestTracking.expiration_date
)
)
).all()
return [AnalysisRequest.from_dict(json.loads(_[0].json_data), self) for _ in result]
# this is called when an analysis module type is removed (or expired)
async def i_clear_tracking_by_analysis_module_type(self, amt: AnalysisModuleType):
async with self.get_db() as db:
await db.execute(
delete(AnalysisRequestTracking).where(AnalysisRequestTracking.analysis_module_type == amt.name)
)
await db.commit()
async def i_get_analysis_request_by_request_id(self, key: str) -> Union[AnalysisRequest, None]:
async with self.get_db() as db:
result = (
await db.execute(select(AnalysisRequestTracking).where(AnalysisRequestTracking.id == key))
).one_or_none()
if result is None:
return None
return AnalysisRequest.from_dict(json.loads(result[0].json_data), self)
async def i_get_analysis_requests_by_root(self, key: str) -> list[AnalysisRequest]:
async with self.get_db() as db:
return [
AnalysisRequest.from_dict(json.loads(_[0].json_data), self)
for _ in (
await db.execute(select(AnalysisRequestTracking).where(AnalysisRequestTracking.root_uuid == key))
).all()
]
async def i_get_analysis_request_by_cache_key(self, key: str) -> Union[AnalysisRequest, None]:
assert isinstance(key, str)
async with self.get_db() as db:
result = (
await db.execute(select(AnalysisRequestTracking).where(AnalysisRequestTracking.cache_key == key))
).one_or_none()
if result is None:
return None
return AnalysisRequest.from_dict(json.loads(result[0].json_data), self)
async def i_process_expired_analysis_requests(self, amt: AnalysisModuleType) -> int:
assert isinstance(amt, AnalysisModuleType)
async with self.get_db() as db:
for db_request in await db.execute(
select(AnalysisRequestTracking).where(
and_(
AnalysisRequestTracking.analysis_module_type == amt.name,
datetime.datetime.now() > AnalysisRequestTracking.expiration_date,
)
)
):
request = AnalysisRequest.from_json(db_request[0].json_data, self)
await self.fire_event(EVENT_AR_EXPIRED, request)
try:
await self.queue_analysis_request(request)
except UnknownAnalysisModuleTypeError:
                    await self.delete_analysis_request(request)
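# A minimal maintenance sketch using only calls defined above; the surrounding
# scheduler, the `system` instance and the registered module type `amt` are
# assumed:
#
#   async def expire_stale_requests(system, amt):
#       # re-queues (or deletes) any request whose per-module timeout has elapsed
#       await system.i_process_expired_analysis_requests(amt)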
| 2.046875 | 2 |
src/selection/AttributeSelector.py | lyonva/Nue | 0 | 12789447 | <gh_stars>0
from utils import ps
class AttributeSelector(ps):
def __init__(self, name, as_class, parameters):
self.name = name
self.as_class = as_class
self.parameters = parameters
| 2.328125 | 2 |
model/attack.py | Michaeltaoma/SecureCovid | 1 | 12789448 | <reponame>Michaeltaoma/SecureCovid<filename>model/attack.py
import torch.nn as nn
class AttackModel(nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(AttackModel, self).__init__()
        # Input layer maps n_feature inputs to n_hidden hidden units.
self.layer_1 = nn.Linear(n_feature, n_hidden)
self.layer_2 = nn.Linear(n_hidden, n_hidden)
self.layer_out = nn.Linear(n_hidden, 1)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.1)
self.batchnorm1 = nn.BatchNorm1d(n_hidden)
self.batchnorm2 = nn.BatchNorm1d(n_hidden)
def forward(self, x):
x = self.relu(self.layer_1(x))
x = self.batchnorm1(x)
x = self.relu(self.layer_2(x))
x = self.batchnorm2(x)
x = self.dropout(x)
x = self.layer_out(x)
return x
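# A minimal usage sketch with made-up sizes: a batch of 4 samples, 12 input
# features, a hidden width of 64 and the single logit this model emits
# (presumably consumed by a sigmoid/BCE-with-logits loss during training).
if __name__ == "__main__":
    import torch
    model = AttackModel(n_feature=12, n_hidden=64, n_output=1)
    model.eval()  # use BatchNorm running statistics for a small inference batch
    with torch.no_grad():
        logits = model(torch.randn(4, 12))
    print(logits.shape)  # torch.Size([4, 1])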
| 2.828125 | 3 |
third_party/buildbot_slave_8_4/buildslave/commands/git.py | bopopescu/build | 0 | 12789449 | <reponame>bopopescu/build<filename>third_party/buildbot_slave_8_4/buildslave/commands/git.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
from twisted.internet import defer
from buildslave.commands.base import SourceBaseCommand
from buildslave import runprocess
from buildslave.commands.base import AbandonChain
class Git(SourceBaseCommand):
"""Git specific VC operation. In addition to the arguments
handled by SourceBaseCommand, this command reads the following keys:
['repourl'] (required): the upstream GIT repository string
['branch'] (optional): which version (i.e. branch or tag)
to retrieve. Default: "master".
['submodules'] (optional): whether to initialize and update
submodules. Default: False.
['ignore_ignores'] (optional): ignore ignores when purging changes
(default true)
['reference'] (optional): use this reference repository
to fetch objects.
['gerrit_branch'] (optional): which virtual branch to retrieve.
['progress'] (optional): have git output progress markers,
avoiding timeouts for long fetches;
requires Git 1.7.2 or later.
['shallow'] (optional): if true, use shallow clones that do not
also fetch history
"""
header = "git operation"
def setup(self, args):
SourceBaseCommand.setup(self, args)
self.repourl = args['repourl']
self.branch = args.get('branch')
if not self.branch:
self.branch = "master"
self.sourcedata = "%s %s\n" % (self.repourl, self.branch)
self.submodules = args.get('submodules')
self.ignore_ignores = args.get('ignore_ignores', True)
self.reference = args.get('reference', None)
self.gerrit_branch = args.get('gerrit_branch', None)
def _fullSrcdir(self):
return os.path.join(self.builder.basedir, self.srcdir)
def sourcedirIsUpdateable(self):
return os.path.isdir(os.path.join(self._fullSrcdir(), ".git"))
def _dovccmd(self, command, cb=None, **kwargs):
git = self.getCommand("git")
c = runprocess.RunProcess(self.builder, [git] + command, self._fullSrcdir(),
sendRC=False, timeout=self.timeout,
maxTime=self.maxTime, usePTY=False, **kwargs)
self.command = c
d = c.start()
if cb:
d.addCallback(self._abandonOnFailure)
d.addCallback(cb)
return d
def sourcedataMatches(self):
# If the repourl matches the sourcedata file, then we can say that the
# sourcedata matches. We can ignore branch changes, since Git can work
# with many branches fetched, and we deal with it properly in
# doVCUpdate. So, basically, as long as the file exists, consider it
# to match
try:
self.readSourcedata()
except IOError:
return False
return True
def _cleanSubmodules(self, res):
command = ['submodule', 'foreach', 'git', 'clean', '-f', '-d']
if self.ignore_ignores:
command.append('-x')
return self._dovccmd(command)
def _updateSubmodules(self, res):
return self._dovccmd(['submodule', 'update'], self._cleanSubmodules)
def _initSubmodules(self, res):
if self.submodules:
return self._dovccmd(['submodule', 'init'], self._updateSubmodules)
else:
return defer.succeed(0)
def _didHeadCheckout(self, res):
# Rename branch, so that the repo will have the expected branch name
# For further information about this, see the commit message
command = ['branch', '-M', self.branch]
return self._dovccmd(command, self._initSubmodules)
def _didFetch(self, res):
if self.revision:
head = self.revision
else:
head = 'FETCH_HEAD'
# That is not sufficient. git will leave unversioned files and empty
# directories. Clean them up manually in _didReset.
command = ['reset', '--hard', head]
return self._dovccmd(command, self._didHeadCheckout)
def maybeNotDoVCFallback(self, res):
# If we were unable to find the branch/SHA on the remote,
# clobbering the repo won't help any, so just abort the chain
if hasattr(self.command, 'stderr'):
if "Couldn't find remote ref" in self.command.stderr:
raise AbandonChain(-1)
# Update first runs "git clean", removing local changes,
# if the branch to be checked out has changed. This, combined
    # with the later "git reset", is equivalent to clobbering the repo,
# but it's much more efficient.
def doVCUpdate(self):
try:
# Check to see if our branch has changed
diffbranch = self.sourcedata != self.readSourcedata()
except IOError:
diffbranch = False
if diffbranch:
command = ['clean', '-f', '-d']
if self.ignore_ignores:
command.append('-x')
return self._dovccmd(command, self._didClean)
return self._didClean(None)
def _doFetch(self, dummy, branch):
# The plus will make sure the repo is moved to the branch's
# head even if it is not a simple "fast-forward"
command = ['fetch', '-t', self.repourl, '+%s' % branch]
# If the 'progress' option is set, tell git fetch to output
# progress information to the log. This can solve issues with
# long fetches killed due to lack of output, but only works
# with Git 1.7.2 or later.
if self.args.get('progress'):
command.append('--progress')
self.sendStatus({"header": "fetching branch %s from %s\n"
% (branch, self.repourl)})
return self._dovccmd(command, self._didFetch, keepStderr=True)
def _didClean(self, dummy):
branch = self.gerrit_branch or self.branch
# After a clean, try to use the given revision if we have one.
if self.revision:
# We know what revision we want. See if we have it.
d = self._dovccmd(['reset', '--hard', self.revision],
self._initSubmodules)
# If we are unable to reset to the specified version, we
# must do a fetch first and retry.
d.addErrback(self._doFetch, branch)
return d
else:
# No known revision, go grab the latest.
return self._doFetch(None, branch)
def _didInit(self, res):
# If we have a reference repository specified, we need to also set that
# up after the 'git init'.
if self.reference:
git_alts_path = os.path.join(self._fullSrcdir(), '.git', 'objects', 'info', 'alternates')
git_alts_content = os.path.join(self.reference, 'objects')
self.setFileContents(git_alts_path, git_alts_content)
return self.doVCUpdate()
def doVCFull(self):
git = self.getCommand("git")
# If they didn't ask for a specific revision, we can get away with a
# shallow clone.
if not self.args.get('revision') and self.args.get('shallow'):
cmd = [git, 'clone', '--depth', '1']
# If we have a reference repository, pass it to the clone command
if self.reference:
cmd.extend(['--reference', self.reference])
cmd.extend([self.repourl, self._fullSrcdir()])
c = runprocess.RunProcess(self.builder, cmd, self.builder.basedir,
sendRC=False, timeout=self.timeout,
maxTime=self.maxTime, usePTY=False)
self.command = c
cmdexec = c.start()
cmdexec.addCallback(self._didInit)
return cmdexec
else:
os.makedirs(self._fullSrcdir())
return self._dovccmd(['init'], self._didInit)
def parseGotRevision(self):
command = ['rev-parse', 'HEAD']
def _parse(res):
hash = self.command.stdout.strip()
if len(hash) != 40:
return None
return hash
return self._dovccmd(command, _parse, keepStdout=True)
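# A minimal sketch of the argument dictionary this command consumes, based on
# the keys documented in the class docstring above; the values are illustrative
# and the dict is normally supplied by the buildmaster together with the
# arguments handled by SourceBaseCommand:
#
#   args = {
#       'repourl': 'git://example.com/project.git',  # required
#       'branch': 'release-1.0',                     # default: "master"
#       'submodules': True,                          # init and update submodules
#       'ignore_ignores': True,                      # pass -x to "git clean"
#       'reference': '/var/cache/git/project',       # reference repository for objects
#       'progress': True,                            # requires Git 1.7.2 or later
#       'shallow': False,                            # depth-1 clone only when no revision
#   }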
| 1.84375 | 2 |
tests/test_backend_sql.py | savi-dev/keystone | 0 | 12789450 | <filename>tests/test_backend_sql.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone import catalog
from keystone.catalog.backends import sql as catalog_sql
from keystone.common.sql import util as sql_util
from keystone import config
from keystone import exception
from keystone.identity.backends import sql as identity_sql
from keystone import test
from keystone.token.backends import sql as token_sql
import default_fixtures
import test_backend
CONF = config.CONF
class SqlIdentity(test.TestCase, test_backend.IdentityTests):
def setUp(self):
super(SqlIdentity, self).setUp()
self.config([test.etcdir('keystone.conf.sample'),
test.testsdir('test_overrides.conf'),
test.testsdir('backend_sql.conf')])
sql_util.setup_test_database()
self.identity_api = identity_sql.Identity()
self.load_fixtures(default_fixtures)
def test_delete_user_with_tenant_association(self):
user = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex}
self.identity_api.create_user(user['id'], user)
self.identity_api.add_user_to_tenant(self.tenant_bar['id'],
user['id'])
self.identity_api.delete_user(user['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_tenants_for_user,
user['id'])
def test_create_null_user_name(self):
user = {'id': uuid.uuid4().hex,
'name': None,
'password': uuid.uuid4().hex}
self.assertRaises(exception.ValidationError,
self.identity_api.create_user,
user['id'],
user)
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
user['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user_by_name,
user['name'])
def test_create_null_tenant_name(self):
tenant = {'id': uuid.uuid4().hex,
'name': None}
self.assertRaises(exception.ValidationError,
self.identity_api.create_tenant,
tenant['id'],
tenant)
self.assertRaises(exception.TenantNotFound,
self.identity_api.get_tenant,
tenant['id'])
self.assertRaises(exception.TenantNotFound,
self.identity_api.get_tenant_by_name,
tenant['name'])
def test_create_null_role_name(self):
role = {'id': uuid.uuid4().hex,
'name': None}
self.assertRaises(exception.Conflict,
self.identity_api.create_role,
role['id'],
role)
self.assertRaises(exception.RoleNotFound,
self.identity_api.get_role,
role['id'])
def test_delete_tenant_with_user_association(self):
user = {'id': 'fake',
'name': 'fakeuser',
'password': '<PASSWORD>'}
self.identity_api.create_user('fake', user)
self.identity_api.add_user_to_tenant(self.tenant_bar['id'],
user['id'])
self.identity_api.delete_tenant(self.tenant_bar['id'])
tenants = self.identity_api.get_tenants_for_user(user['id'])
self.assertEquals(tenants, [])
def test_delete_user_with_metadata(self):
user = {'id': 'fake',
'name': 'fakeuser',
'password': '<PASSWORD>'}
self.identity_api.create_user('fake', user)
self.identity_api.create_metadata(user['id'],
self.tenant_bar['id'],
{'extra': 'extra'})
self.identity_api.delete_user(user['id'])
self.assertRaises(exception.MetadataNotFound,
self.identity_api.get_metadata,
user['id'],
self.tenant_bar['id'])
def test_delete_tenant_with_metadata(self):
user = {'id': 'fake',
'name': 'fakeuser',
'password': '<PASSWORD>'}
self.identity_api.create_user('fake', user)
self.identity_api.create_metadata(user['id'],
self.tenant_bar['id'],
{'extra': 'extra'})
self.identity_api.delete_tenant(self.tenant_bar['id'])
self.assertRaises(exception.MetadataNotFound,
self.identity_api.get_metadata,
user['id'],
self.tenant_bar['id'])
class SqlToken(test.TestCase, test_backend.TokenTests):
def setUp(self):
super(SqlToken, self).setUp()
self.config([test.etcdir('keystone.conf.sample'),
test.testsdir('test_overrides.conf'),
test.testsdir('backend_sql.conf')])
sql_util.setup_test_database()
self.token_api = token_sql.Token()
class SqlCatalog(test.TestCase, test_backend.CatalogTests):
def setUp(self):
super(SqlCatalog, self).setUp()
self.config([test.etcdir('keystone.conf.sample'),
test.testsdir('test_overrides.conf'),
test.testsdir('backend_sql.conf')])
sql_util.setup_test_database()
self.catalog_api = catalog_sql.Catalog()
self.catalog_man = catalog.Manager()
self.load_fixtures(default_fixtures)
| 1.8125 | 2 |