id (string, 1-7 chars) | text (string, 6-1.03M chars) | dataset_id (string, 1 distinct value)
---|---|---|
3227393
|
<filename>player/python/songs.py
# Song format: Note, octave, length
# C major scale
song1_tempo = 220
song1 = [
["Cn", 2, 1],
["Dn", 2, 1],
["En", 2, 1],
["Fn", 2, 1],
["Gn", 2, 1],
["An", 2, 1],
["Bn", 2, 1],
["Cn", 3, 1],
["Bn", 2, 1],
["An", 2, 1],
["Gn", 2, 1],
["Fn", 2, 1],
["En", 2, 1],
["Dn", 2, 1],
["Cn", 2, 1]
]
# Imperial March
song2_tempo = 104 * 8
song2 = [
["Gn", 1, 8 ],
["Gn", 1, 8 ],
["Gn", 1, 8 ],
["Ef", 1, 6 ],
["Bf", 1, 2 ],
["Gn", 1, 8 ],
["Ef", 1, 6 ],
["Bf", 1, 2 ],
["Gn", 1, 16 ],
["Dn", 2, 8 ],
["Dn", 2, 8 ],
["Dn", 2, 8 ],
["Ef", 2, 6 ],
["Bf", 1, 2 ],
["Gf", 1, 8 ],
["Ef", 1, 6 ],
["Bf", 1, 2 ],
["Gn", 1, 16 ],
["Gn", 2, 8 ],
["Gn", 1, 6 ],
["Gn", 1, 2 ],
["Gn", 2, 8 ],
["Gf", 2, 6 ],
["Fn", 2, 2 ],
["En", 2, 2 ],
["Ds", 2, 2 ],
["En", 2, 4 ],
["Zz", 0, 4 ],
["Gs", 1, 4 ],
["Cs", 2, 8 ],
["Bs", 2, 6 ],
["Bn", 1, 2 ],
["Bf", 1, 2 ],
["An", 1, 2 ],
["Bf", 1, 4 ],
["Zz", 0, 4 ],
["Ef", 1, 4 ],
["Gf", 1, 8 ],
["Ef", 1, 6 ],
["Gf", 1, 2 ],
["Bf", 1, 8 ],
["Gn", 1, 6 ],
["Bf", 1, 2 ],
["Dn", 2, 16 ],
["Gn", 2, 8 ],
["Gn", 1, 6 ],
["Gn", 1, 2 ],
["Gn", 2, 8 ],
["Gf", 2, 6 ],
["Fn", 2, 2 ],
["En", 2, 2 ],
["Ds", 2, 2 ],
["En", 2, 4 ],
["Zz", 0, 4 ],
["Gs", 1, 4 ],
["Cs", 2, 8 ],
["Bs", 2, 6 ],
["Bn", 1, 2 ],
["Bf", 1, 2 ],
["An", 1, 2 ],
["Bf", 1, 4 ],
["Zz", 0, 4 ],
["Ef", 1, 4 ],
["Gf", 1, 8 ],
["Ef", 1, 6 ],
["Bf", 1, 2 ],
["Gn", 1, 8 ],
["Ef", 1, 6 ],
["Bf", 1, 2 ],
["Gn", 1, 16 ]
]
# Metal Crusher
song3_tempo = 115 * 4
song3 = [
["Ef", 3, 3 ], # Bar 1
["Ef", 3, 1 ],
["Ef", 3, 2 ],
["Ef", 3, 2 ],
["Bn", 2, 3 ],
["Bn", 2, 1 ],
["Ef", 1, 4 ],
["Ef", 1, 3 ],
["Ef", 1, 1 ],
["Ef", 1, 2 ],
["Ef", 1, 2 ],
["Af", 2, 8 ], # End of intro
["Ef", 2, 1 ],
["En", 2, 1 ],
["Ef", 2, 1 ],
["Dn", 2, 1 ],
["Ef", 2, 2 ],
["Dn", 2, 1 ],
["Bn", 2, 1 ],
["Bf", 2, 2 ],
["Af", 2, 1 ],
["Gn", 1, 1 ],
["Af", 2, 2 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["Gn", 1, 1 ],
["Bn", 2, 1 ],
["Bf", 2, 1 ],
["Af", 2, 2 ],
["Af", 2, 1 ],
["Bf", 2, 1 ],
["Bn", 2, 2 ],
["Bf", 2, 1 ],
["Bn", 2, 1 ],
["Df", 2, 2 ],
["Bn", 2, 1 ],
["Bf", 2, 1 ],
["Gn", 1, 2 ],
["Af", 2, 1 ],
["Bf", 2, 1 ],
["Bn", 2, 2 ],
["Bf", 2, 1 ],
["Af", 2, 1 ],
["Ef", 1, 2 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["Bn", 2, 1 ],
["Bf", 2, 1 ],
["Gn", 1, 1 ],
["Af", 2, 2 ],
["Af", 2, 1 ],
["Gn", 1, 1 ],
["Af", 2, 2 ],
["Af", 3, 2 ],
["Ef", 2, 1 ], # Repeat
["En", 2, 1 ],
["Ef", 2, 1 ],
["Dn", 2, 1 ],
["Ef", 2, 2 ],
["Dn", 2, 1 ],
["Bn", 2, 1 ],
["Bf", 2, 2 ],
["Af", 2, 1 ],
["Gn", 1, 1 ],
["Af", 2, 2 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["Gn", 1, 1 ],
["Bn", 2, 1 ],
["Bf", 2, 1 ],
["Af", 2, 2 ],
["Af", 2, 1 ],
["Bf", 2, 1 ],
["Bn", 2, 2 ],
["Bf", 2, 1 ],
["Bn", 2, 1 ],
["Df", 2, 2 ],
["Bn", 2, 1 ],
["Bf", 2, 1 ],
["Gn", 1, 2 ],
["Af", 2, 1 ],
["Bf", 2, 1 ],
["Bn", 2, 2 ],
["Bf", 2, 1 ],
["Af", 2, 1 ],
["Ef", 1, 2 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["Bn", 2, 1 ],
["Bf", 2, 1 ],
["Gn", 1, 1 ],
["Af", 2, 2 ],
["Af", 2, 1 ],
["Gn", 1, 1 ],
["Af", 2, 2 ],
["Af", 3, 2 ]
]
song4_tempo = 135*2
song4 = [
["Gf", 1, 2],
["Zz", 0, 1],
["Gf", 1, 2],
["Gf", 1, 1],
["An", 1, 2],
["Gf", 1, 2],
["Zz", 0, 1],
["Gf", 1, 2],
["An", 1, 1],
["Af", 1, 1],
["Gf", 1, 1],
["Fn", 1, 2],
["Zz", 0, 1],
["Fn", 1, 2],
["Fn", 1, 1],
["Fn", 1, 2],
["Dn", 1, 2],
["Zz", 0, 1],
["Dn", 1, 2],
["Dn", 1, 1],
["En", 1, 1],
["Fn", 1, 1],
["Gf", 1, 2],
["Zz", 0, 1],
["Gf", 1, 2],
["Gf", 1, 1],
["An", 1, 2],
["Gf", 1, 2],
["Zz", 0, 1],
["Gf", 1, 2],
["An", 1, 1],
["Af", 1, 1],
["Gf", 1, 1],
["Fn", 1, 2],
["Zz", 0, 1],
["Fn", 1, 2],
["Fn", 1, 1],
["Fn", 1, 2],
["Dn", 1, 2],
["Zz", 0, 1],
["Dn", 1, 2],
["Dn", 1, 1],
["En", 1, 1],
["Fn", 1, 1],
# Part 2
["Gf", 2, 2],
["An", 2, 2],
["Gf", 2, 2],
["Df", 2, 2],
["Df", 2, 2],
["Df", 2, 1],
["Gf", 1, 3],
["Df", 2, 1],
["Df", 2, 1],
["Cn", 2, 2],
["Cn", 2, 1],
["Cn", 2, 2],
["Cn", 2, 1],
["Cn", 2, 1],
["Cn", 2, 1],
["An", 1, 2],
["An", 1, 2],
["Dn", 2, 2],
["En", 2, 1],
["Fn", 2, 1],
["Gf", 2, 2],
["An", 2, 2],
["Gf", 2, 2],
["Df", 2, 2],
["Df", 2, 2],
["Df", 2, 1],
["Gf", 1, 3],
["Df", 2, 1],
["Df", 2, 1],
["Cn", 2, 2],
["Cn", 2, 1],
["Cn", 2, 2],
["Cn", 2, 1],
["Cn", 2, 1],
["Cn", 2, 1],
["An", 1, 1],
["An", 1, 1],
["An", 1, 1],
["An", 1, 1],
["Gf", 1, 4],
]
song5_tempo = 135*2
song5 = [
["Gf", 1, 2],
["An", 1, 1],
["An", 1, 2],
["Gf", 1, 1],
["An", 1, 2],
["Gf", 1, 2],
["An", 1, 1],
["An", 1, 2],
["Gf", 1, 1],
["An", 1, 1],
["Bn", 1, 1],
["Fn", 1, 2],
["Af", 1, 1],
["Af", 1, 2],
["Fn", 1, 1],
["Af", 1, 2],
["Dn", 1, 2],
["Gf", 1, 1],
["Gf", 1, 2],
["Dn", 1, 1],
["An", 1, 1],
["Af", 1, 1],
]
# song5a_tempo = 160*2
# song5a = [
# ["Cn", 1, 2],
# ["Bn", 1, 1],
# ["Cn", 1, 1],
# ["An", 1, 2],
# ["Cn", 1, 2],
# ["Bn", 1, 1],
# ["Cn", 1, 1],
# ["An", 1, 2],
# ["Cn", 1, 2],
# ["Bn", 1, 1],
# ["Cn", 1, 1],
# ["An", 1, 2],
# ["Cn", 1, 2],
# ["Bn", 1, 1],
# ["Cn", 1, 1],
# ["An", 1, 2],
# ]
cantina_tempo = 132*4
cantina = [
# Part 1
["Fs", 1, 2],
["Bn", 1, 2],
["Fs", 1, 2],
["Bn", 1, 2],
["Fs", 1, 1],
["Bn", 1, 2],
["Fs", 1, 1],
["Zz", 1, 1],
["Fn", 1, 1],
["Fs", 1, 2],
# Part 2
["Fs", 1, 1],
["Fn", 1, 1],
["Fs", 1, 1],
["En", 1, 1],
["Zz", 1, 1],
["Ef", 1, 1],
["En", 1, 1],
["Ef", 1, 1],
["Dn", 1, 3],
["Bn", 0, 5],
# Part 3
["Fs", 1, 2],
["Bn", 1, 2],
["Fs", 1, 2],
["Bn", 1, 2],
["Fs", 1, 1],
["Bn", 1, 2],
["Fs", 1, 1],
["Zz", 1, 1],
["Fn", 1, 1],
["Fs", 1, 2],
["En", 1, 2],
["En", 1, 3],
["Ef", 1, 1],
["En", 1, 2],
["An", 1, 1],
["Gn", 1, 2],
["Fs", 1, 2],
["En", 1, 3],
# Part 4
["Fs", 1, 2],
["Bn", 1, 2],
["Fs", 1, 2],
["Bn", 1, 2],
["Fs", 1, 1],
["Bn", 1, 2],
["Fs", 1, 1],
["Zz", 1, 1],
["Fn", 1, 1],
["Fs", 1, 2],
["An", 1, 2],
["An", 1, 3],
["Fs", 1, 1],
["En", 1, 2],
["Dn", 1, 3],
["Bn", 0, 5],
# Leadup
["Bn", 0, 4],
["Dn", 1, 4],
["Fs", 1, 4],
["An", 1, 4],
["Cn", 2, 2],
["Bn", 1, 2],
["Fn", 1, 1],
["Fs", 1, 2],
["Dn", 1, 6],
["Zz", 1, 4]
]
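# --- Illustrative sketch, not part of the original file ---
# One way a player might walk the (note, octave, length) triplets above. The timing
# model (one length unit = one beat at `tempo` BPM) and the "Zz" rest convention are
# assumptions based on the data in this file; real playback is stubbed with print().
import time

def play_song(song, tempo):
    beat = 60.0 / tempo
    for note, octave, length in song:
        duration = length * beat
        if note == "Zz":          # rest entry
            time.sleep(duration)
        else:
            print(f"play {note}{octave} for {duration:.3f}s")
            time.sleep(duration)

# play_song(song1, song1_tempo)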
|
StarcoderdataPython
|
1630563
|
# -*- coding: utf-8 -*-
"""
website.tcp.services
~~~~~~~~~~~~~~~~
TCP Proxy services api.
"""
import os
import sys
import subprocess
from flask import flash, current_app
from flask_babel import gettext
from website.services import exec_command
from website import db
from website.tcp.models import Connection
from sqlalchemy import and_
iptables_command_prerouting = 'iptables -t nat {command} PREROUTING -p tcp --dport {local_port} -j DNAT --to-destination {dest_ip}:{dest_port};'
iptables_command_postrouting = 'iptables -t nat {command} POSTROUTING -d {dest_ip} -p tcp --dport {dest_port} -j MASQUERADE'
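# Example rendering (illustrative, not part of the original module): with
# command='-A', local_port=8080, dest_ip='10.0.0.5' and dest_port=80 the two
# templates above expand to
#   iptables -t nat -A PREROUTING -p tcp --dport 8080 -j DNAT --to-destination 10.0.0.5:80;
#   iptables -t nat -A POSTROUTING -d 10.0.0.5 -p tcp --dport 80 -j MASQUERADE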
def get_connections():
connections = Connection.query.all()
return connections
def create_connection(local_port, dest_ip, dest_port):
# Check whether an identical connection already exists in the database
con = Connection.query.filter(and_(Connection.local_port == local_port, Connection.dest_ip == dest_ip, Connection.dest_port == dest_port)).first()
if con is None:
add_command_prerouting = iptables_command_prerouting.format(command='-A', local_port=local_port, dest_ip=dest_ip, dest_port=dest_port)
add_command_postrouting = iptables_command_postrouting.format(command='-A', local_port=local_port, dest_ip=dest_ip, dest_port=dest_port)
execute_command(add_command_prerouting)
execute_command(add_command_postrouting)
connection = Connection(dest_ip, dest_port, local_port)
db.session.add(connection)
db.session.commit()
return connection
def delete_connection(connection_id):
connection = Connection.query.get(connection_id)
if connection is not None:
del_command_prerouting = iptables_command_prerouting.format(command='-D', local_port=connection.local_port, dest_ip=connection.dest_ip, dest_port=connection.dest_port)
del_command_postrouting = iptables_command_postrouting.format(command='-D', local_port=connection.local_port, dest_ip=connection.dest_ip, dest_port=connection.dest_port)
execute_command(del_command_prerouting)
execute_command(del_command_postrouting)
Connection.query.filter(Connection.id == connection_id).delete()
db.session.commit()
def execute_command(command):
return subprocess.Popen(command.split())
def _iptables_get_dnat_rules(message=True):
cmd = ['iptables', '-t', 'nat', '--list-rules']
try:
r = exec_command(cmd)
except:
current_app.logger.error('[SNAT]: exec_command error: %s:%s', cmd,
sys.exc_info()[1])
if message:
flash(gettext('iptables crashed, please contact your system admin.'), 'alert')
return False
if r['return_code'] != 0:
current_app.logger.error('[SNAT]: exec_command return: %s:%s:%s', cmd,
r['return_code'], r['stderr'])
if message:
message = gettext("get rules failed: %(err)s", err=r['stderr'])
flash(message, 'alert')
return False
rules = []
for item in r['stdout'].split('\n'):
if '-j DNAT' in item:
t = item.split()
if '--dport' in t and '--to-destination' in t:
p = t[t.index('--to-destination')+1]
address_port = p.split(':')
if len(address_port) == 2:
rules.append((t[t.index('--dport')+1], address_port[0], address_port[1]))
return rules
def ensure_iptables():
rules_iptable = _iptables_get_dnat_rules()
connections = Connection.query.all()
rules_database = []
for connection in connections:
_connection = (str(connection.local_port), connection.dest_ip, str(connection.dest_port))
rules_database.append(_connection)
# Adjust the iptables rules to match the database contents
for i in rules_database:
if i in rules_iptable:
rules_iptable.remove(i)
else:
add_command_prerouting = iptables_command_prerouting.format(command='-A', local_port=i[0], dest_ip=i[1], dest_port=i[2])
add_command_postrouting = iptables_command_postrouting.format(command='-A', local_port=i[0], dest_ip=i[1], dest_port=i[2])
execute_command(add_command_prerouting)
execute_command(add_command_postrouting)
# Remove rules that exist in iptables but not in the database
for i in rules_iptable:
del_command_prerouting = iptables_command_prerouting.format(command='-D', local_port=i[0], dest_ip=i[1], dest_port=i[2])
del_command_postrouting = iptables_command_postrouting.format(command='-D', local_port=i[0], dest_ip=i[1], dest_port=i[2])
execute_command(del_command_prerouting)
execute_command(del_command_postrouting)
def reset_iptables():
rules_iptable = _iptables_get_dnat_rules()
connections = Connection.query.all()
rules_database = []
for connection in connections:
_connection = (str(connection.local_port), connection.dest_ip, str(connection.dest_port))
rules_database.append(_connection)
# Add database records for rules that already exist in iptables
for i in rules_iptable:
if i in rules_database:
rules_database.remove(i)
else:
connection = Connection(i[1], i[2], i[0])
db.session.add(connection)
db.session.commit()
# Remove database records that have no matching iptables rule
for i in rules_database:
Connection.query.filter_by(local_port=i[0]).filter_by(dest_ip=i[1]).filter_by(dest_port=i[2]).delete()
db.session.commit()
|
StarcoderdataPython
|
1775530
|
<reponame>arterial-io/mesh
from scheme import *
from mesh.standard import *
class Example(Resource):
name = 'example'
version = 1
endpoints = 'create delete get put query update'
class schema:
required = Text(required=True, nonnull=True, sortable=True,
operators=['eq', 'ne', 'pre', 'suf', 'cnt'])
deferred = Text(deferred=True)
default = Integer(default=1)
constrained = Integer(minimum=2, maximum=4)
readonly = Integer(readonly=True)
boolean = Boolean()
integer = Integer(sortable=True, operators=['eq', 'in', 'gte', 'gt', 'lte', 'lt'])
class ExampleController(Controller):
resource = Example
version = (1, 0)
ExampleBundle = Bundle('examples',
mount(Example, ExampleController),
)
|
StarcoderdataPython
|
3240428
|
<reponame>nerdfiles/jahjah_works<filename>mini_charge/views.py
# -*- coding: utf-8 -*-
import json
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.shortcuts import render_to_response
from django.views.decorators.http import require_POST
from payments import models
import stripe
# ChargeForm and Customer are used below but not imported here; assumed to come from the surrounding payments app.
def _ajax_response(request, template, **kwargs):
response = {
"html": render_to_string(
template,
RequestContext(request, kwargs)
)
}
if "location" in kwargs:
response.update({"location": kwargs["location"]})
return HttpResponse(json.dumps(response), content_type="application/json")
def charge(request, form_class=ChargeForm):
data = {}
form = form_class(request.POST)
if form.is_valid():
try:
try:
customer = request.user.customer
except ObjectDoesNotExist:
customer = Customer.create(request.user)
except stripe.StripeError as e:
data['form'] = form
try:
data['error'] = e.args[0]
except IndexError:
data['error'] = 'Unknown error'
else:
data['error'] = form.errors
data['form'] = form
return _ajax_response(request, "mini_charge/_charge_form.html", **data)
|
StarcoderdataPython
|
1628479
|
<filename>lib/bindings/samples/server/test/test_algorithm_scheduler.py<gh_stars>100-1000
import unittest
from algorithm.algorithm_runner import AlgorithmRunner
from project_manager import ProjectManager
from algorithm.algorithm_scheduler import AlgorithmScheduler
from utils.settings_manager import SETTINGS
TEST_PTV_FILEPATH = "./test_data/procedural-4K-nvenc.ptv"
class TestAlgorithmScheduler(unittest.TestCase):
def setUp(self):
SETTINGS.current_audio_source = ProjectManager.AUDIO_NOAUDIO
self.project_manager = ProjectManager(TEST_PTV_FILEPATH)
self.stitcher_controller = self.project_manager.create_controller()
self.algorithm = AlgorithmRunner("testAlgo")
self.scheduler = AlgorithmScheduler()
def test_get_runner_no_scheduled(self):
"""
Test that when no algorithm scheduled - None is returned
"""
self.assertIsNone(self.scheduler.get_next_algorithm())
def test_get_algo_when_scheduled(self):
self.scheduler.schedule(self.algorithm)
algorithm = self.scheduler.get_next_algorithm()
self.assertEqual(algorithm, self.algorithm)
# def test_get_algo_when_scheduled_with_delay(self):
# self.scheduler.schedule(self.algorithm, 0.001)
# time.sleep(0.1)
# algorithm = self.scheduler.get_next_algorithm()
# self.assertEqual(algorithm, self.algorithm)
# As tornado is not running delay won't work -_-
def test_unique_schedule(self):
self.scheduler.schedule(self.algorithm)
self.scheduler.schedule(self.algorithm)
algorithm = self.scheduler.get_next_algorithm()
self.assertEqual(algorithm, self.algorithm)
self.assertIsNone(self.scheduler.get_next_algorithm())
def test_reschedule(self):
self.algorithm.repeat = True
self.scheduler.reschedule(self.algorithm)
algorithm = self.scheduler.get_next_algorithm()
self.assertEqual(algorithm, self.algorithm)
|
StarcoderdataPython
|
3337392
|
<filename>Library/settings.py<gh_stars>0
"""
Django settings for Library project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import datetime
# from Library.local_settings import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
PRODUCTION = (os.getenv('PRODUCTION', False) == 'True')
DOCKER = (os.getenv('DOCKER', False) == 'True')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*', ]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'django_nose',
# 'haystack',
'rest_framework',
'rest_framework_swagger',
'api',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'Library.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Library.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
if not DOCKER:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'library',
'HOST': 'db',
'PORT': '3306',
'USER': 'dbadmin',
'PASSWORD': '<PASSWORD>',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_URL = '/admin/login/'
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'zh-Hant'
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
USE_TZ = True
FILE_CHARSET = 'utf-8'
DEFAULT_CHARSET = 'utf-8'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), ]
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
# REST FRAMEWORK SETTING
AUTH_USER_MODEL = 'api.CustomUser'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
'DATETIME_FORMAT': '%Y-%m-%d %H:%M',
'DATETIME_INPUT_FORMATS': '%Y-%m-%d %H:%M',
'DATE_FORMAT': '%Y-%m-%d',
'DATE_INPUT_FORMATS': ['%Y-%m-%d', ],
'TIME_FORMAT': '%H:%M',
'TIME_INPUT_FORMATS': '%H:%M',
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
# JWT Setting
JWT_AUTH = {
'JWT_ENCODE_HANDLER':
'rest_framework_jwt.utils.jwt_encode_handler',
'JWT_DECODE_HANDLER':
'rest_framework_jwt.utils.jwt_decode_handler',
'JWT_PAYLOAD_HANDLER':
'rest_framework_jwt.utils.jwt_payload_handler',
'JWT_PAYLOAD_GET_USER_ID_HANDLER':
'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler',
'JWT_RESPONSE_PAYLOAD_HANDLER':
'rest_framework_jwt.utils.jwt_response_payload_handler',
'JWT_SECRET_KEY': SECRET_KEY,
'JWT_PUBLIC_KEY': None,
'JWT_PRIVATE_KEY': None,
'JWT_ALGORITHM': 'HS256',
'JWT_VERIFY': True,
'JWT_VERIFY_EXPIRATION': True,
'JWT_LEEWAY': 0,
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=1),
'JWT_AUDIENCE': None,
'JWT_ISSUER': None,
'JWT_ALLOW_REFRESH': True,
'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=30),
'JWT_AUTH_HEADER_PREFIX': 'JWT',
}
## SWAGGER_SETTING
SWAGGER_SETTINGS = {
'USE_SESSION_AUTH': True,
'SECURITY_DEFINITIONS': {
'basic': {
'type': 'basic'
},
'api_key': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
},
},
'DOC_EXPANSION': 'list',
"JSON_EDITOR": True,
"APIS_SORTER": "alpha",
"SHOW_REQUEST_HEADERS": True,
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--nocapture',
'--nologcapture',
]
# if PRODUCTION:
# HAYSTACK_CONNECTIONS = {
# 'default': {
# 'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
# 'URL': 'http://127.0.0.1:9200/',
# 'INDEX_NAME': 'haystack',
# },
# }
# EMAIL_USE_TLS = True
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL_HOST = 'smtp.gmail.com'
#
# EMAIL_PORT = 587
# DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
|
StarcoderdataPython
|
3224155
|
<filename>Basic Programs/tut_10.py<gh_stars>0
# A dictionary stores key-value pairs
d1 = {"Ahtisham":"Beef", "Waleed": "Burger", "Shakir": {"B":"breakfast", "L":"lunch", "D":"Dinner"}, "Naveeda":"Roti"}
print(d1["Ahtisham"])
d1["Ayesha"]= "choclate"
print(d1["Ayesha"])
del d1["Ayesha"]
print(d1)
d2 = d1.copy()
print(d2)
print(d1.keys())
print(d1.values())
d2.update({"Fati": "Rice"})
print(d2)
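# A few extra examples (not in the original snippet): .get() avoids KeyError for
# missing keys, and .items() iterates key/value pairs.
print(d1.get("Hamza"))              # None, key does not exist
print(d1.get("Hamza", "no dish"))   # fallback default: "no dish"
for name, dish in d2.items():
    print(name, "->", dish)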
|
StarcoderdataPython
|
142023
|
from typing import Dict
import numpy as np
class ZDataProcessor:
def __init__(self):
self.source_to_index = {
'acoustic': 0,
'electronic': 1,
'synthetic': 2
}
self.quality_to_index = {
'bright': 0,
'dark': 1,
'distortion': 2,
'fast_decay': 3,
'long_release': 4,
'multiphonic': 5,
'nonlinear_env': 6,
'percussive': 7,
'reverb': 8,
'tempo_sync': 9
}
self.pitch = 60
def process(self, inputs: Dict) -> Dict:
velocity = inputs.get('velocity') or 75
pitch = inputs.get('pitch') or 60
source = inputs.get('source') or 'acoustic'
qualities = inputs.get('qualities') or []
latent_sample = inputs.get('latent_sample') or [0.] * 16
self.pitch = pitch # storing the midi pitch value before normalizing
velocity = np.expand_dims([velocity / 127.], axis=0).astype('float32')
pitch = np.expand_dims([pitch / 127.], axis=0).astype('float32')
source = np.expand_dims([self.source_to_index[source] / 2.], axis=0).astype('float32')
latent_sample = np.expand_dims(latent_sample, axis=0).astype('float32')
qualities_one_hot = np.zeros((1, 10))
for _, q in enumerate(qualities):
qualities_one_hot[0, self.quality_to_index[q]] = 1.
return {
'velocity': velocity,
'pitch': pitch,
'instrument_source': source,
'qualities': qualities_one_hot.astype('float32'),
'z_input': latent_sample
}
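# Illustrative usage, not part of the original module: feed the processor a raw
# request dict and inspect the normalized outputs; the input values are arbitrary.
if __name__ == "__main__":
    processor = ZDataProcessor()
    outputs = processor.process({
        "velocity": 100,
        "pitch": 64,
        "source": "electronic",
        "qualities": ["bright", "percussive"],
    })
    for key, value in outputs.items():
        print(key, value.shape, value.dtype)  # each value is a float32 numpy array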
|
StarcoderdataPython
|
1647065
|
import gevent
from locust.env import Environment
from locust.stats import stats_printer, stats_history
from locust.log import setup_logging
setup_logging("INFO", None)
def create_env(user_class, ip_address="127.0.0.1"):
env = Environment(user_classes=[user_class])
env.create_local_runner()
env.create_web_ui(ip_address, 8089)
gevent.spawn(stats_printer(env.stats))
gevent.spawn(stats_history, env.runner)
return env
def start_test(env, user_count=1, spawn_rate=10, test_time=60):
env.runner.start(user_count, spawn_rate=spawn_rate)
gevent.spawn_later(test_time, lambda: env.runner.quit())
env.runner.greenlet.join()
env.web_ui.stop()
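# Illustrative usage, not part of the original module: a minimal HttpUser and a short
# local run. The host, endpoint, and timing values below are placeholders.
from locust import HttpUser, task, between

class WebsiteUser(HttpUser):
    host = "http://localhost:8000"
    wait_time = between(1, 2)

    @task
    def health(self):
        self.client.get("/health")

if __name__ == "__main__":
    env = create_env(WebsiteUser)
    start_test(env, user_count=5, spawn_rate=5, test_time=30)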
|
StarcoderdataPython
|
1606540
|
<reponame>ztfmars/OpenCV_Tutorial
# -*- coding: utf-8 -*-
import cv2
a=cv2.imread(r"../image/lena256.bmp",cv2.IMREAD_UNCHANGED)
b=cv2.cvtColor(a,cv2.COLOR_GRAY2BGR)
bb,bg,br=cv2.split(b)
cv2.imshow("bb",bb)
cv2.imshow("bg",bg)
cv2.imshow("br",br)
cv2.waitKey()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
123471
|
<gh_stars>1-10
import functools
import logging
import time
import traceback
def debug_func(func, _cls=None): # pragma: no cover
"""
Decorator: applies set of debug features (such as logging and performance counter) to a function
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
name = func.__name__ if _cls is None else f"{_cls.__name__}::{func.__name__}"
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.debug(f"START {name}")
start_time = time.perf_counter()
value = func(*args, **kwargs)
end_time = time.perf_counter()
run_time = end_time - start_time
log.debug(f"END {name}. Finished in {run_time:.4f} secs")
return value
except Exception as ex:
log.error("=============================================")
log.error("\n\n" + traceback.format_exc())
log.error("=============================================")
raise ex
return wrapper
def debug_class(_cls=None): # pragma: no cover
"""
Decorator: applies set of debug features (such as logging and performance counter) to a all methods of a class
"""
def wrap(cls):
for attr in cls.__dict__:
if callable(getattr(cls, attr)):
setattr(cls, attr, debug_func(getattr(cls, attr), cls))
return cls
if _cls is None:
return wrap
return wrap(_cls)
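# Illustrative usage, not part of the original module: decorating a class routes
# every method through debug_func, which logs start/end times at DEBUG level.
if __name__ == "__main__":  # pragma: no cover
    logging.basicConfig(level=logging.DEBUG)

    @debug_class
    class Worker:
        def compute(self, n):
            return sum(range(n))

    Worker().compute(1_000_000)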
|
StarcoderdataPython
|
4810040
|
# -*- coding: utf-8 -*-
"""English language translation
.. module:: client.plugins.gitclient.translation.en.messages
:platform: Windows, Unix
:synopsis: English language translation
.. moduleauthor:: <NAME> <<EMAIL>>
"""
language = {
'name': 'English',
'ISO-639-1': 'en'
}
msg = {
'htk_gitclient_menu' : "Git client",
'htk_gitclient_menu_clone' : "Clone repository",
'htk_gitclient_menu_repomanager' : "Repository manager",
'htk_gitclient_mandatory_field' : "{0} is mandatory field",
'htk_gitclient_clone_title' : "Clone repository",
'htk_gitclient_clone_url' : "Repository URL",
'htk_gitclient_clone_user' : "Username",
'htk_gitclient_clone_password' : "Password",
'htk_gitclient_clone_dirpath' : "Directory",
'htk_gitclient_clone_button' : "Clone",
'htk_gitclient_clone_start' : "Cloning Git repository from {0}",
'htk_gitclient_clone_finish' : "Repository was cloned",
'htk_gitclient_clone_project_exist' : "Project {0} already exists",
'htk_gitclient_repomanager_title' : "Repository manager",
'htk_gitclient_repomanager_config_title' : "Configuration",
'htk_gitclient_repomanager_config_url' : "URL",
'htk_gitclient_repomanager_config_user' : "Username",
'htk_gitclient_repomanager_config_password' : "Password",
'htk_gitclient_repomanager_config_name' : "Name",
'htk_gitclient_repomanager_config_email' : "Email",
'htk_gitclient_repomanager_config_save' : "Save",
'htk_gitclient_repomanager_config_saved' : "Git configuration for project {0} was saved",
'htk_gitclient_repomanager_commit_title' : "Commit",
'htk_gitclient_repomanager_commit_message' : "Message",
'htk_gitclient_repomanager_commit_author' : "Author",
'htk_gitclient_repomanager_commit_push' : "Push",
'htk_gitclient_repomanager_commit_commit' : "Commit",
'htk_gitclient_repomanager_commit_files' : "Changed files",
'htk_gitclient_repomanager_commit_select_all' : "Select all",
'htk_gitclient_repomanager_commit_no_files_selected' : "No files were selected for commit",
'htk_gitclient_repomanager_commit_finish' : "Commit for Git repository {0} changes was performed",
'htk_gitclient_repomanager_push' : 'Push',
'htk_gitclient_repomanager_push_start' : 'Pushing {0} Git repository to remote',
'htk_gitclient_repomanager_push_finish' : 'Repository was pushed',
'htk_gitclient_repomanager_pull' : 'Pull',
'htk_gitclient_repomanager_pull_start' : 'Pulling {0} Git repository from remote',
'htk_gitclient_repomanager_pull_finish' : 'Repository was pulled'
}
|
StarcoderdataPython
|
1669241
|
from graphviz import Digraph
colors = ["#ff7675", "#fdcb6e", "#74b9ff", "#a29bfe", "#fd79a8", "#81ecec"]
def get_all_names(data):
result = set(data.keys())
for value in data.values():
result.update(value.keys())
return result
def get_charity_names(data):
return sorted(list(set(get_all_names(data) - data.keys())))
def generate_graph(data, output):
users = sorted(data.keys())
charities = get_charity_names(data)
g = Digraph('G', filename=output)
g.attr(rankdir='LR', overlap='scale', newrank='true', fontname='helvetica')
g.attr('node', fontname='helvetica')
with g.subgraph(name='users') as c:
c.attr(
'node',
shape='square',
style='rounded,filled',
width="0.8",
fixedsize="true",
rankdir='LR',
)
c.graph_attr['rankdir'] = 'LR'
for index,user in enumerate(users):
color = colors[index % len(colors)]
c.node(user, color=color)
with g.subgraph(name='charities') as c:
c.attr('node',
shape='circle',
style='rounded,filled',
width="0.9",
color="#00b894",
fixedsize="true",
rankdir="TB",
)
c.graph_attr['rankdir'] = 'TB'
c.graph_attr['rank'] = 'same'
for index,charity in enumerate(charities):
c.node(charity)
g.attr('edge', arrowhead="diamond", color="#808080", fontname='helvetica')
for user,values in data.items():
for destination,percent in values.items():
g.edge(user, destination, label=f"{percent}%")
return g
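# Illustrative usage, not part of the original module: `data` maps each donor to the
# recipients they forward a percentage to; names that never appear as keys are
# treated as charities. The file name below is a placeholder.
if __name__ == "__main__":
    sample = {
        "alice": {"food bank": 60, "animal shelter": 40},
        "bob": {"food bank": 100},
    }
    graph = generate_graph(sample, "donations.gv")
    graph.render(view=False)  # writes donations.gv plus the rendered output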
|
StarcoderdataPython
|
142087
|
"""
WSGI config for todolist project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
sys.path.append('/django/todolist')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "todolist.settings")
application = get_wsgi_application()
"""
path = '/Django/todolist'
if path not in sys.path:
sys.path.insert(0, '/Django/todolist')
os.environ['DJANGO_SETTINGS_MODULE'] = 'todolist_app.settings'
#import django.core.handlers.wsgi #old version use
#application = django.core.handlers.wsgi.WSGIHandler()
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
"""
"""
from os.path import join,dirname,abspath
PROJECT_DIR = dirname(dirname(abspath(__file__)))
sys.path.insert(0,PROJECT_DIR)
os.environ["DJANGO_SETTINGS_MODULE"] = "todolist_app.settings"
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
"""
|
StarcoderdataPython
|
1658692
|
"""bc URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from apps.accounts import views
router = DefaultRouter()
router.register('accounts', views.AccountViewSet, base_name='accounts')
router.register('airdrops', views.AirDropViewSet, base_name='airdrops')
router.register('operations', views.OperationViewSet, base_name='operations')
urlpatterns = [
url(r'^api/', include(router.urls)),
url(r'', include(router.urls))
]
|
StarcoderdataPython
|
4823845
|
<filename>xcube/core/gen2/generator.py
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import traceback
from abc import ABC, abstractmethod
from typing import Optional, TypeVar, Type
from xcube.core.store import DataStorePool
from xcube.core.store import DataStorePoolLike
from xcube.util.assertions import assert_instance
from xcube.util.assertions import assert_true
from .error import CubeGeneratorError
from .progress import ConsoleProgressObserver
from .remote.config import ServiceConfigLike
from .request import CubeGeneratorRequestLike
from .response import CubeGeneratorResult
from .response import CubeInfoResult
from .response import GenericCubeGeneratorResult
R = TypeVar('R', bound=GenericCubeGeneratorResult)
class CubeGenerator(ABC):
"""
Abstract base class for cube generators.
Use the ``CubeGenerator.new()`` method to instantiate new
cube generators.
"""
@classmethod
def new(cls,
service_config: Optional[ServiceConfigLike] = None,
stores_config: Optional[DataStorePoolLike] = None,
raise_on_error: bool = False,
verbosity: int = 0,
**kwargs) -> 'CubeGenerator':
"""
Create a new cube generator from given configurations.
If *service_config* is given, it describes a remote xcube
generator service; otherwise a local cube generator is configured
using the optional *stores_config*.
The *service_config* parameter can be passed in different ways:
* An instance of :class:ServiceConfig.
* A ``str``. Then it is interpreted as a path to a YAML or JSON file
and the remote configuration is loaded from this file.
The file content may include template variables that are
interpolated by environment variables,
e.g. "${XCUBE_GEN_CLIENT_SECRET}".
* A ``dict``. Then it is interpreted as a remote configuration
JSON object.
If *stores_config* is given, it describes a pool of data stores to be
used as input and output for the cube generator. *stores_config*
is a mapping of store instance identifiers to configured store
instances. A store instance is a dictionary that has a mandatory
"store_id" property, which is the name of a registered xcube data store,
as well as an optional "store_params" property that may define data
store specific parameters.
Similar to *service_config*, the *stores_config* parameter
can be passed in different ways:
* An instance of :class:DataStorePool.
* A ``str``. Then it is interpreted as a YAML or JSON file path
and the stores configuration is loaded from this file.
* A ``dict``. Then it is interpreted as a stores configuration
JSON object.
The *service_config* and *stores_config* parameters cannot
be given both.
:param service_config: Service configuration.
:param stores_config: Data stores configuration.
:param raise_on_error: Whether to raise a CubeGeneratorError
exception on generator failures. If False, the default,
the returned result will have the "status" field set to "error"
while other fields such as "message", "traceback", "output"
provide more failure details.
:param verbosity: Level of verbosity, 0 means off.
:param kwargs: Extra arguments passed to the generator constructors.
"""
if service_config is not None:
from .remote.config import ServiceConfig
from .remote.generator import RemoteCubeGenerator
assert_true(stores_config is None,
'service_config and stores_config cannot be'
' given at the same time.')
assert_instance(service_config,
(str, dict, ServiceConfig, type(None)),
'service_config')
service_config = ServiceConfig.normalize(service_config) \
if service_config is not None else None
return RemoteCubeGenerator(service_config=service_config,
raise_on_error=raise_on_error,
verbosity=verbosity,
**kwargs)
else:
from .local.generator import LocalCubeGenerator
assert_instance(stores_config,
(str, dict, DataStorePool, type(None)),
'stores_config')
store_pool = DataStorePool.normalize(stores_config) \
if stores_config is not None else None
return LocalCubeGenerator(store_pool=store_pool,
raise_on_error=raise_on_error,
verbosity=verbosity)
def __init__(self,
raise_on_error: bool = False,
verbosity: int = 0):
self._raise_on_error = raise_on_error
self._verbosity = verbosity
def get_cube_info(self, request: CubeGeneratorRequestLike) \
-> CubeInfoResult:
"""
Get data cube information for given *request*.
The *request* argument can be
* an instance of ``CubeGeneratorRequest``;
* a ``dict``. In this case it is interpreted as JSON object and
parsed into a ``CubeGeneratorRequest``;
* a ``str``. In this case it is interpreted as path to a
YAML or JSON file, which is loaded and
parsed into a ``CubeGeneratorRequest``.
:param request: Cube generator request.
:return: a cube information result
of type :class:CubeInfoResult
:raises CubeGeneratorError: if cube info generation failed
:raises DataStoreError: if data store access failed
"""
try:
result = self._get_cube_info(request)
except CubeGeneratorError as e:
if self._raise_on_error:
raise e
return self._new_cube_generator_error_result(
CubeInfoResult, e
)
if result.status == 'error':
if self._raise_on_error:
raise self._new_generator_error_from_result(result)
return result
def generate_cube(self, request: CubeGeneratorRequestLike) \
-> CubeGeneratorResult:
"""
Generate the data cube for given *request*.
The *request* argument can be
* an instance of ``CubeGeneratorRequest``;
* a ``dict``. In this case it is interpreted as JSON object and
parsed into a ``CubeGeneratorRequest``;
* a ``str``. In this case it is interpreted as path to a
YAML or JSON file, which is loaded and
parsed into a ``CubeGeneratorRequest``.
Returns the cube reference which can be used as ``data_id`` in
``store.open_data(data_id)`` where *store* refers to the
store configured in ``output_config`` of the cube generator request.
:param request: Cube generator request.
:return: the cube generation result
of type :class:CubeGeneratorResult
:raises CubeGeneratorError: if cube generation failed
:raises DataStoreError: if data store access failed
"""
if self._verbosity:
ConsoleProgressObserver().activate()
try:
result = self._generate_cube(request)
except CubeGeneratorError as e:
if self._raise_on_error:
raise e
return self._new_cube_generator_error_result(
CubeGeneratorResult, e
)
finally:
if self._verbosity:
ConsoleProgressObserver().deactivate()
if result.status == 'error':
if self._raise_on_error:
raise self._new_generator_error_from_result(result)
return result
@abstractmethod
def _get_cube_info(self, request: CubeGeneratorRequestLike) \
-> CubeInfoResult:
"""
The implementation of the :meth:`get_cube_info` method
"""
@abstractmethod
def _generate_cube(self, request: CubeGeneratorRequestLike) \
-> CubeGeneratorResult:
"""
The implementation of the :meth:`generate_cube` method
"""
@classmethod
def _new_cube_generator_error_result(
cls,
result_type: Type[R],
e: CubeGeneratorError
) -> R:
tb = e.remote_traceback
if tb is None and e.__traceback__ is not None:
tb = traceback.format_tb(e.__traceback__)
return result_type(status='error',
message=f'{e}',
status_code=e.status_code,
output=e.remote_output,
traceback=tb)
@classmethod
def _new_generator_error_from_result(cls,
result: GenericCubeGeneratorResult):
return CubeGeneratorError(result.message,
status_code=result.status_code,
remote_output=result.output,
remote_traceback=result.traceback)
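# Illustrative usage, not part of this module: configure a local generator from a
# stores configuration of the shape described in ``CubeGenerator.new``. The store
# instance names, store_id values, and the request file path are placeholders.
#
# generator = CubeGenerator.new(stores_config={
#     'my_input_store': {'store_id': 'file', 'store_params': {'root': '/data/input'}},
#     'my_output_store': {'store_id': 'file', 'store_params': {'root': '/data/output'}},
# })
# result = generator.generate_cube('cube_request.yml')
# print(result.status)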
|
StarcoderdataPython
|
156878
|
<gh_stars>0
from scene_generator import generate_scene, clean_object
def find_object(id, obj_list):
for obj in obj_list:
if obj['id'] == id:
return obj
return None
def test_generate_scene_target_enclosed():
for _ in range(20):
scene = generate_scene('test', 'interaction', False)
metadata = scene['goal']['metadata']
type_list = scene['goal']['type_list']
assert len(type_list) == len(set(type_list))
for target_key in ('target', 'target_1', 'target_2'):
if target_key in metadata:
target_md = metadata[target_key]
target = find_object(target_md['id'], scene['objects'])
if target.get('locationParent', None) is None:
assert 'target_not_enclosed' in type_list
else:
assert 'target_enclosed' in type_list
def test_generate_scene_goal_info():
scene = generate_scene('test', 'interaction', False)
info_list = scene['goal']['info_list']
info_set = set(info_list)
assert len(info_list) == len(info_set)
for obj in scene['objects']:
obj_info_set = set(obj.get('info', []))
assert obj_info_set <= info_set
def test_clean_object():
obj = {
'id': 'thing1',
'dimensions': {
'x': 13,
'z': 42
},
'intphys_option': 'stuff',
'shows': [{
'stepBegin': 0,
'bounding_box': 'dummy'
}]
}
expected = {
'id': 'thing1',
'shows': [{
'stepBegin': 0
}]
}
clean_object(obj)
assert obj == expected
|
StarcoderdataPython
|
3317835
|
import unittest
from scapy.layers.ntp import NTPHeader
from cp1_client import CP1Client
from cp1_helper import generate_address_hash, generate_version_hash
from cp1_package import CP1Package
from cp1_session import CP1Session
from ntp_utils import bit_to_long
from test_constants import KEY_BITS_192
_first_32_bit = '00101111111111111110111010000111'
_last_24_bit = '000000000000000000000000'
_last_30_bit = '000000000000000000000000000000'
_last_32_bit = '00000000000000000000000000000000'
class CP1Tests(unittest.TestCase):
def test_generate_address_hash(self):
# Arrange
nonce = '011101011'
address = '0101'
expected_hash = '010011' # complete hex = 4c2e2f7c4b571674aac9f9d780d601d6
# Act
result_hash = generate_address_hash(nonce, address)
# Assert
self.assertEqual(expected_hash, result_hash)
class CP1ClientTests(unittest.TestCase):
def test_address_and_version_check_true(self):
# Arrange
raw_ntp = CP1Package()
address = '011011'
hash_6bit = generate_address_hash(_first_32_bit, address)[:6]
last_32_bit = _last_24_bit + hash_6bit + generate_version_hash(_first_32_bit, '00')
raw_ntp.set_transmit_timestamp(_first_32_bit + last_32_bit)
raw_ntp = CP1Package(raw_ntp.ntp()) # Create a new raw in order to validate, that the transformation works.
cp1_client = CP1Client(address=address, static_key='')
# Act
result = cp1_client.address_and_version_check(raw_ntp)
# Assert
self.assertTrue(result)
class CP1PackageTests(unittest.TestCase):
def test_hash_nonce(self):
# Arrange
ntp = NTPHeader()
time_value = bit_to_long(_first_32_bit + _last_32_bit)
ntp.sent = time_value
cp1_pck = CP1Package(ntp)
# Act
nonce = cp1_pck.hash_nonce()
# Assert
self.assertEqual(_first_32_bit, nonce)
def test_aes_nonce(self):
# Arrange
ntp = NTPHeader()
ntp.sent = bit_to_long(_first_32_bit + _last_32_bit)
ntp.ref = ntp.sent
cp1_pck = CP1Package(ntp)
# Act
nonce = cp1_pck.aes_nonce_bits()
# Assert
self.assertEqual(_first_32_bit + _first_32_bit, nonce)
class CP1SessionTests(unittest.TestCase):
def test_generate_init_pck_nonce_is_not_null_after_init(self):
# Arrange
session = CP1Session()
# Act
session.generate_init_pck(address='111101')
# Assert
self.assertIsNotNone(session.aes_nonce)
def test_generate_init_pck_nonce_address_and_version_are_hashed(self):
# Arrange
session = CP1Session()
# Act
result = session.generate_init_pck(address='111101')
# Assert
self.assertIsNotNone(session.aes_nonce)
def test_generate_aes_key_key_has_length_32(self):
# Arrange
session = CP1Session()
session.generate_init_pck('1.1.1.1')
# Act
# TODO repair
result_bytes = session.generate_aes_key(KEY_BITS_192)
# Assert
self.assertTrue(len(result_bytes) == 32)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
160248
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from azure.ai.ml._schema.core.fields import NestedField
from marshmallow import post_load
from azure.ai.ml.constants import AutoMLConstants
from azure.ai.ml._schema import StringTransformedEnum
from azure.ai.ml._schema.automl.table_vertical.table_vertical import AutoMLTableVerticalSchema
from azure.ai.ml._schema.automl.training_settings import RegressionTrainingSettingsSchema
from azure.ai.ml._utils.utils import camel_to_snake
from azure.ai.ml._restclient.v2022_02_01_preview.models import (
RegressionPrimaryMetrics,
TaskType,
)
class AutoMLRegressionSchema(AutoMLTableVerticalSchema):
task_type = StringTransformedEnum(
allowed_values=TaskType.REGRESSION,
casing_transform=camel_to_snake,
data_key=AutoMLConstants.TASK_TYPE_YAML,
required=True,
)
primary_metric = StringTransformedEnum(
allowed_values=[o.value for o in RegressionPrimaryMetrics],
casing_transform=camel_to_snake,
load_default=camel_to_snake(RegressionPrimaryMetrics.NORMALIZED_ROOT_MEAN_SQUARED_ERROR),
)
training = NestedField(RegressionTrainingSettingsSchema(), data_key=AutoMLConstants.TRAINING_YAML)
@post_load
def make(self, data, **kwargs) -> "RegressionJob":
from azure.ai.ml.entities._job.automl.tabular import RegressionJob
data.pop("task_type")
loaded_data = data
data_settings = {
"training_data": loaded_data.pop("training_data"),
"target_column_name": loaded_data.pop("target_column_name"),
"weight_column_name": loaded_data.pop("weight_column_name", None),
"validation_data": loaded_data.pop("validation_data", None),
"validation_data_size": loaded_data.pop("validation_data_size", None),
"cv_split_column_names": loaded_data.pop("cv_split_column_names", None),
"n_cross_validations": loaded_data.pop("n_cross_validations", None),
"test_data": loaded_data.pop("test_data", None),
"test_data_size": loaded_data.pop("test_data_size", None),
}
job = RegressionJob(**loaded_data)
job.set_data(**data_settings)
return job
|
StarcoderdataPython
|
3293573
|
<gh_stars>0
from flask_sqlalchemy import SQLAlchemy
from gspackage.flask_sqlalchemy_mysql import config
def init_db(app):
app.config.from_object(config)
db = SQLAlchemy()
db.init_app(app)
return db
|
StarcoderdataPython
|
1748464
|
from typing import Tuple
import gdb
class ASTSelectQueryPrinter:
def __init__(self, val: gdb.Value) -> None:
self.val: gdb.Value = val
def to_string(self) -> str:
eval_string = "info vtbl (*("+str(self.val.type).strip('&')+" *)("+str(self.val.address)+"))"
#example: "info vtbl (*(DB::IAST *)(0x7fff472e9b18))"
type_name=gdb.execute(eval_string, to_string=True).split('\n')[1].split("::")[1]
#eval_string = "DB::queryToString(*("+str(self.val.type).strip('&')+"*)("+str(self.val.address)+"))"
eval_string = "DB::serializeAST(*(DB::"+type_name+" *)("+str(self.val.address)+"), true)"
sql_string=gdb.parse_and_eval(eval_string)
distinct=self.val["distinct"]
group_by_with_totals=self.val["group_by_with_totals"]
group_by_with_rollup=self.val["group_by_with_rollup"]
group_by_with_cube=self.val["group_by_with_cube"]
limit_with_ties=self.val["limit_with_ties"]
positions=self.val["positions"]
#return "type={}, sql={}".format(type_name, sql_string)
return "type={}, sql={}, distinct={}, group_by_with_totals={}, group_by_with_rollup={}, group_by_with_cube={}, limit_with_tie={}, positions={}".format(type_name, sql_string,distinct,group_by_with_totals,group_by_with_rollup,group_by_with_cube,limit_with_ties,positions)
def display_hint(self) -> str:
return "ASTSelectQuery"
|
StarcoderdataPython
|
4830711
|
import os
import cv2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import argparse
import segmentation_models_v1 as sm
sm.set_framework('tf.keras')
from unet_std import unet # standard unet architecture
from helper_function import plot_deeply_history, plot_history, save_history
from helper_function import precision, recall, f1_score
from sklearn.metrics import confusion_matrix
from helper_function import plot_history_for_callback, save_history_for_callback
def str2bool(value):
return value.lower() == 'true'
def generate_folder(folder_name):
if not os.path.exists(folder_name):
os.system('mkdir -p {}'.format(folder_name))
parser = argparse.ArgumentParser()
parser.add_argument("--docker", type=str2bool, default = True)
parser.add_argument("--gpu", type=str, default = '0')
parser.add_argument("--epoch", type=int, default = 2)
parser.add_argument("--batch", type=int, default = 2)
parser.add_argument("--dataset", type=str, default = 'live_dead')
parser.add_argument("--lr", type=float, default = 1e-3)
parser.add_argument("--train", type=int, default = None)
parser.add_argument("--loss", type=str, default = 'focal+dice')
args = parser.parse_args()
print(args)
model_name = 'unet-set-{}-lr-{}-train-{}-loss-{}-bt-{}-ep-{}'.format(args.dataset, args.lr,\
args.train, args.loss, args.batch, args.epoch)
print(model_name)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.dataset == 'live_dead':
val_dim = 832
test_dim = val_dim
train_image_set = 'train_images2'
val_image_set = 'val_images2'
test_image_set = 'test_images2'
DATA_DIR = '/data/datasets/{}'.format(args.dataset) if args.docker else './data/{}'.format(args.dataset)
x_train_dir = os.path.join(DATA_DIR, train_image_set)
y_train_dir = os.path.join(DATA_DIR, 'train_masks')
x_valid_dir = os.path.join(DATA_DIR, val_image_set)
y_valid_dir = os.path.join(DATA_DIR, 'val_masks')
x_test_dir = os.path.join(DATA_DIR, test_image_set)
y_test_dir = os.path.join(DATA_DIR, 'test_masks')
print(x_train_dir); print(x_valid_dir); print(x_test_dir)
# classes for data loading
class Dataset:
"""
Args:
images_dir (str): path to images folder
masks_dir (str): path to segmentation masks folder
class_values (list): values of classes to extract from segmentation mask
"""
CLASSES = ['bk', 'live', 'inter', 'dead']
def __init__(
self,
images_dir,
masks_dir,
classes=None,
nb_data=None,
augmentation=None,
preprocessing=None,
):
id_list = os.listdir(images_dir)
if nb_data ==None:
self.ids = id_list
else:
self.ids = id_list[:int(min(nb_data,len(id_list)))]
self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.ids]
self.masks_fps = [os.path.join(masks_dir, image_id) for image_id in self.ids]
print(len(self.images_fps)); print(len(self.masks_fps))
self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]
self.augmentation = augmentation
self.preprocessing = preprocessing
def __getitem__(self, i):
# read data
image = cv2.imread(self.images_fps[i])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(self.masks_fps[i], 0)
masks = [(mask == v) for v in self.class_values]
mask = np.stack(masks, axis=-1).astype('float')
# add background if mask is not binary
if mask.shape[-1] != 1:
background = 1 - mask.sum(axis=-1, keepdims=True)
mask = np.concatenate((mask, background), axis=-1)
return image, mask
def __len__(self):
return len(self.ids)
class Dataloder(tf.keras.utils.Sequence):
"""Load data from dataset and form batches
Args:
dataset: instance of Dataset class for image loading and preprocessing.
batch_size: Integer number of images in batch.
shuffle: Boolean, if `True` shuffle image indexes each epoch.
"""
def __init__(self, dataset, batch_size=1, shuffle=False):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
self.indexes = np.arange(len(dataset))
self.on_epoch_end()
def __getitem__(self, i):
# collect batch data
start = i * self.batch_size
stop = (i + 1) * self.batch_size
data = []
for j in range(start, stop):
data.append(self.dataset[j])
# transpose list of lists
batch = [np.stack(samples, axis=0) for samples in zip(*data)]
return (batch[0], batch[1])
def __len__(self):
"""Denotes the number of batches per epoch"""
return len(self.indexes) // self.batch_size
def on_epoch_end(self):
"""Callback function to shuffle indexes each epoch"""
if self.shuffle:
self.indexes = np.random.permutation(self.indexes)
BATCH_SIZE = args.batch
CLASSES = ['live', 'inter', 'dead']
LR = args.lr
EPOCHS = args.epoch
n_classes = (len(CLASSES) + 1)
#create model
model = unet(classes=n_classes, activation='softmax')
# define optimizer
optim = tf.keras.optimizers.Adam(LR)
class_weights = [1,1,1,1]
# Segmentation models losses can be combined together by '+' and scaled by integer or float factor
# set per-class weights for dice_loss (order: live, inter, dead, background)
if args.loss =='focal+dice':
dice_loss = sm.losses.DiceLoss(class_weights=np.array(class_weights))
focal_loss = sm.losses.CategoricalFocalLoss()
total_loss = dice_loss + focal_loss
elif args.loss =='dice':
total_loss = sm.losses.DiceLoss(class_weights=np.array(class_weights))
elif args.loss == 'focal':
total_loss = sm.losses.CategoricalFocalLoss()
elif args.loss == 'ce':
total_loss = sm.losses.CategoricalCELoss()
elif args.loss == 'wce':
# weighted wce (live, injured, dead, bk)
#ratios: 0.01 , 0.056, 0.004, 0.929
class_weights = [100., 17.86, 250., 1.08]
total_loss = sm.losses.CategoricalCELoss(class_weights=np.array(class_weights))
metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
# compile keras model with defined optimizer, loss and metrics
model.compile(optimizer=optim, loss=total_loss, metrics = metrics)
# Dataset for train images
train_dataset = Dataset(
x_train_dir,
y_train_dir,
classes=CLASSES,
nb_data=args.train
)
# Dataset for validation images
valid_dataset = Dataset(
x_valid_dir,
y_valid_dir,
classes=CLASSES
)
train_dataloader = Dataloder(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_dataloader = Dataloder(valid_dataset, batch_size=1, shuffle=False)
print(train_dataloader[0][0].shape)
# check shapes for errors
assert train_dataloader[0][0].shape == (BATCH_SIZE, val_dim, val_dim, 3)
assert train_dataloader[0][1].shape == (BATCH_SIZE, val_dim, val_dim, n_classes)
model_folder = '/data/natcom_models/std_unet/{}'.format(model_name) if args.docker else './models/natcom_models/std_unet/{}'.format(model_name)
generate_folder(model_folder)
def concat_tile(im_list_2d):
return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])
def save_images(file_name, vols):
shp = vols.shape
ls, lx, ly, lc = shp
sx, sy = int(lx/256), int(ly/256)
vols = vols[:,::sx,::sy,:]
slice_list, rows = [], []
for si in range(vols.shape[0]):
slice = vols[si,:,:,:]
rows.append(slice)
if si%4 == 3 and not si == vols.shape[0]-1:
slice_list.append(rows)
rows = []
save_img = concat_tile(slice_list)
cv2.imwrite(file_name, save_img)
def map2rgb(maps):
shp = maps.shape
rgb_maps = np.zeros((shp[0], shp[1], shp[2], 3), dtype=np.uint8)
rgb_maps[:,:,:,0] = np.uint8((maps==0)*255)
rgb_maps[:,:,:,1] = np.uint8((maps==1)*255)
rgb_maps[:,:,:,2] = np.uint8((maps==2)*255)
return rgb_maps
class HistoryPrintCallback(tf.keras.callbacks.Callback):
def __init__(self):
super(HistoryPrintCallback, self).__init__()
self.history = {}
def on_epoch_end(self, epoch, logs=None):
if logs:
for key in logs.keys():
if epoch == 0:
self.history[key] = []
self.history[key].append(logs[key])
if epoch%5 == 0:
plot_history_for_callback(model_folder+'/train_history.png', self.history)
save_history_for_callback(model_folder, self.history)
gt_vols, pr_vols = [],[]
for i in range(0, len(valid_dataset),int(len(valid_dataset)/36)):
gt_vols.append(valid_dataloader[i][1])
pr_vols.append(self.model.predict(valid_dataloader[i]))
gt_vols = np.concatenate(gt_vols, axis = 0); gt_map = map2rgb(np.argmax(gt_vols,axis =-1))
pr_vols = np.concatenate(pr_vols, axis = 0); pr_map = map2rgb(np.argmax(pr_vols,axis =-1))
if epoch == 0:
save_images(model_folder+'/ground_truth.png'.format(epoch), gt_map)
save_images(model_folder+'/pr-{}.png'.format(epoch), pr_map)
# define callbacks for learning rate scheduling and best checkpoints saving
callbacks = [
tf.keras.callbacks.ModelCheckpoint(model_folder+'/best_model-{epoch:03d}.h5', save_weights_only=True, save_best_only=True, mode='min'),
HistoryPrintCallback(),
]
# train model
history = model.fit_generator(
train_dataloader,
steps_per_epoch=len(train_dataloader),
epochs=EPOCHS,
callbacks=callbacks,
validation_data=valid_dataloader,
validation_steps=len(valid_dataloader),
)
|
StarcoderdataPython
|
78673
|
<filename>test/mynet.py
import os
import torch
import torch.nn as nn
import torch.optim as optim
from flearn.client import net
class MyNet(net):
def __init__(self, model_fpath, init_model_name):
super(MyNet, self).__init__(model_fpath, init_model_name)
self.criterion = nn.CrossEntropyLoss()
def get(self):
seq = False
# net_local = MLP(28 * 28, 10) # mnist
net_local = MLP(3 * 224 * 224, 2) # covid2019
torch.save(net_local.state_dict(), self.init_model_name)
self.optimizer = optim.SGD(net_local.parameters(), lr=1e-3, momentum=0.9)
return net_local, seq
|
StarcoderdataPython
|
101153
|
import sys
from postagger.utils.classifier import MaximumEntropyClassifier
from postagger.utils.common import timeit, get_data_path
from postagger.utils.common import get_tags
from postagger.utils.preprocess import load_save_preprocessed_data
from postagger.utils.decoder import CompData
from postagger.utils.classifier import save_load_init_model
# params
load_model = False
load_matrices = False
load_preprocess = False
model_name = 'model2.pickle'
model_matrices = 'model2_matrices.pickle'
model_preprocess = 'model2_preprocess.pickle'
verbose = 1
# data files
train = 'train.wtag'
test = 'train2.wtag'
comp = 'comp.words'
# hyper params
# features
min_occurrence_dict = {
'wordtag-f100': 0,
'suffix-f101': 0,
'prefix-f102': 0,
'trigram-f103': 0,
'bigram-f104': 0,
'unigram-f105': 0,
'previousword-f106': 2,
'nextword-f107': 2,
'starting_capital': 0,
'capital_inside': 0,
'number_inside': 0,
'hyphen_inside': 0,
'pre_pre_word': 2,
'next_next_word': 2
}
# model
regularization = 1
@timeit
def main():
train_path = get_data_path(train)
train_sentences = CompData(train_path)
if load_model:
clf = save_load_init_model(clf=None, filename=model_name)
else:
if load_matrices:
clf = save_load_init_model(clf=None, filename=model_matrices)
else:
# count features occurrences
preprocessor = load_save_preprocessed_data(model_preprocess, train_sentences, load=load_preprocess)
# apply filtering
pdict = preprocessor.summarize_counts(method='cut', dict=min_occurrence_dict)
# init classifier with known tags
tags = get_tags(train)
clf = MaximumEntropyClassifier(train_sentences, pdict, tags)
save_load_init_model(clf=clf, filename=model_matrices)
print("Start fitting %d features" % clf.get_num_features())
print("Top enabled features per tag: " + str(clf.get_enabled_features_per_tag()))
clf.fit(reg=regularization, verbose=verbose)
save_load_init_model(clf=clf, filename=model_name)
# evaluate
# train
print("Evaluate train:")
train_predict = clf.predict(train_sentences)
print(train_predict)
# test
print("Evaluate test:")
test_path = get_data_path(test)
test_sentences = CompData(test_path)
t_predict = clf.predict(test_sentences)
print(t_predict)
"""
comp_path = get_data_path(comp)
comp_sentences = CompData(comp_path, comp=True)
comp_predict = clf.predict(comp_sentences)
print(comp_predict)
"""
def training():
train_path = get_data_path(train)
train_sentences = CompData(train_path)
test_path = get_data_path(test)
test_sentences = CompData(test_path)
preprocessor = load_save_preprocessed_data(model_preprocess, train_sentences, load=load_preprocess)
# apply filtering
pdict = preprocessor.summarize_counts(method='cut', dict=min_occurrence_dict)
# init classifier with known tags
tags = get_tags(train)
clf = MaximumEntropyClassifier(train_sentences, pdict, tags)
reg = [5e-3, 1e-2, 5e-2, 1e-1, 1, 3, 5, 10, 25, 50, 100, 500, 1000]
best_model = 'best_model.pickle'
best_acc = 0
test_acc = 0
results = {}
for r in reg:
print("Start fitting model, reg: ", str(r))
clf.fit(reg=r)
try:
print("Evaluate train:")
train_pred = clf.predict(train_sentences)
train_acc = train_pred['accuracy']
print("Evaluate test:")
test_pred = clf.predict(test_sentences)
test_acc = test_pred['accuracy']
results[('reg', r)] = {'train_acc': train_acc, 'test_acc': test_acc}
if test_acc > best_acc:
best_acc = test_acc
save_load_init_model(clf=clf, filename=best_model)
except Exception:
pass
print("Current results", results)
print("\n\n")
print("Final results")
print(results)
def training2():
# data files
train = 'train2.wtag'
train_path = get_data_path(train)
train_sentences = CompData(train_path, slice=(0, 630))
validation_sentences = CompData(train_path, slice=(630, 700))
preprocessor = load_save_preprocessed_data(model_preprocess, train_sentences, load=load_preprocess)
min_occurrence_dict = {
'wordtag-f100': 0,
'suffix-f101': 0,
'prefix-f102': 0,
'trigram-f103': 0,
'bigram-f104': 0,
'unigram-f105': 0,
'previousword-f106': 0,
'nextword-f107': 0,
'starting_capital': 0,
'capital_inside': 0,
'number_inside': 0,
'hyphen_inside': 0,
'pre_pre_word': 0,
'next_next_word': 0
}
reg = [1e-3, 1e-2, 1e-1]
min_occur = [ {'previousword-f106': 1, 'nextword-f107':1, 'pre_pre_word': 0, 'next_next_word': 0},
{'previousword-f106': 1, 'nextword-f107':1, 'pre_pre_word': 1, 'next_next_word': 1},
{'previousword-f106': 1, 'nextword-f107': 1, 'pre_pre_word': 2, 'next_next_word': 2},
{'previousword-f106': 0, 'nextword-f107': 0, 'pre_pre_word': 2, 'next_next_word': 2},
{'previousword-f106': 0, 'nextword-f107': 0, 'pre_pre_word': 1, 'next_next_word': 1},
{'previousword-f106': 2, 'nextword-f107': 2, 'pre_pre_word': 0, 'next_next_word': 0}]
best_model = 'best_model.pickle'
best_acc = 0
test_acc = 0
results = {}
for occur_dict in min_occur:
# apply filtering
# update dict
print("Init new classifier with updated occurrence dict")
print(occur_dict)
for key, value in occur_dict.items():
min_occurrence_dict[key] = value
pdict = preprocessor.summarize_counts(method='cut', dict=min_occurrence_dict)
# init classifier with known tags
tags = get_tags(train)
clf = MaximumEntropyClassifier(train_sentences, pdict, tags)
for r in reg:
print("Start fitting model, reg: ", str(r))
clf.fit(reg=r)
try:
print("Evaluate train:")
train_pred = clf.predict(train_sentences)
train_acc = train_pred['accuracy']
print("Evaluate validation:")
test_pred = clf.predict(validation_sentences)
test_acc = test_pred['accuracy']
results[('reg', r, str(occur_dict))] = {'train_acc': train_acc, 'validation_acc': test_acc}
if test_acc > best_acc:
best_acc = test_acc
save_load_init_model(clf=clf, filename=best_model)
except Exception:
pass
print("Current results", results)
print("\n\n")
print("Final results")
print(results)
if __name__ == '__main__':
mode = None
try:
mode = sys.argv[1]
except Exception as e:
pass
if mode == '-t':
training()
elif mode == '-t2':
training2()
else:
main()
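# Example invocations (sketch; assumes this file is the entry point):
#   python main.py        # fit once with the fixed hyper-parameters, then evaluate
#   python main.py -t     # regularization sweep over the `reg` list (training)
#   python main.py -t2    # feature-occurrence-threshold sweep (training2)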
|
StarcoderdataPython
|
3311734
|
<reponame>felipeaugustogudes/devito
#==============================================================================
# -*- encoding: utf-8 -*-
#==============================================================================
#==============================================================================
# Modules Imported from Python / Devito / Examples
#==============================================================================
#==============================================================================
# Python Modules and Imports
#==============================================================================
import numpy as np
import matplotlib.pyplot as plot
import math as mt
import sys
import time as tm
import matplotlib.ticker as mticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import ticker
from numpy import linalg as la
from matplotlib import cm
#==============================================================================
#==============================================================================
# Plot Settings
#==============================================================================
plot.rc('font' , size = 12) # controls default text sizes
plot.rc('axes' , titlesize = 12) # fontsize of the axes title
plot.rc('axes' , labelsize = 12) # fontsize of the x and y labels
plot.rc('xtick' , labelsize = 12) # fontsize of the tick labels
plot.rc('ytick' , labelsize = 12) # fontsize of the tick labels
plot.rc('legend', fontsize = 12) # legend fontsize
plot.rc('figure', titlesize = 12) # fontsize of the figure title
#==============================================================================
#==============================================================================
plot.close("all")
#==============================================================================
#==============================================================================
# Data Reading Tests
#==============================================================================
ptype = 1
normtype = 2
if(ptype==1):
nptx = 101
npty = 101
t0 = 0
tn = 2000
nrefesp = 10
xpositionv = np.array([500.0,1500.0,500.0,1500.0])
ypositionv = np.array([500.0,500.0,1500.0,1500.0])
timevalue = 600
setup1 = ('data_save/teste1/dt1/',4000,5,1)
setup2 = ('data_save/teste1/dt2/',2000,10,2)
setup3 = ('data_save/teste1/dt3/',1250,16,3)
setup4 = ('data_save/teste1/dt4/',1000,20,4)
setup5 = ('data_save/teste1/dt5/',800,25,5)
figsave = 'figures/teste1/'
vdts = np.array([0.5,1.0,1.6,2.0,2.5])
setup_list = [setup1,setup2,setup3,setup4,setup5]
orders_cho = np.array([1,3,5,7]) # 4, 8, 12, 16
times_cho = np.array([0,1,3,4])
domain_setup = (0,2000,0,2000,0,2000)
if(ptype==2):
nptx = 601
npty = 201
t0 = 0
tn = 4000
nrefesp = 5
xpositionv = np.array([4000.0,4000.0,4000.0,6000.0,6000.0,6000.0,8000.0,8000.0,8000.0,])
ypositionv = np.array([2000.0,2500.0,1500.0,3000.0,2000.0,2500.0,1500.0,3000.0,2000.0,2500.0,1500.0,3000.0])
timevalue = 1600
setup1 = ('data_save/teste2/dt1/',10000,2,1)
setup2 = ('data_save/teste2/dt2/',4000,5,2)
setup3 = ('data_save/teste2/dt3/',2500,8,3)
setup4 = ('data_save/teste2/dt4/',2000,10,4)
setup5 = ('data_save/teste2/dt5/',1666,12,5)
figsave = 'figures/teste2/'
vdts = np.array([0.4,1.0,1.6,2.0,2.4])
setup_list = [setup1,setup2,setup3,setup4,setup5]
orders_cho = np.array([1,3,5,7]) # 4, 8, 12, 16
times_cho = np.array([0,1,2,3])
domain_setup = (0,12000,0,4000,0,4000)
if(ptype==3):
nptx = 201
npty = 201
t0 = 0
tn = 2000
nrefesp = 5
xpositionv = np.array([500.0,1500.0,500.0,1500.0])
ypositionv = np.array([500.0,500.0,1500.0,1500.0])
timevalue = 500
setup1 = ('data_save/teste3/dt1/',4000,5,1)
setup2 = ('data_save/teste3/dt2/',2000,10,2)
setup3 = ('data_save/teste3/dt3/',1250,16,3)
setup4 = ('data_save/teste3/dt4/',1000,20,4)
setup5 = ('data_save/teste3/dt5/',800,25,5)
figsave = 'figures/teste3/'
vdts = np.array([0.5,1.0,1.6,2.0,2.5])
setup_list = [setup1,setup2,setup3,setup4,setup5]
orders_cho = np.array([1,3,5,7]) # 4, 8, 12, 16
times_cho = np.array([0,1,2,3])
domain_setup = (0,2000,0,2000,0,2000)
if(ptype==4):
nptx = 401
npty = 311
t0 = 0
tn = 3000
nrefesp = 5
xpositionv = np.array([30000.0,30000.0,30000.0,40000.0,40000.0,40000.0])
ypositionv = np.array([2500.0,5000.0,7500.0,2500.0,5000.0,7500.0,2500.0,5000.0,7500.0])
timevalue = 3000
setup1 = ('data_save/teste4/dt1/',7500,2,1)
setup2 = ('data_save/teste4/dt2/',3000,5,2)
setup3 = ('data_save/teste4/dt3/',1875,8,3)
setup4 = ('data_save/teste4/dt4/',1500,10,4)
setup5 = ('data_save/teste4/dt5/',1250,12,5)
figsave = 'figures/teste4/'
vdts = np.array([0.4,1.0,1.6,2.0,2.4])
setup_list = [setup1,setup2,setup3,setup4,setup5]
orders_cho = np.array([1,3,5,7]) # 4, 8, 12, 16
times_cho = np.array([0,1,2,3])
domain_setup = (25000,45000,0,9920,0,3000)
#==============================================================================
#==============================================================================
# Configuration Vectors
#==============================================================================
vmethod0 = np.array([0,1,1,1,1,1,1,1,1])
vmethod1 = np.array([1,1,3,7,7,7,7,7,7])
vmethod2 = np.array([1,4,2,1,2,1,2,1,2])
vmethod3 = np.array([2,4,6,8,10,12,14,16,18,20])
nteste = vmethod3.shape[0]
l1 = np.zeros(nteste)
l2 = np.zeros(nteste)
l3 = np.zeros(nteste)
l4 = np.zeros(nteste)
l5 = np.zeros(nteste)
l6 = np.zeros(nteste)
l7 = np.zeros(nteste)
l8 = np.zeros(nteste)
l9 = np.zeros(nteste)
for i in range(0,nteste):
l1[i] = 1
l2[i] = 1
l3[i] = 1
l4[i] = 1
l5[i] = 1
l6[i] = int(vmethod3[i]/2)
l7[i] = int(vmethod3[i]/2)
l8[i] = int(vmethod3[i]/4 + 1)
l9[i] = int(vmethod3[i]/4 + 1)
vmethod4 = [l1,l2,l3,l4,l5,l6,l7,l8,l9]
total_configs = 0
list_config = []
for i in range(0,vmethod0.shape[0]):
scheme = i
peso = vmethod0[i]
wauthor = vmethod1[i]
wtype = vmethod2[i]
vnvalue = vmethod4[i]
for l in range(0,vmethod3.shape[0]):
mvalue = vmethod3[l]
nvalue = int(vnvalue[l])
config = (peso,wauthor,wtype,mvalue,nvalue,scheme)
total_configs = total_configs + 1
list_config.append(config)
list_config = list(set(list_config))
nconfig = len(list_config)
#==============================================================================
#==============================================================================
# Loading Solutions
#==============================================================================
setup = setup1
mnormas_disp1 = np.load('%smnormas_disp_%d.npy'%(setup[0],setup[3]))
mnormas_rec1 = np.load('%smnormas_rec_%d.npy'%(setup[0],setup[3]))
mnormas_disp_select1 = np.load('%smnormas_disp_select_%d.npy'%(setup[0],setup[3]))
rec_num1 = np.load('%srec_num_%d.npy'%(setup[0],setup[3]))
solplot1 = np.load('%ssolplot_%d.npy'%(setup[0],setup[3]))
rec_select_num1 = np.load('%srec_select_num_%d.npy'%(setup[0],setup[3]))
solplot_select1 = np.load('%ssolplot_select_%d.npy'%(setup[0],setup[3]))
solplot_ref1 = np.load('%ssolplot_ref_%d.npy'%(setup[0],setup[3]))
rec_ref1 = np.load('%srec_ref_%d.npy'%(setup[0],setup[3]))
rec_select_ref1 = np.load('%srec_select_ref_%d.npy'%(setup[0],setup[3]))
timev_disp1 = np.load('%stimev_disp_%d.npy'%(setup[0],setup[3]))
timev_rec1 = np.load('%stimev_rec_%d.npy'%(setup[0],setup[3]))
ordersv1 = np.load('%sordersv_%d.npy'%(setup[0],setup[3]))
setup = setup2
mnormas_disp2 = np.load('%smnormas_disp_%d.npy'%(setup[0],setup[3]))
mnormas_rec2 = np.load('%smnormas_rec_%d.npy'%(setup[0],setup[3]))
mnormas_disp_select2 = np.load('%smnormas_disp_select_%d.npy'%(setup[0],setup[3]))
rec_num2 = np.load('%srec_num_%d.npy'%(setup[0],setup[3]))
solplot2 = np.load('%ssolplot_%d.npy'%(setup[0],setup[3]))
rec_select_num2 = np.load('%srec_select_num_%d.npy'%(setup[0],setup[3]))
solplot_select2 = np.load('%ssolplot_select_%d.npy'%(setup[0],setup[3]))
solplot_ref2 = np.load('%ssolplot_ref_%d.npy'%(setup[0],setup[3]))
rec_ref2 = np.load('%srec_ref_%d.npy'%(setup[0],setup[3]))
rec_select_ref2 = np.load('%srec_select_ref_%d.npy'%(setup[0],setup[3]))
timev_disp2 = np.load('%stimev_disp_%d.npy'%(setup[0],setup[3]))
timev_rec2 = np.load('%stimev_rec_%d.npy'%(setup[0],setup[3]))
ordersv2 = np.load('%sordersv_%d.npy'%(setup[0],setup[3]))
setup = setup3
mnormas_disp3 = np.load('%smnormas_disp_%d.npy'%(setup[0],setup[3]))
mnormas_rec3 = np.load('%smnormas_rec_%d.npy'%(setup[0],setup[3]))
mnormas_disp_select3 = np.load('%smnormas_disp_select_%d.npy'%(setup[0],setup[3]))
rec_num3 = np.load('%srec_num_%d.npy'%(setup[0],setup[3]))
solplot3 = np.load('%ssolplot_%d.npy'%(setup[0],setup[3]))
rec_select_num3 = np.load('%srec_select_num_%d.npy'%(setup[0],setup[3]))
solplot_select3 = np.load('%ssolplot_select_%d.npy'%(setup[0],setup[3]))
solplot_ref3 = np.load('%ssolplot_ref_%d.npy'%(setup[0],setup[3]))
rec_ref3 = np.load('%srec_ref_%d.npy'%(setup[0],setup[3]))
rec_select_ref3 = np.load('%srec_select_ref_%d.npy'%(setup[0],setup[3]))
timev_disp3 = np.load('%stimev_disp_%d.npy'%(setup[0],setup[3]))
timev_rec3 = np.load('%stimev_rec_%d.npy'%(setup[0],setup[3]))
ordersv3 = np.load('%sordersv_%d.npy'%(setup[0],setup[3]))
setup = setup4
mnormas_disp4 = np.load('%smnormas_disp_%d.npy'%(setup[0],setup[3]))
mnormas_rec4 = np.load('%smnormas_rec_%d.npy'%(setup[0],setup[3]))
mnormas_disp_select4 = np.load('%smnormas_disp_select_%d.npy'%(setup[0],setup[3]))
rec_num4 = np.load('%srec_num_%d.npy'%(setup[0],setup[3]))
solplot4 = np.load('%ssolplot_%d.npy'%(setup[0],setup[3]))
rec_select_num4 = np.load('%srec_select_num_%d.npy'%(setup[0],setup[3]))
solplot_select4 = np.load('%ssolplot_select_%d.npy'%(setup[0],setup[3]))
solplot_ref4 = np.load('%ssolplot_ref_%d.npy'%(setup[0],setup[3]))
rec_ref4 = np.load('%srec_ref_%d.npy'%(setup[0],setup[3]))
rec_select_ref4 = np.load('%srec_select_ref_%d.npy'%(setup[0],setup[3]))
timev_disp4 = np.load('%stimev_disp_%d.npy'%(setup[0],setup[3]))
timev_rec4 = np.load('%stimev_rec_%d.npy'%(setup[0],setup[3]))
ordersv4 = np.load('%sordersv_%d.npy'%(setup[0],setup[3]))
setup = setup5
mnormas_disp5 = np.load('%smnormas_disp_%d.npy'%(setup[0],setup[3]))
mnormas_rec5 = np.load('%smnormas_rec_%d.npy'%(setup[0],setup[3]))
mnormas_disp_select5 = np.load('%smnormas_disp_select_%d.npy'%(setup[0],setup[3]))
rec_num5 = np.load('%srec_num_%d.npy'%(setup[0],setup[3]))
solplot5 = np.load('%ssolplot_%d.npy'%(setup[0],setup[3]))
rec_select_num5 = np.load('%srec_select_num_%d.npy'%(setup[0],setup[3]))
solplot_select5 = np.load('%ssolplot_select_%d.npy'%(setup[0],setup[3]))
solplot_ref5 = np.load('%ssolplot_ref_%d.npy'%(setup[0],setup[3]))
rec_ref5 = np.load('%srec_ref_%d.npy'%(setup[0],setup[3]))
rec_select_ref5 = np.load('%srec_select_ref_%d.npy'%(setup[0],setup[3]))
timev_disp5 = np.load('%stimev_disp_%d.npy'%(setup[0],setup[3]))
timev_rec5 = np.load('%stimev_rec_%d.npy'%(setup[0],setup[3]))
ordersv5 = np.load('%sordersv_%d.npy'%(setup[0],setup[3]))
mnormas_disp = np.zeros((5,mnormas_disp1.shape[0],mnormas_disp1.shape[1]))
mnormas_disp[0,:,:] = mnormas_disp1[:,:]
mnormas_disp[1,:,:] = mnormas_disp2[:,:]
mnormas_disp[2,:,:] = mnormas_disp3[:,:]
mnormas_disp[3,:,:] = mnormas_disp4[:,:]
mnormas_disp[4,:,:] = mnormas_disp5[:,:]
mnormas_rec = np.zeros((5,mnormas_rec1.shape[0],mnormas_rec1.shape[1]))
mnormas_rec[0,:,0:mnormas_rec1.shape[1]] = mnormas_rec1[:,:]
mnormas_rec[1,:,0:mnormas_rec2.shape[1]] = mnormas_rec2[:,:]
mnormas_rec[2,:,0:mnormas_rec3.shape[1]] = mnormas_rec3[:,:]
mnormas_rec[3,:,0:mnormas_rec4.shape[1]] = mnormas_rec4[:,:]
mnormas_rec[4,:,0:mnormas_rec5.shape[1]] = mnormas_rec5[:,:]
mnormas_disp_select = np.zeros((5,mnormas_disp_select1.shape[0],mnormas_disp_select1.shape[1]))
mnormas_disp_select[0,:,:] = mnormas_disp_select1[:,:]
mnormas_disp_select[1,:,:] = mnormas_disp_select2[:,:]
mnormas_disp_select[2,:,:] = mnormas_disp_select3[:,:]
mnormas_disp_select[3,:,:] = mnormas_disp_select4[:,:]
mnormas_disp_select[4,:,:] = mnormas_disp_select5[:,:]
rec_num = np.zeros((5,rec_num1.shape[0],rec_num1.shape[1],rec_num1.shape[2]))
rec_num[0,:,0:rec_num1.shape[1],:] = rec_num1[:,:,:]
rec_num[1,:,0:rec_num2.shape[1],:] = rec_num2[:,:,:]
rec_num[2,:,0:rec_num3.shape[1],:] = rec_num3[:,:,:]
rec_num[3,:,0:rec_num4.shape[1],:] = rec_num4[:,:,:]
rec_num[4,:,0:rec_num5.shape[1],:] = rec_num5[:,:,:]
solplot = np.zeros((5,solplot1.shape[0],solplot1.shape[1],solplot1.shape[2],solplot1.shape[3]))
solplot[0,:,:,:,:] = solplot1[:,:,:,:]
solplot[1,:,:,:,:] = solplot2[:,:,:,:]
solplot[2,:,:,:,:] = solplot3[:,:,:,:]
solplot[3,:,:,:,:] = solplot4[:,:,:,:]
solplot[4,:,:,:,:] = solplot5[:,:,:,:]
rec_select_num = np.zeros((5,rec_select_num1.shape[0],rec_select_num1.shape[1],rec_select_num1.shape[2]))
rec_select_num[0,:,0:rec_select_num1.shape[1],:] = rec_select_num1[:,:,:]
rec_select_num[1,:,0:rec_select_num2.shape[1],:] = rec_select_num2[:,:,:]
rec_select_num[2,:,0:rec_select_num3.shape[1],:] = rec_select_num3[:,:,:]
rec_select_num[3,:,0:rec_select_num4.shape[1],:] = rec_select_num4[:,:,:]
rec_select_num[4,:,0:rec_select_num5.shape[1],:] = rec_select_num5[:,:,:]
solplot_select = np.zeros((5,solplot_select1.shape[0],solplot_select1.shape[1],solplot_select1.shape[2]))
solplot_select[0,:,:,0:solplot_select1.shape[2]] = solplot_select1[:,:,:]
solplot_select[1,:,:,0:solplot_select2.shape[2]] = solplot_select2[:,:,:]
solplot_select[2,:,:,0:solplot_select3.shape[2]] = solplot_select3[:,:,:]
solplot_select[3,:,:,0:solplot_select4.shape[2]] = solplot_select4[:,:,:]
solplot_select[4,:,:,0:solplot_select5.shape[2]] = solplot_select5[:,:,:]
solplot_ref = np.zeros((5,solplot_ref1.shape[0],solplot_ref1.shape[1],solplot_ref1.shape[2]))
solplot_ref[0,:,:,:] = solplot_ref1[:,:,:]
solplot_ref[1,:,:,:] = solplot_ref2[:,:,:]
solplot_ref[2,:,:,:] = solplot_ref3[:,:,:]
solplot_ref[3,:,:,:] = solplot_ref4[:,:,:]
solplot_ref[4,:,:,:] = solplot_ref5[:,:,:]
rec_ref = np.zeros((5,rec_ref1.shape[0],rec_ref1.shape[1]))
rec_ref[0,0:rec_ref1.shape[0],:] = rec_ref1[:,:]
rec_ref[1,0:rec_ref2.shape[0],:] = rec_ref2[:,:]
rec_ref[2,0:rec_ref3.shape[0],:] = rec_ref3[:,:]
rec_ref[3,0:rec_ref4.shape[0],:] = rec_ref4[:,:]
rec_ref[4,0:rec_ref5.shape[0],:] = rec_ref5[:,:]
rec_select_ref = np.zeros((5,rec_select_ref1.shape[0],rec_select_ref1.shape[1]))
rec_select_ref[0,0:rec_select_ref1.shape[0],:] = rec_select_ref1[:,:]
rec_select_ref[1,0:rec_select_ref2.shape[0],:] = rec_select_ref2[:,:]
rec_select_ref[2,0:rec_select_ref3.shape[0],:] = rec_select_ref3[:,:]
rec_select_ref[3,0:rec_select_ref4.shape[0],:] = rec_select_ref4[:,:]
rec_select_ref[4,0:rec_select_ref5.shape[0],:] = rec_select_ref5[:,:]
timev_disp = np.zeros((5,timev_disp1.shape[0]))
timev_disp[0,:] = timev_disp1[:]
timev_disp[1,:] = timev_disp2[:]
timev_disp[2,:] = timev_disp3[:]
timev_disp[3,:] = timev_disp4[:]
timev_disp[4,:] = timev_disp5[:]
timev_rec = np.zeros((5,timev_rec1.shape[0]))
timev_rec[0,0:timev_rec1.shape[0]] = timev_rec1[:]
timev_rec[1,0:timev_rec2.shape[0]] = timev_rec2[:]
timev_rec[2,0:timev_rec3.shape[0]] = timev_rec3[:]
timev_rec[3,0:timev_rec4.shape[0]] = timev_rec4[:]
timev_rec[4,0:timev_rec5.shape[0]] = timev_rec5[:]
ordersv = np.zeros((5,ordersv1.shape[0]))
ordersv[0,:] = ordersv1[:]
ordersv[1,:] = ordersv2[:]
ordersv[2,:] = ordersv3[:]
ordersv[3,:] = ordersv4[:]
ordersv[4,:] = ordersv5[:]
vnames = ['Classic', 'Cross2009', 'Cross2013', 'Cross2016_TE', 'Cross2016_LS', 'Cross_Rb2016_TE',
'Cross_Rb2016_LS', 'Rhombus2016_TE', 'Rhombus2016_LS']
vdts_select = np.zeros(times_cho.shape[0])
for k in range(0,times_cho.shape[0]):
vdts_select[k] = vdts[int(times_cho[k])]
#==============================================================================
#==============================================================================
# Handling the Reference Solution
#==============================================================================
timepos = np.zeros(5)
timeposrec = np.zeros(5)
for i in range(0,5):
for j in range(0,mnormas_disp[i].shape[1]):
if(timevalue==timev_disp[i][j]): timepos[i] = j
for j in range(0,mnormas_rec[i].shape[1]):
if(timevalue==timev_rec[i][j]): timeposrec[i] = j
#==============================================================================
#==============================================================================
list0 = []
list1 = []
list2 = []
list3 = []
list4 = []
list5 = []
list6 = []
list7 = []
list8 = []
for i in range(0,nconfig):
config = list_config[i]
position = i
peso = config[0]
wauthor = config[1]
wtype = config[2]
mvalue = config[3]
nvalue = config[4]
scheme = config[5]
pair = (peso,wauthor,wtype,mvalue,nvalue,position)
if(scheme==0): list0.append(pair)
if(scheme==1): list1.append(pair)
if(scheme==2): list2.append(pair)
if(scheme==3): list3.append(pair)
if(scheme==4): list4.append(pair)
if(scheme==5): list5.append(pair)
if(scheme==6): list6.append(pair)
if(scheme==7): list7.append(pair)
if(scheme==8): list8.append(pair)
list0 = list(sorted(list0))
list1 = list(sorted(list1))
list2 = list(sorted(list2))
list3 = list(sorted(list3))
list4 = list(sorted(list4))
list5 = list(sorted(list5))
list6 = list(sorted(list6))
list7 = list(sorted(list7))
list8 = list(sorted(list8))
list_scheme = [list0,list1,list2,list3,list4,list5,list6,list7,list8]
#==============================================================================
#==============================================================================
# Plotting Results - Routine 1
#==============================================================================
def plot1(mnormas_disp,timev_disp,ordersv,list_scheme,vnames,vdts_select,times_cho,normtype,ptype,figsave):
time_disp = (10**-3)*timev_disp
nscheme = len(vnames)
plot.figure(figsize = (12,12))
if(normtype==2): plot.suptitle('Quadratic Error of Full Displacement at Final Time = %.2f s by dt'%time_disp[0][-1])
if(normtype==np.inf): plot.suptitle('Maximum Error of Full Displacement at Final Time = %.2f s by dt'%time_disp[0][-1])
grid = plot.GridSpec(2,2,wspace=0.1,hspace=0.5)
position_plot_listx = np.array([0,0,1,1])
position_plot_listy = np.array([0,1,0,1])
ntimes = len(vdts_select)
min_value = 100000000
max_value = -100000000
for m1 in range(0,mnormas_disp.shape[0]):
for m2 in range(0,mnormas_disp.shape[1]):
value = mnormas_disp[m1,m2,-1]
if((np.isfinite(value)==True) and (value>0) and (value<min_value)): min_value = value
if((np.isfinite(value)==True) and (value<1) and (value>max_value)): max_value = value
min_value = 0.8*min_value
max_value = 1.4*max_value
vticks = ['s', '+', '+', '+', '+', '^', '^', 'D', 'D']
vline = ['-', '-', '-', '--', '-.', '--', '-.', '--', '-.']
vcolors = ['b', 'g', 'r', 'c', 'm', 'y', 'b', 'purple', 'teal']
for k in range(0,ntimes):
kpostion = int(times_cho[k])
xpos = int(position_plot_listx[k])
ypos = int(position_plot_listy[k])
nvdt = kpostion
plot.subplot(grid[xpos,ypos])
for i in range(0,nscheme):
listm = list_scheme[i]
ntestesloc = len(listm)
list_norms = []
for j in range(0,ntestesloc):
index = listm[j][-1]
norm_value = mnormas_disp[nvdt,index,-1]
if(norm_value<1):
list_norms.append(norm_value)
else:
list_norms.append(np.nan)
plot.plot(ordersv[nvdt],list_norms,color=vcolors[i],linestyle=vline[i],marker=vticks[i],label=vnames[i])
plot.grid()
plot.title('dt = %.3f ms'%vdts[nvdt])
if(xpos==0 and ypos==0): plot.legend(loc="lower center",ncol=3,bbox_to_anchor=(1.05, -0.4))
plot.xticks(ordersv[nvdt])
plot.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
plot.ylim((min_value,max_value))
ax = plot.gca()
ax.set_yscale('log')
if(xpos==0 and ypos==0):
ax.axes.xaxis.set_ticklabels([])
plot.ylabel('Error')
if(xpos==0 and ypos==1):
ax.set_yticks([],minor=True)
ax.yaxis.set_ticklabels([])
plot.minorticks_off()
ax.axes.xaxis.set_ticklabels([])
if(xpos==1 and ypos==0):
plot.xlabel('Order')
plot.ylabel('Error')
if(xpos==1 and ypos==1):
ax.set_yticks([],minor=True)
ax.yaxis.set_ticklabels([])
plot.minorticks_off()
plot.xlabel('Order')
plot.show()
if(normtype==2): plot.savefig('%scomp_methods/plot_norm2_disp_tf_%d.png'%(figsave,ptype),dpi=200,bbox_inches='tight')
if(normtype==np.inf): plot.savefig('%scomp_methods/plot_normmax_disp_tf_%d.png'%(figsave,ptype),dpi=200,bbox_inches='tight')
plot.close()
return
#==============================================================================
#==============================================================================
# Plotting Results - Routine 2
#==============================================================================
def plot2(mnormas_rec,timev_rec,ordersv,list_scheme,vnames,vdts_select,times_cho,setup,normtype,ptype,figsave):
timev_rec = (10**-3)*timev_rec
nscheme = len(vnames)
plot.figure(figsize = (12,12))
if(normtype==2): plot.suptitle('Quadratic Error of Receivers at Final Time = %.2f s by dt'%timev_rec[0][-1])
if(normtype==np.inf): plot.suptitle('Maximum Error of Receivers at Final Time = %.2f s by dt'%timev_rec[0][-1])
grid = plot.GridSpec(2,2,wspace=0.1,hspace=0.5)
position_plot_listx = np.array([0,0,1,1])
position_plot_listy = np.array([0,1,0,1])
ntimes = len(vdts_select)
min_value = 100000000
max_value = -100000000
for m1 in range(0,mnormas_rec.shape[0]):
for m2 in range(0,mnormas_rec.shape[1]):
value = mnormas_rec[m1,m2,-1]
if((np.isfinite(value)==True) and (value>0) and (value<min_value)): min_value = value
if((np.isfinite(value)==True) and (value<1) and (value>max_value)): max_value = value
min_value = 0.8*min_value
max_value = 1.4*max_value
vticks = ['s', '+', '+', '+', '+', '^', '^', 'D', 'D']
vline = ['-', '-', '-', '--', '-.', '--', '-.', '--', '-.']
vcolors = ['b', 'g', 'r', 'c', 'm', 'y', 'b', 'purple', 'teal']
for k in range(0,ntimes):
kpostion = int(times_cho[k])
xpos = int(position_plot_listx[k])
ypos = int(position_plot_listy[k])
nvdt = kpostion
setup = setup_list[kpostion]
plot.subplot(grid[xpos,ypos])
for i in range(0,nscheme):
listm = list_scheme[i]
ntestesloc = len(listm)
list_norms = []
for j in range(0,ntestesloc):
index = listm[j][-1]
posfinal = setup[1]
norm_value = mnormas_rec[nvdt,index,posfinal]
if(norm_value<1):
list_norms.append(norm_value)
else:
list_norms.append(np.nan)
plot.plot(ordersv[nvdt],list_norms,color=vcolors[i],linestyle=vline[i],marker=vticks[i],label=vnames[i])
plot.grid()
plot.title('dt = %.3f ms'%vdts[nvdt])
if(xpos==0 and ypos==0): plot.legend(loc="lower center",ncol=3,bbox_to_anchor=(1.05, -0.4))
plot.xticks(ordersv[nvdt])
plot.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
plot.ylim((min_value,max_value))
ax = plot.gca()
ax.set_yscale('log')
if(xpos==0 and ypos==0):
ax.axes.xaxis.set_ticklabels([])
plot.ylabel('Error')
if(xpos==0 and ypos==1):
ax.set_yticks([],minor=True)
ax.yaxis.set_ticklabels([])
plot.minorticks_off()
ax.axes.xaxis.set_ticklabels([])
if(xpos==1 and ypos==0):
plot.xlabel('Order')
plot.ylabel('Error')
if(xpos==1 and ypos==1):
ax.set_yticks([],minor=True)
ax.yaxis.set_ticklabels([])
plot.minorticks_off()
plot.xlabel('Order')
plot.show()
if(normtype==2): plot.savefig('%scomp_methods/plot_norm2_rec_tf_%d.png'%(figsave,ptype),dpi=200,bbox_inches='tight')
if(normtype==np.inf): plot.savefig('%scomp_methods/plot_normmax_rec_tf_%d.png'%(figsave,ptype),dpi=200,bbox_inches='tight')
plot.close()
return
#==============================================================================
#==============================================================================
# Plotting Results - Routine 3
#==============================================================================
def plot3(mnormas_disp_select,timev_disp,ordersv,list_scheme,vnames,vdts_select,times_cho,xpositionv,ypositionv,normtype,ptype,figsave):
tn = (10**-3)*timev_disp[0][-1]
nscheme = len(vnames)
nposition = xpositionv.shape[0]
min_value = 100000000
max_value = -100000000
for m1 in range(0,mnormas_disp_select.shape[0]):
for m2 in range(0,mnormas_disp_select.shape[1]):
for m3 in range(0,mnormas_disp_select.shape[2]):
value = mnormas_disp_select[m1,m2,m3]
if((np.isfinite(value)==True) and (value>0) and (value<min_value)): min_value = value
if((np.isfinite(value)==True) and (value<1) and (value>max_value)): max_value = value
min_value = 0.8*min_value
max_value = 1.4*max_value
vticks = ['s', '+', '+', '+', '+', '^', '^', 'D', 'D']
vline = ['-', '-', '-', '--', '-.', '--', '-.', '--', '-.']
vcolors = ['b', 'g', 'r', 'c', 'm', 'y', 'b', 'purple', 'teal']
for m in range(0,nposition):
plot.figure(figsize = (12,12))
if(normtype==2): plot.suptitle('Quadratic Error of Selected Displacement by dt \n Total Time = %.2f s - Position: x = %.2f m and y = %.2f m'%(tn,xpositionv[m],ypositionv[m]))
if(normtype==np.inf): plot.suptitle('Maximum Error of Selected Displacement by dt \n Total Time = %.2f s - Position: x = %.2f m and y = %.2f m'%(tn,xpositionv[m],ypositionv[m]))
grid = plot.GridSpec(2,2,wspace=0.1,hspace=0.5)
position_plot_listx = np.array([0,0,1,1])
position_plot_listy = np.array([0,1,0,1])
ntimes = len(vdts_select)
for k in range(0,ntimes):
kpostion = int(times_cho[k])
xpos = int(position_plot_listx[k])
ypos = int(position_plot_listy[k])
nvdt = kpostion
plot.subplot(grid[xpos,ypos])
for i in range(0,nscheme):
listm = list_scheme[i]
ntestesloc = len(listm)
list_norms = []
for j in range(0,ntestesloc):
index = listm[j][-1]
norm_value = mnormas_disp_select[nvdt,index,m]
if(norm_value<1):
list_norms.append(norm_value)
else:
list_norms.append(np.nan)
plot.plot(ordersv[nvdt],list_norms,color=vcolors[i],linestyle=vline[i],marker=vticks[i],label=vnames[i])
plot.grid()
plot.title('dt = %.3f ms'%vdts[nvdt])
if(xpos==0 and ypos==0): plot.legend(loc="lower center",ncol=3,bbox_to_anchor=(1.05, -0.4))
plot.xticks(ordersv[nvdt])
plot.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
plot.ylim((min_value,max_value))
ax = plot.gca()
ax.set_yscale('log')
if(xpos==0 and ypos==0):
ax.axes.xaxis.set_ticklabels([])
plot.ylabel('Error')
if(xpos==0 and ypos==1):
ax.set_yticks([],minor=True)
ax.yaxis.set_ticklabels([])
plot.minorticks_off()
ax.axes.xaxis.set_ticklabels([])
if(xpos==1 and ypos==0):
plot.xlabel('Order')
plot.ylabel('Error')
if(xpos==1 and ypos==1):
ax.set_yticks([],minor=True)
ax.yaxis.set_ticklabels([])
plot.minorticks_off()
plot.xlabel('Order')
plot.show()
if(normtype==2): plot.savefig('%scomp_methods/plot_norm2_x=%.2f_y=%.2f_%d.png'%(figsave,xpositionv[m],ypositionv[m],ptype),dpi=200,bbox_inches='tight')
if(normtype==np.inf): plot.savefig('%scomp_methods/plot_normmax_x=%.2f_y=%.2f_%d.png'%(figsave,xpositionv[m],ypositionv[m],ptype),dpi=200,bbox_inches='tight')
plot.close()
return
#==============================================================================
#==============================================================================
# Plotting Results - Routine 4
#==============================================================================
def plot4(mnormas_disp,timev_disp,ordersv,list_scheme,vnames,vdts,orders_cho,normtype,ptype,figsave):
time_disp = (10**-3)*timev_disp
nscheme = len(vnames)
min_value = 100000000
max_value = -100000000
for m1 in range(0,mnormas_disp.shape[0]):
for m2 in range(0,mnormas_disp.shape[1]):
value = mnormas_disp[m1,m2,-1]
if((np.isfinite(value)==True) and (value>0) and (value<min_value)): min_value = value
if((np.isfinite(value)==True) and (value<1) and (value>max_value)): max_value = value
min_value = 0.8*min_value
max_value = 1.4*max_value
vticks = ['s', '+', '+', '+', '+', '^', '^', 'D', 'D']
vline = ['-', '-', '-', '--', '-.', '--', '-.', '--', '-.']
vcolors = ['b', 'g', 'r', 'c', 'm', 'y', 'b', 'purple', 'teal']
plot.figure(figsize = (12,12))
if(normtype==2): plot.suptitle('Quadratic Error of Full Displacement at Final Time = %.2f s by Order'%time_disp[0][-1])
if(normtype==np.inf): plot.suptitle('Maximum Error of Full Displacement at Final Time = %.2f s by Order'%time_disp[0][-1])
grid = plot.GridSpec(2,2,wspace=0.1,hspace=0.5)
position_plot_listx = np.array([0,0,1,1])
position_plot_listy = np.array([0,1,0,1])
norders = len(orders_cho)
ntimes = len(vdts)
for k in range(0,norders):
xpos = int(position_plot_listx[k])
ypos = int(position_plot_listy[k])
index_order = orders_cho[k]
plot.subplot(grid[xpos,ypos])
for i in range(0,nscheme):
listm = list_scheme[i]
ntestesloc = len(listm)
list_norms = []
for j in range(0,ntimes):
index = listm[index_order][-1]
norm_value = mnormas_disp[j,index,-1]
if(norm_value<1):
list_norms.append(norm_value)
else:
list_norms.append(np.nan)
plot.plot(vdts,list_norms,color=vcolors[i],linestyle=vline[i],marker=vticks[i],label=vnames[i])
plot.grid()
ordem = 2*(orders_cho[k]+1)
plot.title('Order = %d'%(ordem))
if(xpos==0 and ypos==0): plot.legend(loc="lower center",ncol=3,bbox_to_anchor=(1.05, -0.4))
plot.xticks(vdts)
plot.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
plot.ylim((min_value,max_value))
ax = plot.gca()
ax.set_yscale('log')
if(xpos==0 and ypos==0):
ax.axes.xaxis.set_ticklabels([])
plot.ylabel('Error')
if(xpos==0 and ypos==1):
ax.set_yticks([],minor=True)
ax.yaxis.set_ticklabels([])
plot.minorticks_off()
ax.axes.xaxis.set_ticklabels([])
if(xpos==1 and ypos==0):
plot.xlabel('dt [ms]')
plot.ylabel('Error')
if(xpos==1 and ypos==1):
ax.set_yticks([],minor=True)
ax.yaxis.set_ticklabels([])
plot.minorticks_off()
plot.xlabel('dt [ms]')
plot.show()
if(normtype==2): plot.savefig('%scomp_methods/plot_norm2_disp_tf_bydt_%d.png'%(figsave,ptype),dpi=200,bbox_inches='tight')
if(normtype==np.inf): plot.savefig('%scomp_methods/plot_normmax_disp_tf_bydt_%d.png'%(figsave,ptype),dpi=200,bbox_inches='tight')
plot.close()
return
#==============================================================================
#==============================================================================
# Plotting Results - Routine 5
#==============================================================================
def plot5(mnormas_rec,timev_rec,ordersv,list_scheme,vnames,vdts,orders_cho,setup,normtype,ptype,figsave):
time_disp = (10**-3)*timev_rec
nscheme = len(vnames)
min_value = 100000000
max_value = -100000000
for m1 in range(0,mnormas_rec.shape[0]):
for m2 in range(0,mnormas_rec.shape[1]):
value = mnormas_rec[m1,m2,-1]
if((np.isfinite(value)==True) and (value>0) and (value<min_value)): min_value = value
if((np.isfinite(value)==True) and (value<1) and (value>max_value)): max_value = value
min_value = 0.8*min_value
max_value = 1.4*max_value
vticks = ['s', '+', '+', '+', '+', '^', '^', 'D', 'D']
vline = ['-', '-', '-', '--', '-.', '--', '-.', '--', '-.']
vcolors = ['b', 'g', 'r', 'c', 'm', 'y', 'b', 'purple', 'teal']
plot.figure(figsize = (12,12))
if(normtype==2): plot.suptitle('Quadratic Error of Receivers at Final Time = %.2f s by Order'%time_disp[0][-1])
if(normtype==np.inf): plot.suptitle('Maximum Error of Receivers at Final Time = %.2f s by Order'%time_disp[0][-1])
grid = plot.GridSpec(2,2,wspace=0.1,hspace=0.5)
position_plot_listx = np.array([0,0,1,1])
position_plot_listy = np.array([0,1,0,1])
norders = len(orders_cho)
ntimes = len(vdts)
for k in range(0,norders):
xpos = int(position_plot_listx[k])
ypos = int(position_plot_listy[k])
index_order = orders_cho[k]
plot.subplot(grid[xpos,ypos])
for i in range(0,nscheme):
listm = list_scheme[i]
ntestesloc = len(listm)
list_norms = []
for j in range(0,ntimes):
setup = setup_list[j]
index = listm[index_order][-1]
posfinal = setup[1]
norm_value = mnormas_rec[j,index,posfinal]
if(norm_value<1):
list_norms.append(norm_value)
else:
list_norms.append(np.nan)
plot.plot(vdts,list_norms,color=vcolors[i],linestyle=vline[i],marker=vticks[i],label=vnames[i])
plot.grid()
ordem = 2*(orders_cho[k]+1)
plot.title('Order = %d'%(ordem))
if(xpos==0 and ypos==0): plot.legend(loc="lower center",ncol=3,bbox_to_anchor=(1.05, -0.4))
plot.xticks(vdts)
plot.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
plot.ylim((min_value,max_value))
ax = plot.gca()
ax.set_yscale('log')
if(xpos==0 and ypos==0):
ax.axes.xaxis.set_ticklabels([])
plot.ylabel('Error')
if(xpos==0 and ypos==1):
ax.set_yticks([],minor=True)
ax.yaxis.set_ticklabels([])
plot.minorticks_off()
ax.axes.xaxis.set_ticklabels([])
if(xpos==1 and ypos==0):
plot.xlabel('dt [ms]')
plot.ylabel('Error')
if(xpos==1 and ypos==1):
ax.set_yticks([],minor=True)
ax.yaxis.set_ticklabels([])
plot.minorticks_off()
plot.xlabel('dt [ms]')
plot.show()
if(normtype==2): plot.savefig('%scomp_methods/plot_norm2_rec_tf_bydt_%d.png'%(figsave,ptype),dpi=200,bbox_inches='tight')
if(normtype==np.inf): plot.savefig('%scomp_methods/plot_normmax_rec_tf_bydt_%d.png'%(figsave,ptype),dpi=200,bbox_inches='tight')
plot.close()
return
#==============================================================================
#==============================================================================
# Plotting Results - Routine 6
#==============================================================================
def plot6(mnormas_disp_select,timev_disp,ordersv,list_scheme,vnames,vdts,orders_cho,xpositionv,ypositionv,normtype,ptype,figsave):
tn = (10**-3)*timev_disp[0][-1]
min_value = 100000000
max_value = -100000000
for m1 in range(0,mnormas_disp_select.shape[0]):
for m2 in range(0,mnormas_disp_select.shape[1]):
for m3 in range(0,mnormas_disp_select.shape[2]):
value = mnormas_disp_select[m1,m2,m3]
if((np.isfinite(value)==True) and (value>0) and (value<min_value)): min_value = value
if((np.isfinite(value)==True) and (value<1) and (value>max_value)): max_value = value
min_value = 0.8*min_value
max_value = 1.4*max_value
vticks = ['s', '+', '+', '+', '+', '^', '^', 'D', 'D']
vline = ['-', '-', '-', '--', '-.', '--', '-.', '--', '-.']
vcolors = ['b', 'g', 'r', 'c', 'm', 'y', 'b', 'purple', 'teal']
nscheme = len(vnames)
nposition = xpositionv.shape[0]
for m in range(0,nposition):
plot.figure(figsize = (12,12))
if(normtype==np.inf): plot.suptitle('Maximum Error of Selected Displacement by Order \n Total Time = %.2f s Position: x = %.2f m and y = %.2f m'%(tn,xpositionv[m],ypositionv[m]))
if(normtype==2): plot.suptitle('Quadratic Error of Selected Displacement by Order \n Total Time = %.2f s Position: x = %.2f m and y = %.2f m'%(tn,xpositionv[m],ypositionv[m]))
grid = plot.GridSpec(2,2,wspace=0.1,hspace=0.5)
position_plot_listx = np.array([0,0,1,1])
position_plot_listy = np.array([0,1,0,1])
ntimes = len(vdts)
norders = len(orders_cho)
for k in range(0,norders):
xpos = int(position_plot_listx[k])
ypos = int(position_plot_listy[k])
index_order = orders_cho[k]
plot.subplot(grid[xpos,ypos])
for i in range(0,nscheme):
listm = list_scheme[i]
ntestesloc = len(listm)
list_norms = []
for j in range(0,ntimes):
index = listm[index_order][-1]
norm_value = mnormas_disp_select[j,index,m]
if(norm_value<1):
list_norms.append(norm_value)
else:
list_norms.append(np.nan)
plot.plot(vdts,list_norms,color=vcolors[i],linestyle=vline[i],marker=vticks[i],label=vnames[i])
plot.grid()
ordem = 2*(orders_cho[k]+1)
plot.title('Order = %d'%(ordem))
if(xpos==0 and ypos==0): plot.legend(loc="lower center",ncol=3,bbox_to_anchor=(1.05, -0.4))
plot.xticks(vdts)
plot.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
plot.ylim((min_value,max_value))
ax = plot.gca()
ax.set_yscale('log')
if(xpos==0 and ypos==0):
ax.axes.xaxis.set_ticklabels([])
plot.ylabel('Error')
if(xpos==0 and ypos==1):
ax.xaxis.set_ticklabels([])
ax.set_yticks([],minor=True)
ax.yaxis.set_ticklabels([])
plot.minorticks_off()
if(xpos==1 and ypos==0):
plot.xlabel('dt [ms]')
plot.ylabel('Error')
if(xpos==1 and ypos==1):
ax.set_yticks([],minor=True)
ax.yaxis.set_ticklabels([])
plot.minorticks_off()
plot.xlabel('dt [ms]')
plot.show()
if(normtype==2): plot.savefig('%scomp_methods/plot_norm2_bydt_x=%.2f_y=%.2f_%d.png'%(figsave,xpositionv[m],ypositionv[m],ptype),dpi=200,bbox_inches='tight')
if(normtype==np.inf): plot.savefig('%scomp_methods/plot_normmax_bydt_x=%.2f_y=%.2f_%d.png'%(figsave,xpositionv[m],ypositionv[m],ptype),dpi=200,bbox_inches='tight')
plot.close()
return
#==============================================================================
#==============================================================================
# Plotting Results - Routine 7
#==============================================================================
def plot7(solplot_ref,solplot,domain_setup,ordersv,vdts,list_scheme,vnames,ptype,timevalue,figsave,timepos):
fscale = 10**(-3)
timevalue = fscale*timevalue
position_plot_listx = np.array([0,0,1,1,2,2,3,3,4,4])
position_plot_listy = np.array([0,1,0,1,0,1,0,1,0,1])
ntimes = vdts.shape[0]
norders = ordersv[0].shape[0]
nschemes = len(list_scheme) + 1
x0 = domain_setup[0]
x1 = domain_setup[1]
y0 = domain_setup[2]
y1 = domain_setup[3]
scale = max(np.amax(solplot_ref),np.amax(solplot))/50
for k1 in range(0,ntimes):
timeposloc = int(timepos[k1])
#for k1 in range(0,1):
for k2 in range(0,norders):
#for k2 in range(0,1):
fig1 = plot.figure(figsize = (3,8))
plot.suptitle('Displacement - Space Order = %d \n T = %.2f s - dt = %.3f ms'%(ordersv[0][k2],timevalue,vdts[k1]),fontsize=10)
grid = plot.GridSpec(5,2,wspace=0.45,hspace=0.1)
for k3 in range(0,nschemes):
if(k3==0):
xpos = int(position_plot_listx[k3])
ypos = int(position_plot_listy[k3])
plot.subplot(grid[xpos,ypos])
sol = solplot_ref[k1,timeposloc,:,:]
plot.title('Reference',fontsize=7)
else:
xpos = int(position_plot_listx[k3])
ypos = int(position_plot_listy[k3])
plot.subplot(grid[xpos,ypos])
listm = list_scheme[k3-1]
index = listm[k2][-1]
sol = solplot[k1,index,timeposloc,:,:]
plot.title(vnames[k3-1],fontsize=7)
ax = plot.gca()
extent = [fscale*x0,fscale*x1, fscale*y1, fscale*y0]
fig = plot.imshow(np.transpose(sol),vmin=-scale, vmax=scale, cmap=cm.gray, extent=extent)
plot.grid()
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
ax.tick_params(axis="x", labelsize=6)
ax.tick_params(axis="y", labelsize=6)
if(ypos==0):
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
if(xpos==4):
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
if(xpos==4 and ypos==0):
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
cb_ax = fig1.add_axes([0.001, 0.06, 1.0 , 0.02])
cbar = fig1.colorbar(fig, cax=cb_ax,format='%.2e',orientation='horizontal')
cbar.ax.tick_params(labelsize=6)
plot.show()
plot.savefig('%scomp_methods/disp_order_%d_dt_%f_%d.png'%(figsave,ordersv[0][k2],vdts[k1],ptype),dpi=200,bbox_inches='tight')
plot.close()
return
#==============================================================================
#==============================================================================
# Plotting Results - Routine 8
#==============================================================================
def plot8(rec_ref,rec_num,domain_setup,ordersv,vdts,list_scheme,vnames,ptype,timevalue,figsave,timeposrec):
fscale = 10**(-3)
timevalue = fscale*timevalue
position_plot_listx = np.array([0,0,1,1,2,2,3,3,4,4])
position_plot_listy = np.array([0,1,0,1,0,1,0,1,0,1])
ntimes = vdts.shape[0]
norders = ordersv[0].shape[0]
nschemes = len(list_scheme) + 1
x0 = domain_setup[0]
x1 = domain_setup[1]
t0 = domain_setup[4]
tn = domain_setup[5]
scale = max(np.amax(rec_ref),np.amax(rec_num))/50
for k1 in range(0,ntimes):
timeposrecloc = int(timeposrec[k1])
#for k1 in range(0,1):
for k2 in range(0,norders):
#for k2 in range(0,1):
fig1 = plot.figure(figsize = (3,8))
plot.suptitle('Receiver - Space Order = %d \n T = %.2f s - dt = %.3f ms'%(ordersv[0][k2],timevalue,vdts[k1]),fontsize=10)
grid = plot.GridSpec(5,2,wspace=0.45,hspace=0.1)
for k3 in range(0,nschemes):
if(k3==0):
xpos = int(position_plot_listx[k3])
ypos = int(position_plot_listy[k3])
plot.subplot(grid[xpos,ypos])
setup = setup_list[k1]
posfinal = timeposrecloc#setup[1]
rec = rec_ref[k1,0:posfinal,:]
plot.title('Reference',fontsize=7)
else:
xpos = int(position_plot_listx[k3])
ypos = int(position_plot_listy[k3])
plot.subplot(grid[xpos,ypos])
setup = setup_list[k1]
posfinal = timeposrecloc#setup[1]
listm = list_scheme[k3-1]
index = listm[k2][-1]
rec = rec_num[k1,index,0:posfinal,:]
plot.title(vnames[k3-1],fontsize=7)
ax = plot.gca()
extent = [fscale*x0,fscale*x1, fscale*tn, fscale*t0]
fig = plot.imshow(rec,vmin=-scale, vmax=scale, cmap=cm.gray, extent=extent)
plot.grid()
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
ax.tick_params(axis="x", labelsize=6)
ax.tick_params(axis="y", labelsize=6)
if(ypos==0):
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))
if(xpos==4):
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
if(xpos==4 and ypos==0):
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))
#fig1.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8,wspace=0.02, hspace=0.02)
cb_ax = fig1.add_axes([0.001, 0.06, 1.0 , 0.02])
cbar = fig1.colorbar(fig, cax=cb_ax,format='%.2e',orientation='horizontal')
cbar.ax.tick_params(labelsize=6)
plot.show()
plot.savefig('%scomp_methods/rec_order_%d_dt_%f_%d.png'%(figsave,ordersv[0][k2],vdts[k1],ptype),dpi=200,bbox_inches='tight')
plot.close()
return
#==============================================================================
#==============================================================================
# Plotting Results
#==============================================================================
P1 = plot1(mnormas_disp,timev_disp,ordersv,list_scheme,vnames,vdts_select,times_cho,normtype,ptype,figsave)
P2 = plot2(mnormas_rec,timev_rec,ordersv,list_scheme,vnames,vdts_select,times_cho,setup,normtype,ptype,figsave)
P3 = plot3(mnormas_disp_select,timev_disp,ordersv,list_scheme,vnames,vdts_select,times_cho,xpositionv,ypositionv,normtype,ptype,figsave)
P4 = plot4(mnormas_disp,timev_disp,ordersv,list_scheme,vnames,vdts,orders_cho,normtype,ptype,figsave)
P5 = plot5(mnormas_rec,timev_rec,ordersv,list_scheme,vnames,vdts,orders_cho,setup,normtype,ptype,figsave)
P6 = plot6(mnormas_disp_select,timev_disp,ordersv,list_scheme,vnames,vdts,orders_cho,xpositionv,ypositionv,normtype,ptype,figsave)
P7 = plot7(solplot_ref,solplot,domain_setup,ordersv,vdts,list_scheme,vnames,ptype,timevalue,figsave,timepos)
P8 = plot8(rec_ref,rec_num,domain_setup,ordersv,vdts,list_scheme,vnames,ptype,timevalue,figsave,timeposrec)
#==============================================================================
|
StarcoderdataPython
|
4815475
|
<filename>Gioco/main.py<gh_stars>0
import pygame
import os
from Network import Connessione
from SchermataPrincipale import *
import webbrowser
pygame.init()
if __name__ == "__main__":
FINESTRA = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
SCREEN_WIDTH, SCREEN_HEIGHT = pygame.display.get_surface().get_size()
PERCORSO = os.path.realpath(__file__)[:-14]
SFONDO_SCHERMATA_PRINCIPALE = pygame.image.load(PERCORSO + "/Gioco/Immagini/Home.jpg")
FINESTRA.blit(SFONDO_SCHERMATA_PRINCIPALE, (0, 0))
click = False
NET = None
while True:
pygame.time.delay(20)
FINESTRA.blit(SFONDO_SCHERMATA_PRINCIPALE, (0, 0))
mouse_x, mouse_y = pygame.mouse.get_pos()
pulsante_1 = pygame.Rect(860, 800, 200, 48)
pulsante_1_img = pygame.image.load(PERCORSO + "/Gioco/Immagini/Pulsante_home_1.png")
pulsante_1_img = pygame.transform.scale(pulsante_1_img, (200, 48))
pulsante_2 = pygame.Rect(860, 860, 200, 48)
pulsante_2_img = pygame.image.load(PERCORSO + "/Gioco/Immagini/Pulsante_home_2.png")
pulsante_2_img = pygame.transform.scale(pulsante_2_img, (200, 48))
if pulsante_1.collidepoint((mouse_x, mouse_y)):
if click:
NET = Connessione()
minigioco = Schermata_Principale(FINESTRA, NET, SCREEN_HEIGHT, SCREEN_WIDTH)
minigioco.main()
if pulsante_2.collidepoint((mouse_x, mouse_y)):
if click:
webbrowser.open_new_tab("http://localhost:8080/WebApp")
pygame.draw.rect(FINESTRA, (90, 70, 183), pulsante_1)
FINESTRA.blit(pulsante_1_img, (860, 800))
pygame.draw.rect(FINESTRA, (90, 70, 183), pulsante_2)
FINESTRA.blit(pulsante_2_img, (860, 860))
click = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
raise SystemExit
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
click = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
pygame.quit()
raise SystemExit
pygame.display.update()
|
StarcoderdataPython
|
1641752
|
#!/usr/bin/env python3
from io import StringIO
from functools import lru_cache, partial
from datetime import datetime
from json import dumps
from pandas import read_csv, concat, DataFrame
from storage import StorageClient
from msoa_etl_db.processor import dry_run
@lru_cache()
def get_msoa_population():
with StorageClient("pipeline", "assets/msoa_pop2019.csv") as client:
population_io = StringIO(client.download().readall().decode())
result = (
read_csv(population_io)
.rename(columns={"MSOA11CD": "areaCode", "Pop2019": "population"})
.set_index(["areaCode"])
.to_dict()
)
return result["population"]
def process_data(area_code: str, data_path: str) -> DataFrame:
population = get_msoa_population()
payload = dumps({
"data_path": {
"container": "rawsoadata",
"path": data_path
},
"area_code": area_code,
"area_type": "msoa",
"metric": "newCasesBySpecimenDate",
"partition_id": "N/A",
"population": population[area_code],
"area_id": -1,
"metric_id": -1,
"release_id": -1,
"timestamp": datetime.utcnow().isoformat()
})
return dry_run(payload)
def local_test(data_path, msoa_codes):
func = partial(process_data, data_path=data_path)
data = concat(map(func, msoa_codes))
return data
if __name__ == '__main__':
codes = [
"E02003377",
"E02000977",
"E02003539",
"E02003106",
"E02003984",
"E02003135",
]
result = local_test("daily_msoa_cases_202103101048.csv", codes)
result.to_csv("request_data.csv")
|
StarcoderdataPython
|
129698
|
<filename>smart_event/settings/common.py
# Python imports
from os.path import abspath, basename, dirname, join, normpath
from django.contrib import messages
import sys
# ##### PATH CONFIGURATION ################################
# fetch Django's project directory
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# fetch the project_root
PROJECT_ROOT = dirname(DJANGO_ROOT)
# the name of the whole site
SITE_NAME = basename(DJANGO_ROOT)
# collect static files here
STATIC_ROOT = join(PROJECT_ROOT, 'run', 'static')
# collect media files here
MEDIA_ROOT = join(PROJECT_ROOT, 'run', 'media')
# look for static assets here
STATICFILES_DIRS = [
join(PROJECT_ROOT, 'static'),
]
# db routers
DATABASE_ROUTERS = [
'smart_event.settings.routers.TenantRouter'
]
# look for templates here
# This is an internal setting, used in the TEMPLATES directive
PROJECT_TEMPLATES = [
join(PROJECT_ROOT, 'templates'),
]
# add apps/ to the Python path
sys.path.append(normpath(join(PROJECT_ROOT, 'apps')))
# ##### APPLICATION CONFIGURATION #########################
# these are the apps
DEFAULT_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'apps.acl',
'apps.dashboard',
'apps.cliente',
'apps.core',
'apps.escala',
'apps.notification',
'apps.support',
'apps.location',
]
# Middlewares
MIDDLEWARE = [
'smart_event.settings.middleware.multidb_middleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'smart_event.settings.middleware.LocaleMiddleware',
'smart_event.settings.middleware.TimezoneMiddleware'
]
# template stuff
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': PROJECT_TEMPLATES,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request'
],
},
},
]
# Internationalization
USE_I18N = False
# ##### SECURITY CONFIGURATION ############################
# We store the secret key here
# The required SECRET_KEY is fetched at the end of this file
SECRET_FILE = normpath(join(PROJECT_ROOT, 'run', 'SECRET.key'))
# these persons receive error notification
ADMINS = (
('Gustavo', '<EMAIL>'),
)
MANAGERS = ADMINS
AUTH_USER_MODEL = 'cliente.Client'
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# ##### DJANGO RUNNING CONFIGURATION ######################
# the default WSGI application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
# the root URL configuration
ROOT_URLCONF = '%s.urls' % SITE_NAME
# the URL for static files
STATIC_URL = '/static/'
# the URL for media files
MEDIA_URL = '/media/'
# login redirect url
LOGIN_REDIRECT_URL = '/'
# login url
LOGIN_URL = '/acl/login'
# logout redirect url
LOGOUT_REDIRECT_URL = LOGIN_URL
# ##### DEBUG CONFIGURATION ###############################
DEBUG = False
# ONESIGNAL APP_ID
ONESIGNAL_APP_ID = "ac46fd4e-3813-479a-b175-0149f9789d8f"
# MAPBOX Key
MAPBOX_KEY = '<KEY>'
# IPSTACK Key
IPSTACK_KEY = 'b23be60d5e84503c80c6fee49a62317e'
# finally grab the SECRET KEY
try:
SECRET_KEY = open(SECRET_FILE).read().strip()
except IOError:
try:
from django.utils.crypto import get_random_string
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!$%&()=+-_'
SECRET_KEY = get_random_string(50, chars)
with open(SECRET_FILE, 'w') as f:
f.write(SECRET_KEY)
except IOError:
raise Exception('Could not open %s for writing!' % SECRET_FILE)
|
StarcoderdataPython
|
1749803
|
<reponame>shishaochen/TensorFlow-0.8-Win
"""Python wrappers around Brain.
This file is MACHINE GENERATED! Do not edit.
"""
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.ops import op_def_library
def reduce_join(inputs, reduction_indices, keep_dims=None, separator=None,
name=None):
r"""Joins a string Tensor across the given dimensions.
Computes the string join across dimensions in the given string Tensor of shape
`[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input
strings with the given separator (default: empty string). Negative indices are
counted backwards from the end, with `-1` being equivalent to `n - 1`. Passing
an empty `reduction_indices` joins all strings in linear index order and outputs
a scalar string.
For example:
```
# tensor `a` is [["a", "b"], ["c", "d"]]
tf.reduce_join(a, 0) ==> ["ac", "bd"]
tf.reduce_join(a, 1) ==> ["ab", "cd"]
tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
tf.reduce_join(a, [0, 1]) ==> ["acbd"]
tf.reduce_join(a, [1, 0]) ==> ["abcd"]
tf.reduce_join(a, []) ==> ["abcd"]
```
Args:
inputs: A `Tensor` of type `string`.
The input to be joined. All reduced indices must have non-zero size.
reduction_indices: A `Tensor` of type `int32`.
The dimensions to reduce over. Dimensions are reduced in the
order specified. If `reduction_indices` has higher rank than `1`, it is
flattened. Omitting `reduction_indices` is equivalent to passing
`[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.
keep_dims: An optional `bool`. Defaults to `False`.
If `True`, retain reduced dimensions with length `1`.
separator: An optional `string`. Defaults to `""`.
The separator to use when joining.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `string`.
Has shape equal to that of the input with reduced dimensions removed or
set to `1` depending on `keep_dims`.
"""
return _op_def_lib.apply_op("ReduceJoin", inputs=inputs,
reduction_indices=reduction_indices,
keep_dims=keep_dims, separator=separator,
name=name)
def string_to_hash_bucket(string_tensor, num_buckets, name=None):
r"""Converts each string in the input Tensor to its hash mod by a number of buckets.
The hash function is deterministic on the content of the string within the
process.
Note that the hash function may change from time to time.
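For example (illustrative only -- the actual bucket ids depend on the hash
implementation and may change between releases):
```
# tensor `names` is ["alice", "bob"]
tf.string_to_hash_bucket(names, 10) ==> e.g. [3, 7]
```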
Args:
string_tensor: A `Tensor` of type `string`.
num_buckets: An `int` that is `>= 1`. The number of buckets.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
A Tensor of the same shape as the input `string_tensor`.
"""
return _op_def_lib.apply_op("StringToHashBucket",
string_tensor=string_tensor,
num_buckets=num_buckets, name=name)
def _InitOpDefLibrary():
op_list = op_def_pb2.OpList()
text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
op_def_registry.register_op_list(op_list)
op_def_lib = op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
_InitOpDefLibrary.op_list_ascii = """op {
name: "ReduceJoin"
input_arg {
name: "inputs"
type: DT_STRING
}
input_arg {
name: "reduction_indices"
type: DT_INT32
}
output_arg {
name: "output"
type: DT_STRING
}
attr {
name: "keep_dims"
type: "bool"
default_value {
b: false
}
}
attr {
name: "separator"
type: "string"
default_value {
s: ""
}
}
}
op {
name: "StringToHashBucket"
input_arg {
name: "string_tensor"
type: DT_STRING
}
output_arg {
name: "output"
type: DT_INT64
}
attr {
name: "num_buckets"
type: "int"
has_minimum: true
minimum: 1
}
}
"""
_op_def_lib = _InitOpDefLibrary()
|
StarcoderdataPython
|
3399783
|
<reponame>bjuergens/NaturalNets
import abc
import attr
import numpy as np
from typing import Callable
registered_brain_classes = {}
def get_brain_class(brain_class_name: str):
if brain_class_name in registered_brain_classes:
return registered_brain_classes[brain_class_name]
else:
raise RuntimeError("No valid brain")
@attr.s(slots=True, auto_attribs=True, frozen=True)
class IBrainCfg(abc.ABC, dict):
type: str
class IBrain(abc.ABC):
@abc.abstractmethod
def __init__(self, input_size: int, output_size: int, individual: np.ndarray, configuration: dict,
brain_state: dict):
pass
@abc.abstractmethod
def step(self, u):
pass
@abc.abstractmethod
def reset(self):
pass
@classmethod
@abc.abstractmethod
def get_free_parameter_usage(cls, input_size: int, output_size: int, configuration: dict, brain_state: dict):
pass
@classmethod
@abc.abstractmethod
def generate_brain_state(cls, input_size: int, output_size: int, configuration: dict):
pass
@classmethod
@abc.abstractmethod
def save_brain_state(cls, path, brain_state):
pass
@classmethod
def load_brain_state(cls, path):
# For Visualization usage
pass
@staticmethod
def read_matrix_from_genome(individual: np.ndarray, index: int, matrix_rows: int, matrix_columns: int):
matrix_size = matrix_columns * matrix_rows
matrix = np.array(individual[index:index + matrix_size], dtype=np.single)
matrix = matrix.reshape(matrix_rows, matrix_columns)
index += matrix_size
return matrix, index
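    # Illustrative sketch (hypothetical sizes): two weight matrices read back-to-back
    # from a flat genome vector, e.g. for a two-layer network.
    #
    #   index = 0
    #   w1, index = IBrain.read_matrix_from_genome(genome, index, 4, 8)  # 4x8 -> index 32
    #   w2, index = IBrain.read_matrix_from_genome(genome, index, 8, 2)  # 8x2 -> index 48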
@classmethod
def get_activation_function(cls, activation: str) -> Callable[[np.ndarray], np.ndarray]:
if activation == "relu":
return cls.relu
elif activation == "linear":
return cls.linear
elif activation == "tanh":
return cls.tanh
else:
raise RuntimeError("The chosen activation function '{}' is not implemented".format(activation))
@classmethod
def get_individual_size(cls, input_size: int, output_size: int, configuration: dict, brain_state: dict) -> int:
"""uses context information to calculate the required number of free parameter needed to construct
an individual of this class"""
def sum_dict(node):
sum_ = 0
for key, item in node.items():
if isinstance(item, dict):
sum_ += sum_dict(item)
else:
sum_ += item
return sum_
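        # Worked example (illustrative): for a usage dict like
        # {"hidden": {"W": 32, "b": 8}, "output": 16}, sum_dict returns 32 + 8 + 16 = 56.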
usage_dict = cls.get_free_parameter_usage(input_size, output_size, configuration, brain_state)
return sum_dict(usage_dict)
@staticmethod
def relu(x: np.ndarray) -> np.ndarray:
return np.maximum(0, x)
@staticmethod
def linear(x: np.ndarray) -> np.ndarray:
return x
@staticmethod
def tanh(x: np.ndarray) -> np.ndarray:
return np.tanh(x)
@staticmethod
def sigmoid(x):
return 1 / (1 + np.exp(-x))
|
StarcoderdataPython
|
1763641
|
"""
Name: <NAME>
Class: CS370
Date: 13/12/2016
Model: major.py
"""
from ferris import BasicModel
from google.appengine.ext import ndb
class Major(BasicModel):
    college = ndb.StringProperty()
    department = ndb.StringProperty()
    link = ndb.StringProperty()
    degree_level = ndb.StringProperty()
    discipline = ndb.StringProperty()
major_description = ndb.JsonProperty() # The full metadata of the major in JSON format
parsed = ndb.BooleanProperty()
@classmethod
def get_majors(cls):
"""
Retrieves all majors, ordered by discipline
"""
return cls.query().order(cls.discipline)
@classmethod
def get_major_for_discipline(cls, discipline):
return cls.query(cls.discipline==discipline)
@classmethod
def add_new(cls, form_data):
new_major = Major(
college = form_data['college'],
department = form_data['department'],
link = form_data['link'],
degree_level = form_data['degree_level'],
discipline = form_data['discipline'],
major_description = form_data['major_description'],
parsed = form_data['parsed']
)
new_major.put()
return new_major
|
StarcoderdataPython
|
3228942
|
class InvalidRessourceException(Exception):
pass
|
StarcoderdataPython
|
65846
|
import numpy as np
from skmultiflow.drift_detection import ADWIN
def demo():
""" _test_adwin
In this demo, an ADWIN object evaluates a sequence of numbers corresponding to 2 distributions.
The ADWIN object indicates the indices where change is detected.
The first half of the data is a sequence of randomly generated 0's and 1's.
    The second half of the data is a sequence of uniformly distributed random integers from 0 to 7.
"""
adwin = ADWIN()
size = 2000
change_start = 999
np.random.seed(1)
data_stream = np.random.randint(2, size=size)
data_stream[change_start:] = np.random.randint(8, size=size-change_start)
for i in range(size):
adwin.add_element(data_stream[i])
if adwin.detected_change():
print('Change has been detected in data: ' + str(data_stream[i]) + ' - of index: ' + str(i))
if __name__ == '__main__':
demo()
|
StarcoderdataPython
|
62122
|
<gh_stars>1-10
import librosa
import numpy as np
import os
import pyworld
def world_encode_spectral_envelop(sp, fs, dim=36):
# Get Mel-cepstral coefficients (MCEPs)
#sp = sp.astype(np.float64)
coded_sp = pyworld.code_spectral_envelope(sp, fs, dim)
return coded_sp
def world_decompose(wav, fs, frame_period = 5.0):
# Decompose speech signal into f0, spectral envelope and aperiodicity using WORLD
wav = wav.astype(np.float64)
f0, timeaxis = pyworld.harvest(wav, fs, frame_period = frame_period, f0_floor = 71.0, f0_ceil = 800.0)
sp = pyworld.cheaptrick(wav, f0, timeaxis, fs)
ap = pyworld.d4c(wav, f0, timeaxis, fs)
return f0, timeaxis, sp, ap
def world_speech_synthesis(f0, coded_sp, ap, fs, frame_period):
    # Trim all features to a common length before decoding so that f0,
    # the spectral envelope and the aperiodicity stay aligned
    min_len = min([len(f0), len(coded_sp), len(ap)])
    f0 = f0[:min_len]
    coded_sp = coded_sp[:min_len]
    ap = ap[:min_len]
    decoded_sp = world_decode_spectral_envelop(coded_sp, fs)
    wav = pyworld.synthesize(f0, decoded_sp, ap, fs, frame_period)
    # librosa cannot save the wav unless it is converted to float32
    wav = wav.astype(np.float32)
    return wav
def world_decode_spectral_envelop(coded_sp, fs):
# Decode Mel-cepstral to sp
fftlen = pyworld.get_cheaptrick_fft_size(fs)
decoded_sp = pyworld.decode_spectral_envelope(coded_sp, fs, fftlen)
return decoded_sp
path1 = "data/VCC2SF1/"
path2 = "data/VCC2SM1/"
files_1 = os.listdir(path1)
files_2 = os.listdir(path2)
#wav_file = "VCC2SF1/10001.wav"
sampling_rate, num_mcep, frame_period=22050, 40, 5
length = len(files_1)
for i in range(length):
print("Iteration ", i)
wav, _ = librosa.load(path1+files_1[i], sr=sampling_rate, mono=True)
f0, timeaxis, sp, ap = world_decompose(wav=wav, fs=sampling_rate, frame_period=frame_period)
coded_sp = world_encode_spectral_envelop(sp=sp, fs=sampling_rate, dim=num_mcep)
# print("This is the feature we want -> coded_sp")
# print("Type of coded_sp: ", type(coded_sp))
# print("shape of coded_sp: ", coded_sp.shape)
np.save("data/parameters/VCC2SF1/coded_sp/"+str(i)+".npy", coded_sp)
np.save("data/parameters/VCC2SF1/f0/"+str(i)+".npy", f0)
np.save("data/parameters/VCC2SF1/ap/"+str(i)+".npy", ap)
#print("ap: ", ap)
# wav_transformed = world_speech_synthesis(f0=f0, coded_sp=coded_sp,
# ap=ap, fs=sampling_rate, frame_period=frame_period)
# librosa.output.write_wav("generate/"+path1+files_1[i], wav_transformed, sampling_rate)
# sampling_rate, num_mcep, frame_period=22050, 40, 5
wav, _ = librosa.load(path2+files_2[i], sr=sampling_rate, mono=True)
f0, timeaxis, sp, ap = world_decompose(wav=wav, fs=sampling_rate, frame_period=frame_period)
coded_sp = world_encode_spectral_envelop(sp=sp, fs=sampling_rate, dim=num_mcep)
np.save("data/parameters/VCC2SM1/coded_sp/"+str(i)+".npy", coded_sp)
np.save("data/parameters/VCC2SM1/f0/"+str(i)+".npy", f0)
np.save("data/parameters/VCC2SM1/ap/"+str(i)+".npy", ap)
# wav_transformed = world_speech_synthesis(f0=f0, coded_sp=coded_sp,
# ap=ap, fs=sampling_rate, frame_period=frame_period)
# librosa.output.write_wav("data/generate/"+path2+files_2[i], wav_transformed, sampling_rate)
|
StarcoderdataPython
|
4820193
|
<reponame>mori-c/cs106a<gh_stars>1-10
"""
File: gameshow.py
------------------
Let's play a game show!
"""
def main():
print("Welcome to the CS106A Game Show")
print("Chose a door and pick a prize")
print("-------------")
# PART 1: Get the door number from the user
door = int(input("Door: "))
# while the input is invalid
while door < 1 or door > 3:
# tell the user the input was invalid
print("Invalid door!")
# ask for a new input
door = int(input("Door: "))
# PART 2: Compute the prize.
prize = 4
if door == 1:
prize = 2 + 9 // 10 * 100
elif door == 2:
locked = prize % 2 != 0
if not locked:
prize += 5
elif door == 3:
for i in range(door):
prize += i
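    # Resulting prizes (worked out for clarity): door 1 -> 2 + 9 // 10 * 100 = 2
    # (integer division), door 2 -> 4 + 5 = 9 (4 is even, so the prize is not locked),
    # door 3 -> 4 + 0 + 1 + 2 = 7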
print('You win: $' + str(prize))
# This provided line is required at the end of a Python file
# to call the main() function.
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3311697
|
<reponame>sguillory6/e3
import os
from analysis.rrdtool.types.Graph import Graph
class HibernateJmx(Graph):
_graph_config = [
{
'file': '%s-hibernate-collections.png',
'title': 'Hibernate collections on %s',
'label': 'Collection operations',
'stack': True,
'config': [
{
'gauge-CollectionFetchCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Fetch',
'line': '#5b32ff',
'area': '#c5b7ff'
}
]
},
{
'gauge-CollectionUpdateCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Update',
'line': '#ac1cbc',
'area': '#f59eff'
}
]
},
{
'gauge-CollectionLoadCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Load',
'line': '#18a54e',
'area': '#8bd6a7'
}
]
},
{
'gauge-CollectionRecreateCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Recreate',
'line': '#bc8d1e',
'area': '#ddc17e'
}
]
},
{
'gauge-CollectionRemoveCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Remove',
'line': '#a33b1b',
'area': '#f7a991'
}
]
}
],
'graph_name': 'hibernate-collections'
},
{
'file': '%s-hibernate-entities.png',
'title': 'Hibernate entities on %s',
'label': 'Entity operations',
'stack': True,
'config': [
{
'gauge-EntityFetchCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Fetch',
'line': '#5b32ff',
'area': '#c5b7ff'
}
]
},
{
'gauge-EntityUpdateCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Update',
'line': '#ac1cbc',
'area': '#f59eff'
}
]
},
{
'gauge-EntityLoadCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Load',
'line': '#18a54e',
'area': '#8bd6a7'
}
]
},
{
'gauge-EntityInsertCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Insert',
'line': '#bc8d1e',
'area': '#ddc17e'
}
]
},
{
'gauge-EntityDeleteCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Delete',
'line': '#a33b1b',
'area': '#f7a991'
}
]
}
],
'graph_name': 'hibernate-entities'
},
{
'file': '%s-hibernate-query-cache.png',
'title': 'Hibernate query cache on %s',
'label': 'Cache operations',
'stack': True,
'config': [
{
'gauge-QueryCachePutCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Put',
'line': '#5b32ff',
'area': '#c5b7ff'
}
]
},
{
'gauge-QueryExecutionCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Execution',
'line': '#ac1cbc',
'area': '#f59eff'
}
]
},
{
'gauge-QueryCacheMissCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Miss',
'line': '#bc8d1e',
'area': '#ddc17e'
}
]
},
{
'gauge-QueryCacheHitCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Hit',
'line': '#18a54e',
'area': '#8bd6a7'
}
]
}
],
'graph_name': 'hibernate-query-cache'
},
{
'file': '%s-hibernate-L2-cache.png',
'title': 'Hibernate L2 cache on %s',
'label': 'Cache operations',
'stack': True,
'config': [
{
'gauge-SecondLevelCachePutCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'L2 Put',
'line': '#5b32ff',
'area': '#c5b7ff'
}
]
},
{
'gauge-SecondLevelCacheMissCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'L2 Miss',
'line': '#bc8d1e',
'area': '#ddc17e'
}
]
},
{
'gauge-SecondLevelCacheHitCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'L2 Hit',
'line': '#18a54e',
'area': '#8bd6a7'
}
]
}
],
'graph_name': 'hibernate-l2'
},
{
'file': '%s-hibernate-update-timestamps.png',
'title': 'Hibernate update timestamps on %s',
'label': 'Update operations',
'stack': True,
'config': [
{
'gauge-UpdateTimestampsCachePutCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Put',
'line': '#5b32ff',
'area': '#c5b7ff'
}
]
},
{
'gauge-UpdateTimestampsCacheMissCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Miss',
'line': '#bc8d1e',
'area': '#ddc17e'
}
]
},
{
'gauge-UpdateTimestampsCacheHitCount': [
{
'ds': 'value',
'multi': 1,
'desc': 'Hit',
'line': '#18a54e',
'area': '#8bd6a7'
}
]
}
],
'graph_name': 'hibernate-timestamps'
}
]
_image_dir = None
_server_dir = None
def __init__(self, image_dir, server_dir):
self._image_dir = image_dir
self._server_dir = server_dir
def render(self, node_name, start, end, graph_width=350, graph_height=100):
data_dir = os.path.join(self._server_dir, 'GenericJMX-org.hibernate.core_')
for graph_config in self._graph_config:
image_file = os.path.join(self._image_dir, graph_config['file'] % node_name)
graph_name=graph_config['graph_name']
self._render(data_dir, image_file, start, end, graph_config,
graph_width, graph_height, node_name, graph_name)
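# Illustrative usage sketch (directory paths, node name and time range are hypothetical):
#   graphs = HibernateJmx('/tmp/hibernate-graphs', '/var/lib/collectd/rrd/node-1')
#   graphs.render('node-1', start=1500000000, end=1500003600)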
|
StarcoderdataPython
|
3263107
|
# -*- coding: utf-8 -*-
"""
Basic_stat Package for Python
Version 1.0 Nov 29, 2021
Author: <NAME>, Graduate School of Oceanography (GSO), URI.
Email: <EMAIL>
#
# DISCLAIMER:
# This software is provided "as is" without warranty of any kind.
#=========================================================================
"""
#import numpy as np
from .basic_stats import *
__all__ = ['basic_stats']
|
StarcoderdataPython
|
60529
|
# -*- coding: utf-8 -*-
import pykintone
from mymodel import Person
from cache import get_all
from jinja2.environment import Environment
from jinja2 import Template, FileSystemLoader
import codecs
import os
import argparse
import unicodecsv as csv
from cStringIO import StringIO
OUTPUT_DIR = './output'
# OUTPUT_DIR = '../mitou/webtools/mitou.github.io/people'
env = Environment()
env.loader = FileSystemLoader('.')
def output(x):
data = x.__dict__
t = env.get_template('template_v01.html')
html = t.render(data)
fo = codecs.open(os.path.join(OUTPUT_DIR, '%s.html' % x.name), 'w', 'utf-8')
fo.write(html)
fo.close()
def assure_length(xs, length):
if args.non_strict:
if len(xs) != length:
print 'length mismatch:', length, ','.join(xs)
xs += [''] * length
else:
if len(xs) != length:
raise RuntimeError(u'length mismatch: {} and {}'.format(length, u','.join(xs)).encode('utf-8'))
def process_tags(x):
resume_list = []
activity_list = []
mitou_list = []
photo = None
rows = csv.reader(StringIO(x.tags.strip('\n').encode('utf8')), encoding='utf8')
for items in rows:
if not items: continue
if items[0] == u'所属':
            # syntax: 所属 (affiliation), where, when, note
            # "when" is free-format, e.g. "20xx-xx-xx時点" (as of 20xx-xx-xx) or "2014-2016"
assure_length(items, 4)
if items[1] in [u"", u"フリー", u"フリーランス"]: continue
resume_list.append(dict(
where=items[1],
when=items[2],
who=[], # be filled by collect_tags
note=items[3],
ref_id=''
))
# TODO when note include '#1' etc, put it in ref_id
elif items[0] == u'未踏採択':
# e.g. 未踏採択,2014,未踏,SC,任意キャラクターへの衣装転写システム,首藤 一幸
assure_length(items, 6)
mitou_list.append(dict(
when=items[1],
kubun=items[2],
members=[], # be filled by collect_tags
sc=items[3],
theme=items[4],
pm=items[5]
))
elif items[0] == u'Photo':
photo = ':'.join(items[1:]).strip(':')
elif items[0] == u'講演':
activity_list.append(dict(
type=items[0],
title=u"{}「{}」".format(items[1], items[2]),
when=items[3],
who=[],
note=items[4],
ref_id=''
))
else:
assure_length(items, 4)
activity_list.append(dict(
type=items[0],
title=items[1],
when=items[2],
who=[],
note=items[3],
ref_id=''
))
x.activity_list = activity_list
x.resume_list = resume_list
x.mitou_list = mitou_list
x.photo = photo
return x
def put_all():
for x in get_all(args.use_cache):
x = process_tags(x)
output(x)
def collect_tags():
from collections import defaultdict
mitou_theme = defaultdict(set)
mitou_sc = defaultdict(set)
mitou_kubun = defaultdict(set)
affiliation = defaultdict(set)
event = defaultdict(set)
xs = get_all(args.use_cache)
for x in xs:
x = process_tags(x)
for m in x.mitou_list:
if m['theme']:
# PMs don't have `theme`
mitou_theme[m['theme']].add(x)
m['pretty_kubun'] = pretty(m['when'], m['kubun'])
mitou_kubun[m['pretty_kubun']].add(x)
if m['sc']:
mitou_sc[m['pretty_kubun']].add(x)
for m in x.resume_list:
affiliation[m['where']].add(x)
for m in x.activity_list:
event[m['title']].add(x)
for x in xs:
me = set([x])
for m in x.mitou_list:
m['members'] = sorted(mitou_theme[m['theme']] - me)
for m in x.resume_list:
m['who'] = sorted(affiliation[m['where']] - me)
for m in x.activity_list:
m['who'] = sorted(event[m['title']] - me)
if not(args.index_only):
for x in xs:
output(x)
t = env.get_template('template_list.html')
print 'output kubun'
data = list(sorted((k, mitou_kubun[k]) for k in mitou_kubun))
html = t.render(title=u'採択区分別一覧', data=data)
fo = codecs.open(os.path.join(OUTPUT_DIR, 'kubun.html'), 'w', 'utf-8')
fo.write(html)
fo.close()
print 'output sc'
data = list(sorted((k, mitou_sc[k]) for k in mitou_sc))
html = t.render(title=u'スパクリ一覧', data=data)
fo = codecs.open(os.path.join(OUTPUT_DIR, 'sc.html'), 'w', 'utf-8')
fo.write(html)
fo.close()
print 'output affiliation'
data = list(sorted(
((k, affiliation[k])
for k in affiliation),
key=lambda x:-len(x[1])))
html = t.render(title=u'所属別一覧', data=data)
fo = codecs.open(os.path.join(OUTPUT_DIR, 'affiliation.html'), 'w', 'utf-8')
fo.write(html)
fo.close()
print 'output index.html'
t = env.get_template('template_index.html')
html = t.render(title=u'未踏名鑑')
fo = codecs.open(os.path.join(OUTPUT_DIR, 'index.html'), 'w', 'utf-8')
fo.write(html)
fo.close()
def find_links(s):
'convert name to link'
assert isinstance(s, basestring)
import re
def make_link(m):
name = m.groups()[0]
return to_link(name)
s = re.sub('\[(.+?)\]', make_link, s)
return s
env.filters['find_links'] = find_links
def to_link(name):
    'take a name and turn it into an HTML link'
assert isinstance(name, basestring)
return u'<nobr><a href="{0}.html">[{0}]</a></nobr>'.format(name)
env.filters['to_link'] = to_link
def abbrev_people(people):
if len(people) < 8:
return show_people(people)
    # show people in descending order of available information; ties are broken by name
rest = len(people) - 7
people = sorted(
people,
key=lambda p: (len(p.tags) + len(p.note), p.name),
reverse=True)
from itertools import islice
people = islice(people, 7)
return ' '.join(to_link(p.name) for p in people) + u'他{}人'.format(rest)
env.filters['abbrev_people'] = abbrev_people
def show_people(people):
return ' '.join(map(to_link, sorted(p.name for p in people)))
env.filters['show_people'] = show_people
def pretty(when, kubun=None):
if '.' in when:
year, ki = when.split('.')
result = u'{}年{}期'.format(year, ki)
else:
result = when + u'年'
if kubun:
if kubun == u'ユース':
kubun = u'未踏ユース'
elif kubun == u'本体':
kubun = u'未踏本体'
elif kubun == u'未踏':
pass # do nothing
else:
raise RuntimeError(u'{} should be in ユース, 本体, 未踏'.format(kubun))
result += kubun
return result
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--use-cache', '-c', action='store_true', help='use local cache for input instead of reading from kintone')
parser.add_argument('--index-only', action='store_true', help='render index only')
parser.add_argument('--non-strict', action='store_true', help='skip strict format check')
args = parser.parse_args()
collect_tags()
|
StarcoderdataPython
|
123671
|
from __future__ import division
import numpy as np
__author__ = '<NAME>'
__license__ = '''Copyright (c) 2014-2017, The IceCube Collaboration
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
class OscParams(object):
def __init__(self, dm_solar, dm_atm, x12, x13, x23, deltacp):
"""
Expects dm_solar and dm_atm to be in [eV^2], and x_{ij} to be
sin^2(theta_{ij})
params:
* xij - sin^2(theta_{ij}) values to use in oscillation calc.
* dm_solar - delta M_{21}^2 value [eV^2]
* dm_atm - delta M_{32}^2 value [eV^2] if Normal hierarchy, or
delta M_{31}^2 value if Inverted Hierarchy (following
BargerPropagator class).
* deltacp - \delta_{cp} value to use.
"""
assert x12 <= 1
assert x13 <= 1
assert x23 <= 1
self.sin12 = np.sqrt(x12)
self.sin13 = np.sqrt(x13)
self.sin23 = np.sqrt(x23)
self.deltacp = deltacp
# Comment BargerPropagator.cc:
# "For the inverted Hierarchy, adjust the input
# by the solar mixing (should be positive)
# to feed the core libraries the correct value of m32."
self.dm_solar = dm_solar
if dm_atm < 0.0:
self.dm_atm = dm_atm - dm_solar
else:
self.dm_atm = dm_atm
@property
def M_pmns(self):
# real part [...,0]
# imaginary part [...,1]
Mix = np.zeros((3,3,2))
sd = np.sin(self.deltacp)
cd = np.cos(self.deltacp)
c12 = np.sqrt(1.0-self.sin12*self.sin12)
c23 = np.sqrt(1.0-self.sin23*self.sin23)
c13 = np.sqrt(1.0-self.sin13*self.sin13)
Mix[0][0][0] = c12*c13
Mix[0][0][1] = 0.0
Mix[0][1][0] = self.sin12*c13
Mix[0][1][1] = 0.0
Mix[0][2][0] = self.sin13*cd
Mix[0][2][1] = -self.sin13*sd
Mix[1][0][0] = -self.sin12*c23-c12*self.sin23*self.sin13*cd
Mix[1][0][1] = -c12*self.sin23*self.sin13*sd
Mix[1][1][0] = c12*c23-self.sin12*self.sin23*self.sin13*cd
Mix[1][1][1] = -self.sin12*self.sin23*self.sin13*sd
Mix[1][2][0] = self.sin23*c13
Mix[1][2][1] = 0.0
Mix[2][0][0] = self.sin12*self.sin23-c12*c23*self.sin13*cd
Mix[2][0][1] = -c12*c23*self.sin13*sd
Mix[2][1][0] = -c12*self.sin23-self.sin12*c23*self.sin13*cd
Mix[2][1][1] = -self.sin12*c23*self.sin13*sd
Mix[2][2][0] = c23*c13
Mix[2][2][1] = 0.0
return Mix
@property
def M_mass(self):
dmVacVac = np.zeros((3,3))
mVac = np.zeros(3)
delta = 5.0e-9
mVac[0] = 0.0
mVac[1] = self.dm_solar
mVac[2] = self.dm_solar+self.dm_atm
# Break any degeneracies
if self.dm_solar == 0.0:
mVac[0] -= delta
if self.dm_atm == 0.0:
mVac[2] += delta
dmVacVac[0][0] = 0.
dmVacVac[1][1] = 0.
dmVacVac[2][2] = 0.
dmVacVac[0][1] = mVac[0]-mVac[1]
dmVacVac[1][0] = -dmVacVac[0][1]
dmVacVac[0][2] = mVac[0]-mVac[2]
dmVacVac[2][0] = -dmVacVac[0][2]
dmVacVac[1][2] = mVac[1]-mVac[2]
dmVacVac[2][1] = -dmVacVac[1][2]
return dmVacVac
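# Illustrative usage sketch -- the numeric values below are placeholders, not
# recommended oscillation parameters:
#   osc = OscParams(dm_solar=7.5e-5, dm_atm=2.5e-3,
#                   x12=0.31, x13=0.02, x23=0.5, deltacp=0.0)
#   pmns = osc.M_pmns   # 3x3x2 array: [..., 0] real part, [..., 1] imaginary part
#   dm2 = osc.M_mass    # 3x3 antisymmetric matrix of mass-squared differences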
|
StarcoderdataPython
|
3252513
|
# coding=utf-8
# Copyright (c) DIRECT Contributors
import pytest
import torch
from direct.nn.didn.didn import DIDN
def create_input(shape):
data = torch.rand(shape).float()
return data
@pytest.mark.parametrize(
"shape",
[
[3, 2, 32, 32],
[3, 2, 16, 16],
],
)
@pytest.mark.parametrize(
"out_channels",
[3, 5],
)
@pytest.mark.parametrize(
"hidden_channels",
[16, 8],
)
@pytest.mark.parametrize(
"n_dubs",
[3, 4],
)
@pytest.mark.parametrize(
"num_convs_recon",
[3, 4],
)
@pytest.mark.parametrize(
"skip",
[True, False],
)
def test_didn(shape, out_channels, hidden_channels, n_dubs, num_convs_recon, skip):
model = DIDN(shape[1], out_channels, hidden_channels, n_dubs, num_convs_recon, skip)
data = create_input(shape).cpu()
out = model(data)
assert list(out.shape) == [shape[0]] + [out_channels] + shape[2:]
|
StarcoderdataPython
|
3207201
|
<gh_stars>0
def conf():
return {
"id":"discourse",
"description":"this is the discourse c360 component",
"enabled":True,
}
|
StarcoderdataPython
|
199637
|
import dotdict
import os
import submitit
import sys
from pathlib import Path
from sst.train import train
from global_utils import save_result, search_hyperparams, slurm_job_babysit
meta_configs = dotdict.DotDict(
{
'tagset_size': {
'values': [5],
'flag': None
},
'data_path': {
'values': [
'../data/sst/10p/{split}.txt',
'../data/sst/10p/{split}.txt',
'../data/sst/10p/{split}_c.txt',
'../data/sst/10p/{split}_cl.txt',
'../data/sst/10p/{split}.txt',
'../data/sst/10p/{split}_c.txt',
'../data/sst/10p/{split}_cl.txt',
'../data/sst/10p/{split}_pc.txt',
'../data/sst/10p/{split}_pcl.txt',
'../data/sst/10p/{split}_pc.txt',
'../data/sst/10p/{split}_pcl.txt',
'../data/sst/10p/{split}.txt',
'../data/sst/10p/{split}.txt',
],
'flag': 'data'
},
'learning_rate': {
'values': [0.0005],
'flag': 'optimizer'
},
'min_lr': {
'values': [5e-6],
'flag': None
},
'optimizer': {
'values': ['Adam'],
'flag': 'optimizer'
},
'model_name': {
'values': ['xlm-roberta-large'],
'flag': 'pretrained-model'
},
'device': {
'values': ['cuda'],
'flag': None
},
'hidden_dim': {
'values': [512],
'flag': None
},
'dropout_p': {
'values': [0.2],
'flag': None
},
'fine_tune': {
'values': [False],
'flag': 'fine-tune'
},
'batch_size': {
'values': [64],
'flag': 'fine-tune'
},
'epochs': {
'values': [20],
'flag': None
},
'validation_per_epoch': {
'values': [4],
'flag': 'pretrained-model'
},
'seed':{
'values': [115],
'flag': 'global-seed'
},
'tmp_path':{
'values': [f'../tmp'],
'flag': None
},
'augment': {
'values': [
False,
True, True, True,
'free-length', 'free-length', 'free-length',
True, True, 'free-length',
'free-length', 'synonym', 'random'
],
'flag': 'data'
},
'use_spans': {
'values': [False],
'flag': None
},
'use_attn': {
'values': [True],
'flag': None
}
}
)
all_configs = list(search_hyperparams(dict(), meta_configs))
print(len(all_configs), 'configs generated.')
idx = int(sys.argv[1])
results = [train(x) for x in all_configs[idx:idx+1]]
print(results[0])
os.system(f'mkdir -p ../results/sst/')
save_result(results, '../results/sst/attn_frozen_small_shuf.json')
|
StarcoderdataPython
|
4812579
|
<reponame>ryu-sw/alembic
##-*****************************************************************************
##
## Copyright (c) 2009-2011,
## <NAME>, Inc. and
## Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd.
##
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following disclaimer
## in the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Sony Pictures Imageworks, nor
## Industrial Light & Magic nor the names of their contributors may be used
## to endorse or promote products derived from this software without specific
## prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##-*****************************************************************************
from maya import cmds as MayaCmds
import maya.OpenMaya as OpenMaya
import os
import math
# adds the current working directory so tools don't get confused about where we
# are storing files
def expandFileName(name):
return os.getcwd() + os.path.sep + name
# compare the two floating point values
def floatDiff(val1, val2, tolerance):
diff = math.fabs(val1 - val2)
if diff < math.pow(10, -tolerance):
return True
return False
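# Example: floatDiff(1.00004, 1.00009, 4) is True because |diff| = 5e-5 < 1e-4,
# while floatDiff(1.0, 1.001, 4) is False.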
# function that returns a node object given a name
def getObjFromName(nodeName):
selectionList = OpenMaya.MSelectionList()
selectionList.add( nodeName )
obj = OpenMaya.MObject()
selectionList.getDependNode(0, obj)
return obj
# function that finds a plug given a node object and plug name
def getPlugFromName(attrName, nodeObj):
fnDepNode = OpenMaya.MFnDependencyNode(nodeObj)
attrObj = fnDepNode.attribute(attrName)
plug = OpenMaya.MPlug(nodeObj, attrObj)
return plug
# meaning of return value:
# 0 if array1 = array2
# 1 if array1 and array2 are of the same length, array1[i] == array2[i] for 0<=i<m<len, and array1[m] < array2[m]
# -1 if array1 and array2 are of the same length, array1[i] == array2[i] for 0<=i<m<len, and array1[m] > array2[m]
# 2 if array1.length() < array2.length()
# -2 if array1.length() > array2.length()
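# Example (illustrative): for arrays holding (1, 2, 3) and (1, 2, 4) the result is 1;
# swapping the arguments gives -1.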
def compareArray(array1, array2):
len1 = array1.length()
len2 = array2.length()
if len1 > len2 : return -2
if len1 < len2 : return 2
for i in range(0, len1):
if array1[i] < array2[i] :
return 1
if array1[i] > array2[i] :
return -1
return 0
# return True if the two point arrays are exactly the same
def comparePointArray(array1, array2):
len1 = array1.length()
len2 = array2.length()
if len1 != len2 :
return False
for i in range(0, len1):
if not array1[i].isEquivalent(array2[i], 1e-6):
return False
return True
# return True if the two meshes are identical
def compareMesh( nodeName1, nodeName2 ):
# basic error checking
obj1 = getObjFromName(nodeName1)
if not obj1.hasFn(OpenMaya.MFn.kMesh):
return False
obj2 = getObjFromName(nodeName2)
if not obj2.hasFn(OpenMaya.MFn.kMesh):
return False
polyIt1 = OpenMaya.MItMeshPolygon( obj1 )
polyIt2 = OpenMaya.MItMeshPolygon( obj2 )
if polyIt1.count() != polyIt2.count():
return False
if polyIt1.polygonVertexCount() != polyIt2.polygonVertexCount():
return False
vertices1 = OpenMaya.MIntArray()
vertices2 = OpenMaya.MIntArray()
pointArray1 = OpenMaya.MPointArray()
pointArray2 = OpenMaya.MPointArray()
while polyIt1.isDone()==False and polyIt2.isDone()==False :
# compare vertex indices
polyIt1.getVertices(vertices1)
polyIt2.getVertices(vertices2)
if compareArray(vertices1, vertices2) != 0:
return False
# compare vertex positions
polyIt1.getPoints(pointArray1)
polyIt2.getPoints(pointArray2)
if not comparePointArray( pointArray1, pointArray2 ):
return False
polyIt1.next()
polyIt2.next()
if polyIt1.isDone() and polyIt2.isDone() :
return True
return False
# return True if the two Nurbs Surfaces are identical
def compareNurbsSurface(nodeName1, nodeName2):
# basic error checking
obj1 = getObjFromName(nodeName1)
if not obj1.hasFn(OpenMaya.MFn.kNurbsSurface):
return False
obj2 = getObjFromName(nodeName2)
if not obj2.hasFn(OpenMaya.MFn.kNurbsSurface):
return False
fn1 = OpenMaya.MFnNurbsSurface(obj1)
fn2 = OpenMaya.MFnNurbsSurface(obj2)
# degree
if fn1.degreeU() != fn2.degreeU():
return False
if fn1.degreeV() != fn2.degreeV():
return False
# span
if fn1.numSpansInU() != fn2.numSpansInU():
return False
if fn1.numSpansInV() != fn2.numSpansInV():
return False
# form
if fn1.formInU() != fn2.formInU():
return False
if fn1.formInV() != fn2.formInV():
return False
# control points
if fn1.numCVsInU() != fn2.numCVsInU():
return False
if fn1.numCVsInV() != fn2.numCVsInV():
return False
cv1 = OpenMaya.MPointArray()
fn1.getCVs(cv1)
cv2 = OpenMaya.MPointArray()
fn2.getCVs(cv2)
if not comparePointArray(cv1, cv2):
return False
# knots
if fn1.numKnotsInU() != fn2.numKnotsInU():
return False
if fn1.numKnotsInV() != fn2.numKnotsInV():
return False
knotsU1 = OpenMaya.MDoubleArray()
fn1.getKnotsInU(knotsU1)
knotsV1 = OpenMaya.MDoubleArray()
fn1.getKnotsInV(knotsV1)
knotsU2 = OpenMaya.MDoubleArray()
fn2.getKnotsInU(knotsU2)
knotsV2 = OpenMaya.MDoubleArray()
fn2.getKnotsInV(knotsV2)
if compareArray( knotsU1, knotsU2 ) != 0:
return False
if compareArray( knotsV1, knotsV2 ) != 0:
return False
# trim curves
if fn1.isTrimmedSurface() != fn2.isTrimmedSurface():
return False
# may need to add more trim checks
return True
# return True if the two locators are idential
def compareLocator(nodeName1, nodeName2):
# basic error checking
obj1 = getObjFromName(nodeName1)
if not obj1.hasFn(OpenMaya.MFn.kLocator):
return False
obj2 = getObjFromName(nodeName2)
if not obj2.hasFn(OpenMaya.MFn.kLocator):
return False
if not floatDiff(MayaCmds.getAttr(nodeName1+'.localPositionX'),
MayaCmds.getAttr(nodeName2+'.localPositionX'), 4):
return False
if not floatDiff(MayaCmds.getAttr(nodeName1+'.localPositionY'),
MayaCmds.getAttr(nodeName2+'.localPositionY'), 4):
return False
if not floatDiff(MayaCmds.getAttr(nodeName1+'.localPositionZ'),
MayaCmds.getAttr(nodeName2+'.localPositionZ'), 4):
return False
if not floatDiff(MayaCmds.getAttr(nodeName1+'.localScaleX'),
MayaCmds.getAttr(nodeName2+'.localScaleX'), 4):
return False
if not floatDiff(MayaCmds.getAttr(nodeName1+'.localScaleY'),
MayaCmds.getAttr(nodeName2+'.localScaleY'), 4):
return False
if not floatDiff(MayaCmds.getAttr(nodeName1+'.localScaleZ'),
MayaCmds.getAttr(nodeName2+'.localScaleZ'), 4):
return False
return True
# return True if the two cameras are identical
def compareCamera( nodeName1, nodeName2 ):
# basic error checking
obj1 = getObjFromName(nodeName1)
if not obj1.hasFn(OpenMaya.MFn.kCamera):
return False
obj2 = getObjFromName(nodeName2)
if not obj2.hasFn(OpenMaya.MFn.kCamera):
return False
fn1 = OpenMaya.MFnCamera( obj1 )
fn2 = OpenMaya.MFnCamera( obj2 )
if fn1.filmFit() != fn2.filmFit():
print "differ in filmFit"
return False
if not floatDiff(fn1.filmFitOffset(), fn2.filmFitOffset(), 4):
print "differ in filmFitOffset"
return False
if fn1.isOrtho() != fn2.isOrtho():
print "differ in isOrtho"
return False
if not floatDiff(fn1.orthoWidth(), fn2.orthoWidth(), 4):
print "differ in orthoWidth"
return False
if not floatDiff(fn1.focalLength(), fn2.focalLength(), 4):
print "differ in focalLength"
return False
if not floatDiff(fn1.lensSqueezeRatio(), fn2.lensSqueezeRatio(), 4):
print "differ in lensSqueezeRatio"
return False
if not floatDiff(fn1.cameraScale(), fn2.cameraScale(), 4):
print "differ in cameraScale"
return False
if not floatDiff(fn1.horizontalFilmAperture(),
fn2.horizontalFilmAperture(), 4):
print "differ in horizontalFilmAperture"
return False
if not floatDiff(fn1.verticalFilmAperture(), fn2.verticalFilmAperture(), 4):
print "differ in verticalFilmAperture"
return False
if not floatDiff(fn1.horizontalFilmOffset(), fn2.horizontalFilmOffset(), 4):
print "differ in horizontalFilmOffset"
return False
if not floatDiff(fn1.verticalFilmOffset(), fn2.verticalFilmOffset(), 4):
print "differ in verticalFilmOffset"
return False
if not floatDiff(fn1.overscan(), fn2.overscan(), 4):
print "differ in overscan"
return False
if not floatDiff(fn1.nearClippingPlane(), fn2.nearClippingPlane(), 4):
print "differ in nearClippingPlane"
return False
if not floatDiff(fn1.farClippingPlane(), fn2.farClippingPlane(), 4):
print "differ in farClippingPlane"
return False
if not floatDiff(fn1.preScale(), fn2.preScale(), 4):
print "differ in preScale"
return False
if not floatDiff(fn1.postScale(), fn2.postScale(), 4):
print "differ in postScale"
return False
if not floatDiff(fn1.filmTranslateH(), fn2.filmTranslateH(), 4):
print "differ in filmTranslateH"
return False
if not floatDiff(fn1.filmTranslateV(), fn2.filmTranslateV(), 4):
print "differ in filmTranslateV"
return False
if not floatDiff(fn1.horizontalRollPivot(), fn2.horizontalRollPivot(), 4):
print "differ in horizontalRollPivot"
return False
if not floatDiff(fn1.verticalRollPivot(), fn2.verticalRollPivot(), 4):
print "differ in verticalRollPivot"
return False
if fn1.filmRollOrder() != fn2.filmRollOrder():
print "differ in filmRollOrder"
return False
if not floatDiff(fn1.filmRollValue(), fn2.filmRollValue(), 4):
print "differ in filmRollValue"
return False
if not floatDiff(fn1.fStop(), fn2.fStop(), 4):
print "differ in fStop"
return False
if not floatDiff(fn1.focusDistance(), fn2.focusDistance(), 4,):
print "differ in focusDistance"
return False
if not floatDiff(fn1.shutterAngle(), fn2.shutterAngle(), 4):
print "differ in shutterAngle"
return False
if fn1.usePivotAsLocalSpace() != fn2.usePivotAsLocalSpace():
print "differ in usePivotAsLocalSpace"
return False
if fn1.tumblePivot() != fn2.tumblePivot():
print "differ in tumblePivot"
return False
return True
# return True if the two Nurbs curves are identical
def compareNurbsCurve(nodeName1, nodeName2):
# basic error checking
obj1 = getObjFromName(nodeName1)
if not obj1.hasFn(OpenMaya.MFn.kNurbsCurve):
print nodeName1, "not a curve."
return False
obj2 = getObjFromName(nodeName2)
if not obj2.hasFn(OpenMaya.MFn.kNurbsCurve):
print nodeName2, "not a curve."
return False
fn1 = OpenMaya.MFnNurbsCurve(obj1)
fn2 = OpenMaya.MFnNurbsCurve(obj2)
if fn1.degree() != fn2.degree():
print nodeName1, nodeName2, "degrees differ."
return False
if fn1.numCVs() != fn2.numCVs():
print nodeName1, nodeName2, "numCVs differ."
return False
if fn1.numSpans() != fn2.numSpans():
print nodeName1, nodeName2, "spans differ."
return False
if fn1.numKnots() != fn2.numKnots():
print nodeName1, nodeName2, "numKnots differ."
return False
if fn1.form() != fn2.form():
print nodeName1, nodeName2, "form differ."
return False
cv1 = OpenMaya.MPointArray()
fn1.getCVs(cv1)
cv2 = OpenMaya.MPointArray()
fn2.getCVs(cv2)
if not comparePointArray(cv1, cv2):
print nodeName1, nodeName2, "points differ."
return False
# we do not need to compare knots, since they aren't stored in Alembic
# and are currently recreated as uniformly distributed between 0 and 1
return True
|
StarcoderdataPython
|
1761736
|
import sys
from kubernetes import watch
# This function returns the kubernetes secret object present in a given namespace
def get_kubernetes_secret(api_instance, namespace, secret_name):
try:
return api_instance.read_namespaced_secret(secret_name, namespace)
except Exception as e:
sys.exit("Error occurred when retrieving secret '{}': ".format(secret_name) + str(e))
# Function that watches events corresponding to kubernetes secrets and passes the events to a callback function
def watch_kubernetes_secret(api_instance, namespace, secret_name, timeout, callback=None):
if not callback:
return
field_selector = "metadata.name={}".format(secret_name) if secret_name else ""
try:
w = watch.Watch()
for event in w.stream(api_instance.list_namespaced_secret, namespace, field_selector=field_selector, timeout_seconds=timeout):
if callback(event):
return
except Exception as e:
sys.exit("Error occurred when watching kubernetes secret events: " + str(e))
sys.exit("The watch on the kubernetes secret events has timed out. Please see the pod logs for more info.")
|
StarcoderdataPython
|
3260369
|
<reponame>rafaelbarretomg/Uninter
# Exercise 01 (Tuples) from Lesson 03
ano = int(input('Digite o ano atual: '))
nasc = int(input('Digite o seu ano de nascimento: '))
idade = ano - nasc
if (idade >= 18):
    # Classic style (%-formatting)
print('A sua idade é de %i e voce já pode tirar carteira de motorista.' % idade)
    # Modern style (str.format)
print('A sua idade é de {} e voce já pode tirar carteira de motorista.' .format(idade))
else:
    # Classic style (%-formatting)
print('A sua idade é de %i e voce é menor de idade.' % idade)
    # Modern style (str.format)
print('A sua idade é de {} e voce é menor de idade.'.format(idade))
|
StarcoderdataPython
|
4840495
|
<reponame>chesfire/ceafa-dms-prod
from django.test import TestCase
from django.test import Client
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.http.response import (
HttpResponseRedirect,
)
from ceafadms.core.models import Tag
User = get_user_model()
class TestTagsViewsAuthReq(TestCase):
def setUp(self):
self.user = _create_user(
username="john",
password="<PASSWORD>"
)
def test_tag_view(self):
"""
        If the user is not authenticated, the response must
        be HttpResponseRedirect (302)
"""
tag = Tag.objects.create(
user=self.user, name="test"
)
ret = self.client.get(
reverse('admin:tag-update', args=(tag.pk,)),
)
self.assertEqual(
ret.status_code,
HttpResponseRedirect.status_code
)
def test_tags_view(self):
"""
        Not accessible to users who are not authenticated
"""
ret = self.client.post(
reverse('admin:tags'),
{
'action': 'delete_selected',
'_selected_action': [1, 2],
}
)
self.assertEqual(
ret.status_code,
HttpResponseRedirect.status_code
)
# same story for get method
ret = self.client.get(
reverse('admin:tags'),
)
self.assertEqual(
ret.status_code,
HttpResponseRedirect.status_code
)
class TestTagViews(TestCase):
def setUp(self):
self.user = _create_user(
username="john",
password="<PASSWORD>"
)
self.client = Client()
self.client.login(
username='john',
password='<PASSWORD>'
)
def test_tag_change_view(self):
tag = Tag.objects.create(
user=self.user, name="test"
)
ret = self.client.get(
reverse('admin:tag-update', args=(tag.pk,)),
)
self.assertEqual(ret.status_code, 200)
# try to see a non existing log entry
# must return 404 status code
ret = self.client.get(
reverse('admin:tag-update', args=(tag.pk + 1,)),
)
self.assertEqual(ret.status_code, 404)
def test_tags_view(self):
ret = self.client.get(
reverse('admin:tags')
)
self.assertEqual(
ret.status_code, 200
)
def test_delete_tags(self):
tag1 = Tag.objects.create(
user=self.user, name="test1"
)
tag2 = Tag.objects.create(
user=self.user, name="test2"
)
Tag.objects.create(
user=self.user, name="test3"
)
ret = self.client.post(
reverse('admin:tags'),
{
'action': 'delete_selected',
'_selected_action': [tag1.id, tag2.id],
}
)
self.assertEqual(
ret.status_code, 302
)
# two tags entries were deleted
# only one should remain
self.assertEqual(
Tag.objects.filter(
user=self.user
).count(),
1
)
def test_tags_view_user_adds_duplicate_tag(self):
"""
        The user tries to add a duplicate tag. In case of a
        duplicate, a user-friendly error is displayed instead
        of a database exception.
"""
Tag.objects.create(
user=self.user, name="tag-10"
)
# do it again
ret = self.client.post(
reverse('admin:tag-add'),
{
'name': 'tag-10',
'pinned': False,
'fg_color': '#000000',
'bg_color': '#FF0000'
}
)
# no dramatic exception here, like DB duplicate key
# violations
self.assertEqual(
ret.status_code,
200
)
# no new tags were added
self.assertEqual(
Tag.objects.count(),
1
)
def _create_user(username, password):
user = User.objects.create_user(
username=username,
is_active=True,
)
user.set_password(password)
user.save()
return user
|
StarcoderdataPython
|
128317
|
#-----------------------------------------------------------------------------
# press-stitch.py
# Merges the three Press Switch games together
# pylint: disable=bad-indentation
#-----------------------------------------------------------------------------
import getopt
import hashlib
import os.path
import pathlib
import shutil
import sys
import csv
import copy
import zipfile
import press_stitch_archive
import rpp
import backgrounds_map
# Mappings for 0.3 -> 0.5
import character_map_35_chris
import character_map_35_ciel
import character_map_35_eliza
import character_map_35_karyn
import character_map_35_main
import character_map_35_martha
import character_map_35_michelle
import character_map_35_mother
import character_map_35_nick
import character_map_35_vanessa
# Mappings for 0.4 -> 0.5
import character_map_45_alma
import character_map_45_amber
import character_map_45_anna
import character_map_45_april
import character_map_45_candice
import character_map_45_chris
import character_map_45_ciel
import character_map_45_cindy
import character_map_45_donald
import character_map_45_eliza
import character_map_45_erin
import character_map_45_ermach
import character_map_45_hillary
import character_map_45_jenna
import character_map_45_jennifer
import character_map_45_jillian
import character_map_45_karyn
import character_map_45_kayla
import character_map_45_main
import character_map_45_martha
import character_map_45_melina
import character_map_45_michelle
import character_map_45_mika
import character_map_45_mother
import character_map_45_nelson
import character_map_45_nick
import character_map_45_nurse
import character_map_45_sean
import character_map_45_vanessa
import character_map_45_waitress
# Mappings for 0.5 -> 0.6
import character_map_56_eliza
import character_map_56_main
filename_03 = "Press-SwitchV0.3b-all";
filename_04 = "Press-SwitchV0.4a-pc";
filename_05 = "Press-SwitchV0.5c-pc";
filename_06 = "Press-SwitchV0.6";
# The key is the label used in an RPY "show" command to show a character.
# The value is the character directory used to find the images.
characterLabelMap = {
"alma": "alma",
"amber": "amber",
"amberd": "amber",
"anna": "anna",
"april": "april",
"candice": "candice",
"candiced": "candice",
"chris": "chris",
"chrisd": "chris",
"chrisghost": "chris",
"ciel": "ciel",
"cindy": "cindy",
"donald": "donald",
"donaldd": "donald",
"donaldflash": "donald",
"eliza": "eliza",
"elizad": "eliza",
"elizaflash": "eliza",
"elizaghost": "eliza",
"erin": "erin",
"erind": "erin",
"eringhost": "erin",
"hillary": "hillary",
"hillaryd": "hillary",
"jenna": "jenna",
"jennifer": "jennifer",
"jenniferd": "jennifer",
"jillian": "jillian",
"jilliand": "jillian",
"karyn": "karyn",
"karynd": "karyn",
"karynflash": "karyn",
"karynghost": "karyn",
"kayla": "kayla",
"kaylad": "kayla",
"main": "main",
"maind": "main",
"mainflash": "main",
"mainghost": "main",
"martha": "martha",
"marthad": "martha",
"marthaghost": "martha",
"melina": "melina",
"michelle": "michelle",
"michelled": "michelle",
"michelleghost": "michelle",
"mika": "mika",
"mikad": "mika",
"mother": "mother",
"nelson": "nelson",
"nick": "nick",
"nurse": "nurse",
"sean": "sean",
"vanessa": "vanessa",
"vanessad": "vanessa",
"waitress": "waitress"
};
# Map showing whether to remap the character based on RenPy variables
characterDoRemap = {
"alma": False,
"amber": False,
"amberd": True,
"anna": False,
"april": False,
"candice": False,
"candiced": True,
"chris": False,
"chrisd": True,
"chrisghost": False,
"ciel": False,
"cindy": False,
"donald": False,
"donaldd": True,
"donaldflash": False,
"eliza": False,
"elizad": True,
"elizaflash": False,
"elizaghost": False,
"erin": False,
"erind": True,
"eringhost": False,
"hillary": False,
"hillaryd": True,
"jenna": False,
"jennifer": False,
"jenniferd": True,
"jillian": False,
"jilliand": True,
"karyn": False,
"karynd": True,
"karynflash": False,
"karynghost": False,
"kayla": False,
"kaylad": True,
"main": False,
"maind": True,
"mainflash": False,
"mainghost": False,
"martha": False,
"marthad": True,
"marthaghost": False,
"melina": False,
"michelle": False,
"michelled": True,
"michelleghost": False,
"mika": False,
"mikad": True,
"mother": False,
"nelson": False,
"nick": False,
"nurse": False,
"sean": False,
"vanessa": False,
"vanessad": True,
"waitress": False,
};
characterImageMap35 = {
"chris": character_map_35_chris .characterMapChris,
"ciel": character_map_35_ciel .characterMapCiel,
"eliza": character_map_35_eliza .characterMapEliza,
"karyn": character_map_35_karyn .characterMapKaryn,
"main": character_map_35_main .characterMapMain,
"martha": character_map_35_martha .characterMapMartha,
"michelle": character_map_35_michelle.characterMapMichelle,
"mother": character_map_35_mother .characterMapMother,
"nick": character_map_35_nick .characterMapNick,
"vanessa": character_map_35_vanessa .characterMapVanessa,
};
characterImageMap45 = {
"alma": character_map_45_alma .characterMapAlma,
"amber": character_map_45_amber .characterMapAmber,
"anna": character_map_45_anna .characterMapAnna,
"april": character_map_45_april .characterMapApril,
"candice": character_map_45_candice .characterMapCandice,
"chris": character_map_45_chris .characterMapChris,
"ciel": character_map_45_ciel .characterMapCiel,
"cindy": character_map_45_cindy .characterMapCindy,
"donald": character_map_45_donald .characterMapDonald,
"eliza": character_map_45_eliza .characterMapEliza,
"erin": character_map_45_erin .characterMapErin,
"ermach": character_map_45_ermach .characterMapErmach,
"hillary": character_map_45_hillary .characterMapHillary,
"jenna": character_map_45_jenna .characterMapJenna,
"jennifer": character_map_45_jennifer.characterMapJennifer,
"jillian": character_map_45_jillian .characterMapJillian,
"karyn": character_map_45_karyn .characterMapKaryn,
"kayla": character_map_45_kayla .characterMapKayla,
"main": character_map_45_main .characterMapMain,
"martha": character_map_45_martha .characterMapMartha,
"melina": character_map_45_melina .characterMapMelina,
"michelle": character_map_45_michelle.characterMapMichelle,
"mika": character_map_45_mika .characterMapMika,
"mother": character_map_45_mother .characterMapMother,
"nelson": character_map_45_nelson .characterMapNelson,
"nick": character_map_45_nick .characterMapNick,
"nurse": character_map_45_nurse .characterMapNurse,
"sean": character_map_45_sean .characterMapSean,
"vanessa": character_map_45_vanessa .characterMapVanessa,
"waitress": character_map_45_waitress.characterMapWaitress
};
characterImageMap56 = {
"eliza": character_map_56_eliza .characterMapEliza,
"main": character_map_56_main .characterMapMain,
};
# Initial state of RenPy variables
pyVariables = {
"Al.display": "alma",
"Am.display": "amber",
"Can.display": "candice",
"ch.display": "chris",
"Do.display": "donald",
"e.display": "eliza",
"er.display": "erin",
"hi.display": "hillary",
"je.display": "jennifer",
"ji.display": "jillian",
"k.display": "karyn",
"ka.display": "kayla",
"ma.display": "martha",
"m.display": "mika",
"M.display": "main",
"mic.display": "michelle",
"Nel.display": "nelson",
"nur2.display": "nurse",
"Te.display": "teacher",
"v.display": "vanessa"
};
# Association of person name to RenPy display variable
personDispVars = {
"alma": "Al.display",
"amber": "Am.display",
"candice": "Can.display",
"chris": "ch.display",
"donald": "Do.display",
"eliza": "e.display",
"erin": "er.display",
"hillary": "hi.display",
"jennifer": "je.display",
"jillian": "ji.display",
"karyn": "k.display",
"kayla": "ka.display",
"martha": "ma.display",
"mika": "m.display",
"main": "M.display",
"michelle": "mic.display",
"nelson": "Nel.display",
"nurse": "nur2.display",
"teacher": "Te.display",
"vanessa": "v.display"
};
# List of active threads
threads = [];
# List of label call objects
labelCalls = [];
inlineErrors = False;
#-----------------------------------------------------------------------------
def printRed(s):
#type: (str) -> None
print("\033[1;31m" + s + "\033[0m");
#-----------------------------------------------------------------------------
def showError(txt):
#type: (str) -> None
printRed("Error: " + txt);
#-----------------------------------------------------------------------------
def flagError(rpFile, lineNum, txt):
#type: (rpp.RenPyFile, int, str) -> str
showError("Line " + str(lineNum) + ": " + txt);
if inlineErrors:
return rpFile.lines[lineNum].strip('\n') + " # ERROR: " + txt + "\n";
sys.exit(1);
#-----------------------------------------------------------------------------
def md5(fname):
#type: (str) -> str
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
#-----------------------------------------------------------------------------
def verifySingleFile(filename, desiredHash):
#type: (str, str) -> bool
print("Verifying " + filename + "...");
if (not(os.path.exists(filename))):
showError("File does not exist!");
return False;
actualHash = md5(filename);
if (actualHash != desiredHash):
showError("Checksum is not correct, please download the file again");
print("Desired MD5: " + desiredHash);
print("Actual MD5 : " + actualHash);
return False;
print("Succeeded");
return True;
#-----------------------------------------------------------------------------
def unzipFile(filename):
#type: (str) -> None
print("Unzipping file " + filename + "...");
with zipfile.ZipFile(filename, 'r') as zip_ref:
zip_ref.extractall(".")
#-----------------------------------------------------------------------------
def removeDir(filename):
#type: (str) -> None
if os.path.isdir(pathlib.Path(filename)):
print("Removing directory " + filename + "...");
shutil.rmtree(filename);
#-----------------------------------------------------------------------------
def checkFile(dirname, checksum):
#type: (str, str) -> bool
if os.path.isdir(pathlib.Path(dirname)):
print("Directory " + dirname + " exists, ZIP extract skipped");
return True;
filename = dirname + ".zip";
if not(verifySingleFile(filename, checksum)):
return False;
unzipFile(filename);
return True;
#-----------------------------------------------------------------------------
def doMakeDir(path):
#type: (str) -> None
if (os.path.isdir(pathlib.Path(path))):
print("Directory " + path + " already exists, skipping creation");
else:
print("Creating directory " + path);
os.mkdir(path);
#-----------------------------------------------------------------------------
def doCopyFile(srcPath, dstPath, filename):
#type: (str, str, str) -> None
srcFile = os.path.join(srcPath, filename);
print("Copying file " + srcFile + " into " + dstPath);
shutil.copy(srcFile, dstPath);
#-----------------------------------------------------------------------------
def isNumberField(s):
#type: (str) -> bool
for c in s:
if not(c in "0123456789"):
return False;
return True;
#-----------------------------------------------------------------------------
def expandNumberField(s):
#type: (str) -> str
if not(isNumberField(s)):
return s;
return s.zfill(3);
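# Example: expandNumberField("7") returns "007"; non-numeric fields are returned unchanged.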
#-----------------------------------------------------------------------------
def getIndentOf(line):
#type: (str) -> int
indent = 0;
lineLen = len(line);
while((indent < lineLen) and (line[indent] == ' ')):
indent = indent + 1;
return indent;
#-----------------------------------------------------------------------------
def processCommand(rpFile, thread, lineNum, line):
#type: (rpp.RenPyFile, rpp.RenPyThread, int, str) -> None
fields = list(csv.reader([line], delimiter=' '))[0];
if (len(fields) < 2):
return;
# Try for a UI timer jump
if (fields[0].startswith("ui.timer(") and fields[1].startswith("ui.jumps(")):
jumpLabel = fields[1].split('"')[1];
addLabelCall(rpFile, jumpLabel, thread);
return;
# Try for a variable assignment
if (len(fields) < 3):
return;
pyVar = fields[0].strip();
pyVal = fields[2].strip().strip('"').strip('\'');
#print(str(lineNum) + ": Command " + str(fields));
if (fields[1] == "="):
thread.vars[pyVar] = pyVal;
#print("Variable '" + pyVar + "' becomes '" + pyVal + "'");
elif (fields[1] == "+="):
if not(pyVar in thread.vars):
flagError(rpFile, lineNum, "Variable '" + pyVar + "' not found in thread");
thread.vars[pyVar] = str(int(thread.vars[pyVar]) + int(pyVal));
elif (fields[1] == "-="):
if not(pyVar in thread.vars):
flagError(rpFile, lineNum, "Variable '" + pyVar + "' not found in thread");
thread.vars[pyVar] = str(int(thread.vars[pyVar]) - int(pyVal));
else:
flagError(rpFile, lineNum, "Unsupported operator '" + fields[1] + "', line is: " + line);
#-----------------------------------------------------------------------------
def calculateCondition(thread, lineNum, fields):
#type: (rpp.RenPyThread, int, list[str]) -> bool
offset = 1;
while(offset < len(fields)):
varname = fields[offset];
condition = fields[offset + 1];
value = fields[offset + 2];
if not(varname in thread.vars):
return False;
if (condition == "=="):
cont = False;
if (value[-1] == ","):
cont = True;
value = value.strip(',');
if (thread.vars[varname] == value.strip('"').strip('\'')):
return True;
if (cont):
offset = offset + 1;
value = fields[offset + 2];
if (thread.vars[varname] == value.strip('"').strip('\'')):
return True;
elif (condition == ">="):
if (int(thread.vars[varname]) >= int(value.strip('"').strip('\''))):
return True;
else:
showError("Condition " + condition + " not supported");
sys.exit(1);
offset = offset + 3;
if ((offset < len(fields) and not(fields[offset] == "or"))):
showError(str(lineNum) + ": Boolean operator " + fields[offset] + " not supported, fields are " + str(fields));
sys.exit(1);
offset = offset + 1;
return False;
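# Example (illustrative): for a script line 'if e.display == "karyn" or k.display == "eliza":'
# the caller strips the trailing colon and splits on whitespace, so fields is
# ['if', 'e.display', '==', '"karyn"', 'or', 'k.display', '==', '"eliza"'] and the call
# returns True if either variable holds the matching value in thread.vars.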
#-----------------------------------------------------------------------------
def processIfStep(rpFile, thread):
#type: (rpp.RenPyFile, rpp.RenPyThread) -> None
obj = thread.stack[-1];
line = rpFile.lines[obj.lineNum].split(':')[0];
fields = line.split();
# Are we still in the block?
if (not(rpFile.indentIsGood(obj.lineNum, obj.indent))):
thread.stack.pop(); # Kill the IF
return;
# Call the "if" hook to see if the file has special processing
rpFile.hookIf(thread);
if((fields[0] == "if") or (fields[0] == "elif")):
condition = calculateCondition(thread, obj.lineNum, fields);
if (condition and not(obj.hasExecuted)):
obj.hasExecuted = True;
thread.stack.append(rpp.RenPyBlock(obj.lineNum + 1, obj.indent + 4));
obj.lineNum = rpFile.blockEndLine(obj.lineNum + 1, obj.indent + 4);
elif (fields[0] == "else"):
if not(obj.hasExecuted):
thread.stack.append(rpp.RenPyBlock(obj.lineNum + 1, obj.indent + 4));
obj.lineNum = rpFile.blockEndLine(obj.lineNum + 1, obj.indent + 4);
return;
thread.stack.pop();
else:
# Must have finished the block
thread.stack.pop();
#-----------------------------------------------------------------------------
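# Executes a single line of an ordinary block: menus fork new threads, 'return' and jumps end
# the thread, 'if' pushes an if-processor, 'show'/'scene' lines are remapped, 'hide' updates
# visibility tracking, and '$' lines are treated as commands.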
def processBlockStep(rpFile, thread):
#type: (rpp.RenPyFile, rpp.RenPyThread) -> None
blk = thread.stack[-1];
i = blk.lineNum;
indent = blk.indent;
if (not(rpFile.indentIsGood(i, indent))):
thread.stack.pop();
return;
strippedLine = rpFile.lines[i].strip();
if (strippedLine.startswith("menu:")):
# Shift the block processor to the end of the menu, so that when the
# thread gets cloned it resumes from the right place
blk.lineNum = rpFile.blockEndLine(i + 1, indent + 4);
processMenuStep(rpFile, thread, i);
return;
elif (strippedLine.startswith("return")):
thread.stack = []; # Kill the thread
return;
elif (strippedLine.startswith("if ")):
thread.stack.append(rpp.RenPyIf(i, indent)); # Add an IF processor to the stack
i = rpFile.blockEndLine(i + 1, indent + 4);
elif (strippedLine.startswith("elif ") or strippedLine.startswith("else:")):
i = rpFile.blockEndLine(i + 1, indent + 4); # Flush it
elif (strippedLine.startswith("label goopy")):
# We hit the goopy path, no need to process this
thread.stack = []; # Kill the thread
return;
elif (strippedLine.startswith("hide")):
person = strippedLine.split()[1];
if rpFile.trackVis and not(person == "bg"):
thread.vars["_visible_" + person] = "0";
i += 1;
elif (strippedLine.startswith("jump")):
label = strippedLine.split()[1];
if not(label in rpFile.labelList):
print("External jump: " + label);
thread.stack = []; # Kill this thread, it jumped out of the file
return;
jumpDest = rpFile.labelList[label];
if (not((label == "kpathendroundup2") or label.startswith("endingclone")) or (jumpDest > blk.lineNum)):
addLabelCall(rpFile, label, thread);
thread.stack = []; # Kill this thread, it jumped
return;
else:
i = i + 1;
elif (strippedLine.startswith("show") or strippedLine.startswith("scene")):
if rpFile.trackVis and strippedLine.startswith("scene"):
for varName in thread.vars:
if varName.startswith("_visible_"):
thread.vars[varName] = "0";
if not(rpFile.lineModifiedFlags[i]):
rpFile.lines[i] = processShow(rpFile, thread, i);
rpFile.lineModifiedFlags[i] = True;
i = i + 1;
elif (strippedLine.startswith("$")):
processCommand(rpFile, thread, i, strippedLine.strip('$').strip());
i = i + 1;
else:
i = i + 1;
blk.lineNum = i;
#-----------------------------------------------------------------------------
# On entry, lineNum points to the menu: line
def processMenuStep(rpFile, thread, lineNum):
#type: (rpp.RenPyFile, rpp.RenPyThread, int) -> None
global threads;
indent = getIndentOf(rpFile.lines[lineNum]) + 4;
lineNum = lineNum + 1;
# Iterate the whole menu and fork threads from the current one for each
# menu option
line = rpFile.lines[lineNum];
while((lineNum < rpFile.numLines) and rpFile.indentIsGood(lineNum, indent)):
if (getIndentOf(line) == indent):
menuItem = line.strip('\n').strip('\r').strip();
if not((menuItem[0] == '#') or menuItem.startswith("\"{s}")):
endQuote = menuItem.find("\"", 1);
condition = ":";
if (endQuote > 0):
condition = menuItem[endQuote + 1:].strip();
res = True;
if (not(condition == ":")):
# Menu has a condition on it
condition = condition.strip(':');
res = calculateCondition(thread, lineNum, condition.split());
if (res):
newThread = copy.deepcopy(thread);
newThread.stack.append(rpp.RenPyBlock(lineNum + 1, indent + 4));
threads.append(newThread);
lineNum = rpFile.blockEndLine(lineNum + 1, indent + 4);
else:
lineNum = lineNum + 1;
else:
lineNum = lineNum + 1;
line = rpFile.lines[lineNum];
# Kill the current thread. Because it's been used as the parent thread for
# all the menu options, it's not needed any more as each menu option will
# continue from here.
thread.stack = [];
#-----------------------------------------------------------------------------
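# Rewrites a 'show'/'scene' line: backgrounds go through backMap, 0.3-style "show cg" lines are
# delegated to processCG, and character expressions are translated through the character (and
# optional V6) mapping tables while character visibility is tracked.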
def processShow(rpFile, thread, lineNum):
#type: (rpp.RenPyFile, rpp.RenPyThread, int) -> str
line = rpFile.lines[lineNum];
fields = line.strip().strip(":").split();
# At this point, 'fields' looks like this:
# ['show', 'maind', '17', 'with', 'dissolve']
# Check for backgrounds
if fields[1] == "bg":
if len(fields) < 3:
return line;
if not(fields[2] in rpFile.backMap):
#return flagError(rpFile, lineNum, "Background " + fields[2] + " has no mapping table entry");
return line;
newLine = "";
indent = 0;
while line[indent] == " ":
newLine += " ";
indent = indent + 1;
newbg = rpFile.backMap[fields[2]];
if (newbg == ""):
return flagError(rpFile, lineNum, "Background '" + fields[2] + "' exists but has no mapping");
newLine += fields[0] + " bg " + newbg;
i = 3;
while i < len(fields):
newLine += " " + fields[i];
i = i + 1;
if (line.strip()[-1] == ":"):
newLine += ":";
newLine += "\n";
return newLine;
# Check for 0.3 style "show cg" statements
if (fields[0] == "show") and (fields[1] == "cg"):
return rpFile.processCG(line);
# Try for a character
# Character label is fields[1], get character name
if not(fields[0] == "show"):
return line;
if not(fields[1] in characterLabelMap):
return line;
if not(lineNum in rpFile.showLines):
rpFile.showLines.append(lineNum);
if (rpFile.trackVis):
varName = "_visible_" + fields[1];
if not(varName in thread.vars) or (thread.vars[varName] == "0"):
# Person has become visible
if (fields[1] in rpFile.charFlip) and not(lineNum in rpFile.visLines):
rpFile.visLines.append(lineNum);
thread.vars[varName] = "1";
# If it's got no parameters, like "show michelled:", then just return it
# as there's no mapping to do
if (len(fields) < 3):
return line;
charName = characterLabelMap[fields[1]];
swappedCharName = charName;
if characterDoRemap[fields[1]]:
# Character is not a ghost, do the remap
if (charName in personDispVars):
swappedCharName = thread.vars[personDispVars[charName]];
swappedFields = swappedCharName.split();
swappedCharName = swappedFields[0];
#i = 1;
#while i < len(swappedFields):
# fields.append(swappedFields[i]);
# i = i + 1;
filenameMode = True;
baseMode = True;
exFile = swappedCharName + "_ex";
modifiers = "";
base = "";
i = 2;
while i < len(fields):
if (fields[i] in ["as", "at", "behind", "with", "zorder"]):
filenameMode = False;
if (filenameMode):
field = expandNumberField(fields[i]);
if (field == "full"):
exFile = exFile + "_full";
elif isNumberField(field):
baseMode = False;
if baseMode:
if not(field == "full") and not((charName == "hillary") and (fields[i] == "school")):
base = base + " " + fields[i];
else:
exFile = exFile + "_" + field;
else:
modifiers = modifiers + " " + fields[i];
i = i + 1;
if (exFile == (swappedCharName + "_ex")):
# It's something like "show candice with dissolve", with no fields so nothing to do
return line;
mappedFile = "";
hasMapped = False;
if (swappedCharName in rpFile.charMap):
if exFile in rpFile.charMap[swappedCharName]:
mappedFile = rpFile.charMap[swappedCharName][exFile];
hasMapped = True;
elif exFile+"_001" in rpFile.charMap[swappedCharName]:
mappedFile = rpFile.charMap[swappedCharName][exFile+"_001"];
hasMapped = True;
elif exFile+"_002" in rpFile.charMap[swappedCharName]:
mappedFile = rpFile.charMap[swappedCharName][exFile+"_002"];
hasMapped = True;
elif exFile+"_003" in rpFile.charMap[swappedCharName]:
mappedFile = rpFile.charMap[swappedCharName][exFile+"_003"];
hasMapped = True;
else:
# We're not doing a V3 or V4 mapping for this character, fake that we've done one
mappedFile = exFile;
hasMapped = True;
if not(hasMapped):
# The .rpy file is referencing a graphic that doesn't seem to exist in the 0.4 graphics directory.
print("DBG: Vars are: " + str(thread.vars));
return(flagError(rpFile, lineNum, "Mapping failed, source file '" + exFile + "' not found. Line being processed is: " + str(fields)));
if mappedFile == "":
return(flagError(rpFile, lineNum, "Mapping failed, source file '" + exFile + "' exists but has no mapping. Line being processed is: " + str(fields)));
# Map V6 if present
if (swappedCharName in rpFile.v6Map):
hasMapped = False;
v6File = "";
if mappedFile in rpFile.v6Map[swappedCharName]:
v6File = rpFile.v6Map[swappedCharName][mappedFile];
hasMapped = True;
elif mappedFile+"_001" in rpFile.v6Map[swappedCharName]:
v6File = rpFile.v6Map[swappedCharName][mappedFile+"_001"];
hasMapped = True;
elif mappedFile+"_002" in rpFile.v6Map[swappedCharName]:
v6File = rpFile.v6Map[swappedCharName][mappedFile+"_002"];
hasMapped = True;
elif mappedFile+"_003" in rpFile.v6Map[swappedCharName]:
v6File = rpFile.v6Map[swappedCharName][mappedFile+"_003"];
hasMapped = True;
if not(hasMapped):
return(flagError(rpFile, lineNum, "No V6 mapping for V5 file '" + mappedFile + "', source file '" + exFile + "', char name " + swappedCharName + ", original char " + charName));
#print("Mapped V5 " + mappedFile + " to V6 " + v6File);
mappedFile = v6File;
mappedFields = mappedFile.split("_");
if (len(mappedFields) < 2):
return(flagError(rpFile, lineNum, "Invalid mapping! Source is '" + exFile + "', map is '" + mappedFile + "'"));
if not(mappedFields[0] == swappedCharName):
return(flagError(rpFile, lineNum, "Mapped to a different character! Source is '" + exFile + "', map is '" + mappedFile + "'"));
if not(mappedFields[1] == "ex"):
return(flagError(rpFile, lineNum, "Mapping is not to an expression graphic! Source is '" + exFile + "', map is '" + mappedFile + "'"));
newLine = "";
indent = 0;
while line[indent] == " ":
newLine += " ";
indent = indent + 1;
newLine += "show " + fields[1] + base;
i = 2;
while i < len(mappedFields) - 1:
if isNumberField(mappedFields[i]):
newLine += " " + str(int(mappedFields[i]));
else:
newLine += " " + mappedFields[i];
i = i + 1;
newLine += modifiers;
if (line.strip()[-1] == ":"):
newLine += ":";
newLine += "\n";
return newLine;
#-----------------------------------------------------------------------------
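# Pops one thread and steps it until its stack is empty, dispatching each step to the block or
# if processor sitting on top of the stack.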
def processNextThread(rpFile):
#type: (rpp.RenPyFile) -> None
global threads;
thread = threads.pop();
while len(thread.stack) > 0:
obj = thread.stack[-1];
if (obj.objType == "Block"):
processBlockStep(rpFile, thread);
elif (obj.objType == "If"):
processIfStep(rpFile, thread);
else:
print("Unhandled object type: " + obj.objType);
sys.exit(1);
#-----------------------------------------------------------------------------
def addLabelCall(rpFile, l, thread):
#type: (rpp.RenPyFile, str, rpp.RenPyThread) -> None
if not(rpFile.labelIsAcceptable(l)):
return;
labelCalls.append(rpp.RenPyLabelCall(l, thread.vars.copy()));
#-----------------------------------------------------------------------------
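# Spawns a new thread starting at the line after the given label, seeded with the supplied
# variable dictionary.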
def processLabelCall(rpFile, l, v):
#type: (rpp.RenPyFile, str, dict[str, str]) -> None
global threads;
lineNum = rpFile.labelList[l] + 1;
line = rpFile.lines[lineNum];
indent = getIndentOf(line);
blk = rpp.RenPyBlock(lineNum, indent);
thread = rpp.RenPyThread(v, [blk]);
threads.append(thread);
#-----------------------------------------------------------------------------
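# Alternates between draining the queued label calls (skipping duplicates) and running the
# resulting threads until both queues are empty; each pass is reported as one 'depth'.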
def iterateLabelCalls(rpFile):
#type: (rpp.RenPyFile) -> None
global threads;
global labelCalls;
iterations = 1;
duplicates = 0;
numThreads = 0;
while ((len(labelCalls) > 0) or (len(threads) > 0)):
print("---------- Depth " + str(iterations) + " ----------");
print("Label calls: " + str(len(labelCalls)));
# Process label calls
while(len(labelCalls) > 0):
labelCall = labelCalls.pop();
if not(labelCall in labelCalls):
processLabelCall(rpFile, labelCall.label, labelCall.vars);
else:
print("Ignoring duplicate call");
duplicates += 1;
# Process threads
print("Paths: " + str(len(threads)));
while(len(threads) > 0):
processNextThread(rpFile);
numThreads += 1;
if (len(threads) % 10) == 0:
print("[Depth " + str(iterations) + "] Paths: " + str(duplicates) + " dupe, " + str(numThreads) + " total, " + str(len(threads)) + " left this depth");
iterations += 1;
#-----------------------------------------------------------------------------
# Main program
def main(argv):
global inlineErrors;
global pyVariables;
global threads;
doClean = False;
doEliza = True;
doCiel = True;
doGoopy = True;
doScan = True;
doV6 = False;
try:
opts, args = getopt.getopt(argv, "", ["clean","inlineerrors","nociel","noeliza","nogoopy","noscan","v6"])
except getopt.GetoptError:
showError('Usage is: press-stitch.py [--clean]');
sys.exit(1);
for opt, arg in opts:
if (opt == "--clean"):
doClean = True;
elif (opt == "--inlineerrors"):
inlineErrors = True;
elif (opt == "--nociel"):
doCiel = False;
elif (opt == "--noeliza"):
doEliza = False;
elif (opt == "--nogoopy"):
doGoopy = False;
elif (opt == "--noscan"):
doScan = False;
elif (opt == "--v6"):
doV6 = True;
doCiel = False; # Cielpath disabled for 0.6
if (doClean):
removeDir(filename_03);
removeDir(filename_04);
removeDir(filename_05);
removeDir("Extracted");
sys.exit(0);
# Normal run
have3 = False;
have4 = False;
if os.path.exists(filename_03 + ".zip"):
if not(checkFile(filename_03, "e01bfc54520e8251bc73c7ee128836e2")):
sys.exit(1);
have3 = True;
press_stitch_archive.unpackArchive(filename_03);
if os.path.exists(filename_04 + ".zip"):
if not(checkFile(filename_04, "ca7ee44f40f802009a6d49659c8a760d")):
sys.exit(1);
have4 = True;
press_stitch_archive.unpackArchive(filename_04);
if not(checkFile(filename_05, "6a4f9dac386e2fae1bce00e0157ee8b1")):
sys.exit(1);
press_stitch_archive.unpackArchive(filename_05);
extPath5 = os.path.join("Extracted", filename_05);
dstPath = os.path.join(filename_05, "game");
v6map = {};
if doV6:
v6map = characterImageMap56;
dstPath = os.path.join(filename_06, "game");
# Day-0.rpy
print("Patching Day-0.rpy...");
dayzero = rpp.RenPyFile();
dayzero.readFile(os.path.join(extPath5, "Story", "Day-0.rpy"));
dayzero.lines.insert(2848, (" " * 28) + "\"Maybe I was too quick to reject Eliza...\":\n");
dayzero.lines.insert(2849, (" " * 32) + "jump eliza\n");
if doCiel:
dayzero.lines[2851] = (" " * 20) + "\"Hide it until tomorrow.\":\n";
dayzero.lines[2863] = (" " * 20) + "\"Leave it on my desk and sleep.\":\n";
dayzero.lines[2864] = (" " * 24) + "jump leftit\n";
if doV6:
dayzero.v6map = v6map;
dayzero.findLabels();
dayzero.findShows();
addLabelCall(dayzero, "GameStart", rpp.RenPyThread(pyVariables.copy(), []));
iterateLabelCalls(dayzero);
dayzero.writeFile(os.path.join(dstPath, "Story", "Day-0.rpy"));
if doCiel:
# Read Cielpath.rpy into memory
print("Patching Cielpath.rpy...");
cielPath = rpp.RenPyFileCiel(backgrounds_map.backgroundMap35, characterImageMap35, v6map);
cielPath.readFile(os.path.join(extPath5, "Story", "Cielpath.rpy"));
# Search for labels
cielPath.findLabels();
# Search for "show" statements
cielPath.findShows();
# Process the 'leftit' label, it's the toplevel.
addLabelCall(cielPath, "leftit", rpp.RenPyThread({}, []));
iterateLabelCalls(cielPath);
# Flip the affected V3 characters
cielPath.doFlips();
# Write the updated Cielpath.rpy back out
cielPath.writeFile(os.path.join(dstPath, "Story", "Cielpath.rpy"));
if doEliza:
# Read ElizaPath.rpy into memory
print("Patching ElizaPath.rpy... (0.4 content)");
elizaPath = rpp.RenPyFileEliza(backgrounds_map.backgroundMap45, characterImageMap45, v6map);
elizaPath.readFile(os.path.join(extPath5, "Story", "ElizaPath.rpy"));
# Search for labels
elizaPath.findLabels();
# Search for "show" statements
elizaPath.findShows();
if doScan:
# Process the 'eliza' label, it's the toplevel.
# We need two calls, one for the timer < 30 and one for > 30
pyVariables["timer_value"] = 0; # Less than 30
addLabelCall(elizaPath, "eliza", rpp.RenPyThread(pyVariables.copy(), []));
pyVariables["timer_value"] = 60; # Greater than 30
addLabelCall(elizaPath, "eliza", rpp.RenPyThread(pyVariables.copy(), []));
iterateLabelCalls(elizaPath);
# Write the updated ElizaPath.rpy back out
elizaPath.writeFile(os.path.join(dstPath, "Story", "ElizaPath.rpy"));
if doGoopy:
# By default we're processing the file we just made for the 0.4 Eliza content...
srcFile = os.path.join(dstPath, "Story", "ElizaPath.rpy");
if not(doEliza):
# ...but if we've not done Eliza this time, we'll take a saved copy
srcFile = "ElizaPath-NoGoopy.rpy";
# Read ElizaPath.rpy into memory. GoopyPath is ElizaPath but with v0.3 mappings
print("Patching ElizaPath.rpy... (Goopy path)");
goopyPath = rpp.RenPyFileGoopy(backgrounds_map.backgroundMap35, characterImageMap35, v6map);
goopyPath.readFile(srcFile);
# Search for labels
goopyPath.findLabels();
# Search for "show" statements
goopyPath.findShows();
# Process the path
addLabelCall(goopyPath, "elizagoopypath", rpp.RenPyThread({}, []));
iterateLabelCalls(goopyPath);
# Flip the affected V3 characters
#goopyPath.doFlips();
# Write the updated ElizaPath.rpy back out
goopyPath.writeFile(os.path.join(dstPath, "Story", "ElizaPath.rpy"));
# Read effects.rpy into memory
print("Patching effects.rpy...");
effectsFile = rpp.RenPyFile();
effectsFile.readFile(os.path.join(extPath5, "effects.rpy"));
# Patch the timer
effectsFile.lines[492] = "default timer_value = 0\n";
effectsFile.lines[495] = " timer 1 repeat True action SetVariable(\"timer_value\", timer_value + 1)\n";
# Write the updated effects.rpy back out
effectsFile.writeFile(os.path.join(dstPath, "effects.rpy"));
#-----------------------------------------------------------------------------
# Hook to call main
if __name__ == "__main__":
main(sys.argv[1:])
|
StarcoderdataPython
|
3206282
|
<gh_stars>0
# -*- coding: utf-8 -*-
from deep_neural_network.activation import relu, sigmoid
class ActivationObject(object):
def __init__(self):
self.sigmoid = sigmoid.Sigmoid()
self.relu = relu.ReLU()
|
StarcoderdataPython
|
1675829
|
# https://leetcode.com/problems/number-of-steps-to-reduce-a-number-to-zero
class Solution:
def numberOfSteps(self, num):
ans = 0
while 0 < num:
if num % 2 == 0:
num = num // 2
else:
num -= 1
ans += 1
return ans
|
StarcoderdataPython
|
4823632
|
<gh_stars>0
"""regex_compile a regex pattern to an nfa machine.
"""
import re
import string
from regex.parsing_table import (semantic, all_symbols, grammar,
generate_syntax_table)
from regex.graph import Machine
from regex.regex_nfa import induct_star, induct_or, induct_cat, basis
class EscapeError(Exception):
pass
class Lexeme:
"""lexeme contains value and type."""
def __init__(self, lextype, letter):
self.value = letter
self.type = lextype
class Lexer:
"""parse a letter and return a lexeme."""
def __init__(self, stream):
self.stream = list(stream)
def get_lexeme(self):
try:
letter = self.stream.pop(0)
if letter == "\\":
letter = self.stream.pop(0)
if letter in "()|*$":
return Lexeme("a", letter)
else:
raise EscapeError("is not an Escape character \{}".format(letter))
elif letter in "()|*":
return Lexeme(letter, letter)
else:
return Lexeme('a', letter)
except IndexError:
raise IndexError
except EscapeError as e:
raise EscapeError(e)
class RegexCompiler:
"""Regex compiler reads a regex pattern, and return an nfa Machine of graph.
"""
def __init__(self):
self.syntax_table = generate_syntax_table()
self.state_stack = [0]
self.arg_stack = []
self.literal_machine = ""
def parse(self, stream):
lexer = Lexer(stream)
while True:
try:
lexeme = lexer.get_lexeme()
self.ahead(lexeme.type, lexeme.value)
except IndexError:
lexeme = Lexeme("$", "$")
self.ahead(lexeme.type, lexeme.value)
break
except EscapeError as e:
raise EscapeError(e)
def get_action(self, state, literal):
return self.syntax_table[state][all_symbols.index(literal)]
def ahead(self, literal, value=None):
action = self.get_action(self.state_stack[-1], literal)
if action[0] == 's': # shift action
self.state_stack.append(int(action[1:]))
if literal == 'a':
self.arg_stack.append(value)
elif action[0] == '$':
machine_literal = self.arg_stack.pop()
self.literal_machine = machine_literal
# success
elif action[0] == 'r':
number = int(action[1:])
production = grammar[number]
head = production[0]
body = production[1]
for _ in body:
self.state_stack.pop()
state = self.get_action(self.state_stack[-1], head)
self.state_stack.append(int(state))
# translations
args = []
for i in re.findall(r"{}", semantic[number]):
arg = self.arg_stack.pop()
args.insert(0, arg)
translation = semantic[number].format(*args)
self.arg_stack.append(translation)
self.ahead(literal, value)
def regex_compile(regex):
a = RegexCompiler()
a.parse(regex)
m = eval(a.literal_machine)
m.sort_state_names()
return Machine(m)
if __name__ == "__main__":
import sys
import warnings
if not sys.warnoptions: # allow overriding with `-W` option
warnings.filterwarnings('ignore', category=RuntimeWarning)
a = regex_compile("ab\**c*d(e|f)ka*z")
a.show()
|
StarcoderdataPython
|
1724807
|
<reponame>tjgran01/GSR_Concurrent_Recording_Analysis
import neurokit2 as nk
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def load_data(data_fpath="./exports/par2.1_finger.csv"):
data = pd.read_csv(data_fpath)
return data
def main():
data = load_data()
print(data.columns)
eda = nk.eda_phasic(nk.standardize(data["GSR_Skin_Conductance(uS)"]), sampling_rate=128)
eda["events"] = data["Timestamp_Marks"] - 5
eda.plot()
plt.show()
eda.to_csv("./exports/par2.1_finger_decomposed.csv")
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4822786
|
<gh_stars>0
#
# This file does not ping the server
#
import glob
from bs4 import BeautifulSoup
sections = glob.glob('output/01-*.txt')
for section in sections:
print('Parsing: {}'.format(section))
section_name = section.split('-')[2].split('.')[0]
section_link_fn = 'output/02-{}_links.txt'.format(section_name)
f = open(section_link_fn, 'w')
f.close()
with open(section, 'r') as f:
data = f.read()
soup = BeautifulSoup(data, 'lxml')
card_panels = soup.find_all('div', class_='card-panel panel')
for article in card_panels:
rel_link = article.find('a')['href']
full_link = 'http://www.collegiatetimes.com' + rel_link + '\n'
with open(section_link_fn, 'a') as f:
f.write(full_link)
|
StarcoderdataPython
|
1708132
|
<gh_stars>1-10
import pandas as pd
import numpy as np
#import re
import argparse
import sys
import pickle
from cddm_data_simulation import ddm
from cddm_data_simulation import ddm_flexbound
from cddm_data_simulation import levy_flexbound
from cddm_data_simulation import ornstein_uhlenbeck
from cddm_data_simulation import full_ddm
from cddm_data_simulation import ddm_sdv
from cddm_data_simulation import ddm_flexbound_pre
from cddm_data_simulation import race_model
from cddm_data_simulation import lca
import cddm_data_simulation as cds
import boundary_functions as bf
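# Histograms simulator output (rts, choices, metadata) into nbins time bins per possible choice,
# normalised by the number of samples; if nbins is 0 the bin count is derived from max_t and bin_dt.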
def bin_simulator_output(out = None,
bin_dt = 0.04,
nbins = 0): # ['v', 'a', 'w', 'ndt', 'angle']
# Generate bins
if nbins == 0:
nbins = int(out[2]['max_t'] / bin_dt)
bins = np.zeros(nbins + 1)
bins[:nbins] = np.linspace(0, out[2]['max_t'], nbins)
bins[nbins] = np.inf
else:
bins = np.zeros(nbins + 1)
bins[:nbins] = np.linspace(0, out[2]['max_t'], nbins)
bins[nbins] = np.inf
cnt = 0
counts = np.zeros( (nbins, len(out[2]['possible_choices']) ) )
for choice in out[2]['possible_choices']:
counts[:, cnt] = np.histogram(out[0][out[1] == choice], bins = bins)[0] / out[2]['n_samples']
cnt += 1
return counts
def bin_arbitrary_fptd(out = None,
bin_dt = 0.04,
nbins = 256,
nchoices = 2,
choice_codes = [-1.0, 1.0],
max_t = 10.0): # ['v', 'a', 'w', 'ndt', 'angle']
# Generate bins
if nbins == 0:
nbins = int(max_t / bin_dt)
bins = np.zeros(nbins + 1)
bins[:nbins] = np.linspace(0, max_t, nbins)
bins[nbins] = np.inf
else:
bins = np.zeros(nbins + 1)
bins[:nbins] = np.linspace(0, max_t, nbins)
bins[nbins] = np.inf
cnt = 0
counts = np.zeros( (nbins, nchoices) )
for choice in choice_codes:
counts[:, cnt] = np.histogram(out[:, 0][out[:, 1] == choice], bins = bins)[0]
print(np.histogram(out[:, 0][out[:, 1] == choice], bins = bins)[1])
cnt += 1
return counts
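# Thin dispatcher over the cddm simulators: selects the model by name, unpacks theta into that
# model's parameters, and returns either the raw samples (bin_dim == 0) or a binned histogram.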
def simulator(theta,
model = 'angle',
n_samples = 1000,
delta_t = 0.001,
max_t = 20,
bin_dim = None):
# Useful for sbi
if type(theta) == list or type(theta) == np.ndarray:
pass
else:
theta = theta.numpy()
if model == 'ddm':
x = ddm_flexbound(v = theta[0],
a = theta[1],
w = theta[2],
ndt = theta[3],
n_samples = n_samples,
delta_t = delta_t,
boundary_params = {},
boundary_fun = bf.constant,
boundary_multiplicative = True,
max_t = max_t)
if model == 'angle' or model == 'angle2':
x = ddm_flexbound(v = theta[0],
a = theta[1],
w = theta[2],
ndt = theta[3],
boundary_fun = bf.angle,
boundary_multiplicative = False,
boundary_params = {'theta': theta[4]},
delta_t = delta_t,
n_samples = n_samples,
max_t = max_t)
if model == 'weibull_cdf' or model == 'weibull_cdf2' or model == 'weibull_cdf_ext' or model == 'weibull_cdf_concave':
x = ddm_flexbound(v = theta[0],
a = theta[1],
w = theta[2],
ndt = theta[3],
boundary_fun = bf.weibull_cdf,
boundary_multiplicative = True,
boundary_params = {'alpha': theta[4], 'beta': theta[5]},
delta_t = delta_t,
n_samples = n_samples,
max_t = max_t)
if model == 'levy':
x = levy_flexbound(v = theta[0],
a = theta[1],
w = theta[2],
alpha_diff = theta[3],
ndt = theta[4],
boundary_fun = bf.constant,
boundary_multiplicative = True,
boundary_params = {},
delta_t = delta_t,
n_samples = n_samples,
max_t = max_t)
if model == 'full_ddm' or model == 'full_ddm2':
x = full_ddm(v = theta[0],
a = theta[1],
w = theta[2],
ndt = theta[3],
dw = theta[4],
sdv = theta[5],
dndt = theta[6],
boundary_fun = bf.constant,
boundary_multiplicative = True,
boundary_params = {},
delta_t = delta_t,
n_samples = n_samples,
max_t = max_t)
if model == 'ddm_sdv':
x = ddm_sdv(v = theta[0],
a = theta[1],
w = theta[2],
ndt = theta[3],
sdv = theta[4],
boundary_fun = bf.constant,
boundary_multiplicative = True,
boundary_params = {},
delta_t = delta_t,
n_samples = n_samples,
max_t = max_t)
if model == 'ornstein':
x = ornstein_uhlenbeck(v = theta[0],
a = theta[1],
w = theta[2],
g = theta[3],
ndt = theta[4],
boundary_fun = bf.constant,
boundary_multiplicative = True,
boundary_params = {},
delta_t = delta_t,
n_samples = n_samples,
max_t = max_t)
if model == 'pre':
x = ddm_flexbound_pre(v = theta[0],
a = theta[1],
w = theta[2],
ndt = theta[3],
boundary_fun = bf.angle,
boundary_multiplicative = False,
boundary_params = {'theta': theta[4]},
delta_t = delta_t,
n_samples = n_samples,
max_t = max_t)
if model == 'race_model_3':
x = race_model(v = theta[:3],
a = theta[3],
w = theta[4:7],
ndt = theta[7],
s = np.array([1, 1, 1], dtype = np.float32),
boundary_fun = bf.constant,
boundary_multiplicative = True,
boundary_params = {},
delta_t = delta_t,
n_samples = n_samples,
max_t = max_t)
if model == 'race_model_4':
x = race_model(v = theta[:4],
a = theta[4],
w = theta[5:9],
ndt = theta[9],
s = np.array([1, 1, 1, 1], dtype = np.float32),
boundary_fun = bf.constant,
boundary_multiplicative = True,
boundary_params = {},
delta_t = delta_t,
n_samples = n_samples,
max_t = max_t)
if model == 'lca_3':
x = lca(v = theta[:3],
a = theta[3],
w = theta[4:7],
g = theta[7],
b = theta[8],
ndt = theta[9],
s = 1.0,
boundary_fun = bf.constant,
boundary_multiplicative = True,
boundary_params = {},
delta_t = delta_t,
n_samples = n_samples,
max_t = max_t)
if model == 'lca_4':
x = lca(v = theta[:4],
a = theta[4],
w = theta[5:9],
g = theta[9],
b = theta[10],
ndt = theta[11],
s = 1.0,
boundary_fun = bf.constant,
boundary_multiplicative = True,
boundary_params = {},
delta_t = delta_t,
n_samples = n_samples,
max_t = max_t)
# race_model(v = np.array([0, 0, 0], dtype = DTYPE), # np.array expected, one column of floats
# float a = 1, # initial boundary separation
# w = np.array([0, 0, 0], dtype = DTYPE), # np.array expected, one column of floats
# float ndt = 1, # for now we don't allow ndt by choice
# #ndt = np.array([0.0, 0.0, 0.0], dtype = DTYPE),
# s = np.array([1, 1, 1], dtype = DTYPE), # np.array expected, one column of floats
# float delta_t = 0.001, # time increment step
# float max_t = 20, # maximum rt allowed
# int n_samples = 2000,
# print_info = True,
# boundary_fun = None,
# boundary_multiplicative = True,
# boundary_params = {})
# if model == 'race_model_4':
if bin_dim == 0:
return x
else:
return bin_simulator_output(x, nbins = bin_dim)
|
StarcoderdataPython
|
3291494
|
from app import app
from slack_sdk.errors import SlackApiError
from apps.modal_production_calc.modal_production_calc_helpers import fetch_base_view
from apps.modal_production_calc.modal_production_calc_helpers import get_input_values
from apps.modal_production_calc.modal_production_calc_helpers import create_score_blocks
from apps.modal_production_calc.modal_production_calc_helpers import update_base_view
from apps.middleware import fetch_trigger_id
from apps.middleware import validate_input
from apps.middleware import calculate_production_score
# root view
@app.action('production_calc_button_click', middleware=[fetch_trigger_id,
fetch_base_view,
])
def show_root_view(ack, context, logger):
ack()
trigger_id = context['trigger_id']
view = context['base_view']
try:
app.client.views_open(
trigger_id=trigger_id,
view=view
)
except SlackApiError as e:
logger.error(e)
# updated view
@app.view("production_calc_submission", middleware=[fetch_base_view,
get_input_values,
validate_input,
calculate_production_score,
create_score_blocks,
update_base_view,
])
def show_updated_view(ack, context, logger):
ack()
if 'response_action' in context:
ack(context['response_action'])
else:
view = context['view']
response_action = {
"response_action": "update",
"view": view
}
try:
ack(response_action)
except SlackApiError as e:
logger.error(e)
|
StarcoderdataPython
|
150218
|
<reponame>cibu/language-resources
#! /usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Burmese grapheme clusters.
"""
from __future__ import unicode_literals
import codecs
import re
import sys
STDOUT = codecs.lookup('utf-8').streamwriter(sys.stdout)
UNICODE_GRAPHEME_CLUSTER = re.compile(r'''
[-()\u1041-\u104D\u104F\u200B]
| (\u1004\u103A\u1039)? \u104E
| ([\u1004\u101B\u105A]\u103A\u1039)? # kinzi above
[\u1000-\u102A\u103F\u1040\u1050-\u1055] # main independent letter
(\u1039[\u1000-\u102A\u103F\u1050-\u1055])* # stacked consonant below
[\u103A-\u103E\u200C\u200D]* # asat and medials
[\u102B-\u1035\u1056-\u1059]* # dependent vowels
[\u1036\u1037\u1038\u103A]* # final diacritics
''', re.VERBOSE)
ZAWGYI_GRAPHEME_CLUSTER = re.compile(r'''
[-()\u1041-\u104F\u200B]
| \u1031* # prevowel e
[\u103B\u107E-\u1084]? # medial r
[\u1000-\u102A\u1040\u106A\u106B\u106E\u106F\u1086\u108F-\u1092\u1097]
( [\u102B-\u1030\u1032-\u103A\u103C-\u103F\u105A\u1060-\u1069\u106C\u106D]
| [\u1070-\u107D\u1085\u1087-\u108E\u1093-\u1096\u200C\u200D]
)*
''', re.VERBOSE)
class GraphemeClusterer(object):
def __init__(self, which):
if which.startswith('z'):
self.pattern = ZAWGYI_GRAPHEME_CLUSTER
else:
self.pattern = UNICODE_GRAPHEME_CLUSTER
return
def GraphemeClusters(self, text):
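# Yields (matched, text) pairs covering the whole line: True for spans that form a grapheme
# cluster, False for unmatched residue between clusters.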
end = 0
for match in self.pattern.finditer(text):
if match.start() != end:
unmatched = text[end:match.start()]
yield False, unmatched
yield True, match.group(0)
end = match.end()
if end < len(text):
yield False, text[end:]
return
def GetlineUnbuffered(f=sys.stdin):
while True:
line = f.readline()
if not line:
break
yield line.decode('utf-8')
return
if __name__ == '__main__':
if len(sys.argv) != 2 or sys.argv[1].lower() not in ('unicode', 'zawgyi'):
STDOUT.write('Usage: %s (unicode|zawgyi)\n' % sys.argv[0])
sys.exit(2)
clusterer = GraphemeClusterer(sys.argv[1].lower())
for line in GetlineUnbuffered():
line = line.rstrip('\n')
STDOUT.write('Line\t%s\n' % line)
for matched, text in clusterer.GraphemeClusters(line):
STDOUT.write('%s\t%s\t%s\n' %
('Cluster' if matched else 'Unmatched',
text,
' '.join('%04X' % ord(c) for c in text)))
|
StarcoderdataPython
|
1621439
|
<reponame>rackerlabs/qonos
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from qonos.api.v1 import api_utils
from qonos.common import exception
from qonos.common import timeutils
from qonos.common import utils
import qonos.db
from qonos.openstack.common._i18n import _
from qonos.openstack.common import wsgi
class SchedulesController(object):
def __init__(self, db_api=None):
self.db_api = db_api or qonos.db.get_api()
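# Translates query-string parameters into filter arguments, parsing next_run_after/next_run_before
# into normalised datetimes and passing any remaining parameters through untouched.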
def _get_request_params(self, request):
filter_args = {}
params = request.params
if params.get('next_run_after') is not None:
next_run_after = params['next_run_after']
next_run_after = timeutils.parse_isotime(next_run_after)
next_run_after = timeutils.normalize_time(next_run_after)
filter_args['next_run_after'] = next_run_after
if params.get('next_run_before') is not None:
next_run_before = params['next_run_before']
next_run_before = timeutils.parse_isotime(next_run_before)
next_run_before = timeutils.normalize_time(next_run_before)
filter_args['next_run_before'] = next_run_before
if request.params.get('tenant') is not None:
filter_args['tenant'] = request.params['tenant']
filter_args['limit'] = params.get('limit')
filter_args['marker'] = params.get('marker')
for filter_key in params.keys():
if filter_key not in filter_args:
filter_args[filter_key] = params[filter_key]
return filter_args
def list(self, request):
filter_args = self._get_request_params(request)
try:
filter_args = utils.get_pagination_limit(filter_args)
limit = filter_args['limit']
except exception.Invalid as e:
raise webob.exc.HTTPBadRequest(explanation=str(e))
try:
schedules = self.db_api.schedule_get_all(filter_args=filter_args)
if len(schedules) != 0 and len(schedules) == limit:
next_page = '/v1/schedules?marker=%s' % schedules[-1].get('id')
else:
next_page = None
except exception.NotFound:
msg = _('The specified marker could not be found')
raise webob.exc.HTTPNotFound(explanation=msg)
for sched in schedules:
utils.serialize_datetimes(sched),
api_utils.serialize_schedule_metadata(sched)
links = [{'rel': 'next', 'href': next_page}]
return {'schedules': schedules, 'schedules_links': links}
def create(self, request, body=None):
invalid_params = []
if not body:
invalid_params.append('request body is empty')
elif 'schedule' not in body:
invalid_params.append('request body needs "schedule" entity')
else:
if not body['schedule'].get('tenant'):
invalid_params.append('request body needs "tenant" entity')
if not body['schedule'].get('action'):
invalid_params.append('request body needs "action" entity')
if invalid_params:
msg = _('The following errors occured with your request: %s') \
% ', '.join(invalid_params)
raise webob.exc.HTTPBadRequest(explanation=msg)
api_utils.deserialize_schedule_metadata(body['schedule'])
values = {}
values.update(body['schedule'])
values['next_run'] = api_utils.schedule_to_next_run(body['schedule'])
schedule = self.db_api.schedule_create(values)
utils.serialize_datetimes(schedule)
api_utils.serialize_schedule_metadata(schedule)
return {'schedule': schedule}
def get(self, request, schedule_id):
try:
schedule = self.db_api.schedule_get_by_id(schedule_id)
utils.serialize_datetimes(schedule)
api_utils.serialize_schedule_metadata(schedule)
except exception.NotFound:
msg = _('Schedule %s could not be found.') % schedule_id
raise webob.exc.HTTPNotFound(explanation=msg)
return {'schedule': schedule}
def delete(self, request, schedule_id):
try:
self.db_api.schedule_delete(schedule_id)
except exception.NotFound:
msg = _('Schedule %s could not be found.') % schedule_id
raise webob.exc.HTTPNotFound(explanation=msg)
def update(self, request, schedule_id, body):
if not body:
msg = _('The request body must not be empty')
raise webob.exc.HTTPBadRequest(explanation=msg)
elif 'schedule' not in body:
msg = _('The request body must contain a "schedule" entity')
raise webob.exc.HTTPBadRequest(explanation=msg)
# NOTE(jculp): only raise if a blank tenant is passed
# passing no tenant at all is perfectly fine.
elif('tenant' in body['schedule'] and not
body['schedule']['tenant'].strip()):
msg = _('The request body has not specified a "tenant" entity')
raise webob.exc.HTTPBadRequest(explanation=msg)
api_utils.deserialize_schedule_metadata(body['schedule'])
values = {}
values.update(body['schedule'])
try:
values = api_utils.check_read_only_properties(values)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=unicode(e))
request_next_run = body['schedule'].get('next_run')
times = {
'minute': None,
'hour': None,
'month': None,
'day_of_week': None,
'day_of_month': None,
}
update_schedule_times = False
for key in times:
if key in values:
times[key] = values[key]
update_schedule_times = True
if update_schedule_times:
# NOTE(ameade): We must recalculate the schedule's next_run time
# since the schedule has changed
values.update(times)
values['next_run'] = api_utils.schedule_to_next_run(times)
elif request_next_run:
try:
timeutils.parse_isotime(request_next_run)
except ValueError as e:
msg = _('Invalid "next_run" value. Must be ISO 8601 format')
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
schedule = self.db_api.schedule_update(schedule_id, values)
except exception.NotFound:
msg = _('Schedule %s could not be found.') % schedule_id
raise webob.exc.HTTPNotFound(explanation=msg)
utils.serialize_datetimes(schedule)
api_utils.serialize_schedule_metadata(schedule)
return {'schedule': schedule}
def create_resource():
"""QonoS resource factory method."""
return wsgi.Resource(SchedulesController())
|
StarcoderdataPython
|
138142
|
import re
import datetime
from billy.scrape.events import Event, EventScraper
from openstates.utils import LXMLMixin
import pytz
class AKEventScraper(EventScraper, LXMLMixin):
jurisdiction = 'ak'
_TZ = pytz.timezone('US/Alaska')
_DATETIME_FORMAT = '%m/%d/%Y %I:%M %p'
def scrape(self, session, chambers):
EVENTS_URL = 'http://www.akleg.gov/basis/Meeting/Find'
events = self.lxmlize(EVENTS_URL).xpath(
'//ul[@id="meetingResults"]/li')
for info in events:
event_url = info.xpath('span[@class="col04"]/a/@href')[0]
doc = self.lxmlize(event_url)
# Skip events that are placeholders or tentative
# Also skip whole-chamber events
if any(x.strip().startswith("No Meeting") for x in
doc.xpath('//div[@class="schedule"]//text()')) \
or "session" in \
info.xpath('span[@class="col01"]/text()')[0].lower():
continue
event = Event(
session=session,
when=self._TZ.localize(datetime.datetime.strptime(
info.xpath('span[@class="col02"]/text()')[0],
self._DATETIME_FORMAT
)),
type='committee:meeting',
description=" ".join(x.strip() for x
in doc.xpath('//div[@class="schedule"]//text()')
if x.strip()),
location=doc.xpath(
'//div[@class="heading-container"]/span/text()')
[0].title()
)
event.add_participant(
type='host',
participant=info.xpath(
'span[@class="col01"]/text()')[0].title(),
participant_type='committee'
)
for document in doc.xpath('//td[@data-label="Document"]/a'):
event.add_document(
name=document.xpath('text()')[0],
url=document.xpath('@href')[0]
)
event.add_source(EVENTS_URL)
event.add_source(event_url.replace(" ", "%20"))
self.save_event(event)
|
StarcoderdataPython
|
1768222
|
<reponame>pcrete/skil-python
import skil_client
from skil_client.rest import ApiException as api_exception
class WorkSpace:
"""WorkSpace
Workspaces are a collection of features that enable different tasks such as
conducting experiments, training models, and testing different dataset transforms.
Workspaces are distinct from Deployments by operating as a space for
non-production work.
# Arguments
skil: Skil server instance
name: string. Name for the workspace.
labels: string. Labels associated with the workspace, useful for searching (comma separated).
verbose: boolean. If True, api response will be printed.
create: boolean. Internal, do not use.
"""
def __init__(self, skil=None, name=None, labels=None, verbose=False, create=True):
if not create:
return
self.skil = skil
self.printer = self.skil.printer
self.name = name if name else 'skil_workspace'
self.workspace = self.skil.api.add_model_history(
self.skil.server_id,
skil_client.AddModelHistoryRequest(name, labels)
)
self.id = self.workspace.model_history_id
if verbose:
self.printer.pprint(self.workspace)
def delete(self):
"""Deletes the work space.
"""
try:
api_response = self.skil.api.delete_model_history(
self.skil.server_id, self.id)
self.skil.printer.pprint(api_response)
except api_exception as e:
self.skil.printer.pprint(
">>> Exception when calling delete_model_history: %s\n" % e)
def get_workspace_by_id(skil_server, workspace_id):
"""Get workspace by ID
# Arguments:
skil_server: `Skil` server instance
workspace_id: string, workspace ID
"""
server_id = skil_server.server_id
response = skil_server.api.get_model_history(server_id, workspace_id)
ws = WorkSpace(create=False)
ws.skil = skil_server
ws.printer = skil_server.printer
ws.workspace = response
ws.id = workspace_id
ws.name = response.model_name
return ws
|
StarcoderdataPython
|
1664062
|
from os import listdir
from os.path import isfile, join
from collections import defaultdict
from time import time
class Knapsack:
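# 0/1 knapsack solved two ways: a memoized top-down recursion and a bottom-up table;
# get_result runs both over every instance and records their run times.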
def __knapsack_topDown(self, number_items, weight_max, values_items, weight_items):
if number_items == 0 or weight_max == 0: return 0
if weight_items[number_items-1] > weight_max: return self.__knapsack_topDown(number_items-1, weight_max, values_items, weight_items)
if self.mem[number_items][weight_max] is not False: return self.mem[number_items][weight_max]
temp = max(self.__knapsack_topDown(number_items-1, weight_max-weight_items[number_items-1], values_items, weight_items)+values_items[number_items-1], self.__knapsack_topDown(number_items-1, weight_max, values_items, weight_items))
self.mem[number_items][weight_max] = temp
return temp
def __knapsack_bottomUp(self, number_items, weight_max, values_items, weight_items):
K = [[0 for x in range(weight_max + 1)] for x in range(number_items + 1)]
for i in range(number_items + 1):
for w in range(weight_max + 1):
if i == 0 or w == 0: K[i][w] = 0
elif weight_items[i-1] <= w: K[i][w] = max(values_items[i-1] + K[i-1][w-weight_items[i-1]], K[i-1][w])
else: K[i][w] = K[i-1][w]
return K[number_items][weight_max]
def get_result(self, all_instances, number_items, weight_max, values_items, weight_items):
result_topDown = []
time_topDown = []
result_bottomUp = []
time_bottomUp = []
for instance in all_instances:
start = time()
result_bottomUp.append(self.__knapsack_bottomUp(int(number_items[instance][0][0]),int(weight_max[instance][0][0]),values_items[instance],weight_items[instance]))
time_bottomUp.append(time()-start)
self.mem = [[False for i in range(int(weight_max[instance][0][0])+1)] for j in range(int(number_items[instance][0][0])+1)]
start = time()
result_topDown.append(self.__knapsack_topDown(int(number_items[instance][0][0]),int(weight_max[instance][0][0]),values_items[instance],weight_items[instance]))
time_topDown.append(time()-start)
return result_bottomUp, time_bottomUp, result_topDown, time_topDown
def read_instances(directory):
all_files = sorted([f for f in listdir(directory) if isfile(join(directory, f))]) # get the names of all the instance files
all_instances = {}
for file in all_files:
lines = []
with open(directory+file) as instance: # ../Trabalho II/instancias/s000.kp up to the last one
for line in instance:
if line != '\n': lines.append(line.split())
all_instances[file] = lines
return all_instances
def organize_instances(all_instances):
number_items = defaultdict(list)
weight_max = defaultdict(list)
values_items = defaultdict(list)
weight_items = defaultdict(list)
for i in all_instances.items():
number_items[i[0]].append(i[1].pop(0))
weight_max[i[0]].append(i[1].pop(0))
for k in i[1]:
values_items[i[0]].append(int(k[0]))
weight_items[i[0]].append(int(k[1]))
return number_items, weight_max, values_items, weight_items
if __name__ == "__main__":
all_instances = read_instances('../Trabalho II/instancias/')
number_items, weight_max, values_items, weight_items = organize_instances(all_instances)
# print(Knapsack().get_result(all_instances, number_items, weight_max, values_items, weight_items))
|
StarcoderdataPython
|
3262886
|
<gh_stars>1-10
# 2019-11-10 15:45:20(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# from scipy.misc import comb # float
# import numpy as np
def main():
n, m, X, Y = (int(i) for i in sys.stdin.readline().split())
x = [int(x) for x in sys.stdin.readline().split()]
y = [int(y) for y in sys.stdin.readline().split()]
max_x = max(x + [X])
min_y = min(y + [Y])
if Y - X < 1 or min_y - max_x < 1:
ans = 'War'
else:
ans = 'No War'
print(ans)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1669403
|
# Vector.py
# Created by <NAME> (2015)
# Custom two-dimensional vector class for easy creation and manipulation of vectors. Contains
# standard arithmetic operations between vectors and coefficients (addition, subtraction, and
# multiplication), as well as a number of handy operations that are commonly used (dot product,
# normalization, etc.)
import math
import operator
class Vector:
#
x = 0.0
y = 0.0
#
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
#
def __abs__(self):
return math.sqrt(self.x**2 + self.y**2)
length = magnitude = __abs__
#
def normalize(self):
l = self.magnitude()
if l:
self.x = self.x / l
self.y = self.y / l
#
def normalized(self):
l = self.magnitude()
if l:
return Vector(self.x / l, self.y / l)
#
def dot(self, other):
return self.x * other.x + self.y * other.y
#
def distance(self, other):
return math.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)
#
def zero(self):
self.x = 0.0
self.y = 0.0
#
def one(self):
self.x = 1.0
self.y = 1.0
#
def tuple(self):
return (self.x, self.y)
#
def __copy__(self):
return self.__class__(self.x, self.y)
#
def __eq__(self, other):
return self.x == other.x and self.y == other.y
#
def __ne__(self, other):
return not self.__eq__(other)
#
def __nonzero__(self):
return self.x != 0.0 or self.y != 0.0
#
def __add__(self, other):
return Vector(self.x + other.x, self.y + other.y)
__radd__ = __add__
#
def __iadd__(self, other):
self.x += other.x
self.y += other.y
return self
#
def __sub__(self, other):
return Vector(self.x - other.x, self.y - other.y)
#
def __isub__(self, other):
self.x -= other.x
self.y -= other.y
return self
#
def __mul__(self, other):
assert type(other) in (int, int, float)
return Vector(self.x * other, self.y * other)
#
def __rmul__(self, other):
assert type(other) in (int, int, float)
return Vector(self.x * other, self.y * other)
#
def __imul__(self, other):
assert type(other) in (int, int, float)
self.x *= other
self.y *= other
return self
#
def __div__(self, other):
assert type(other) in (int, int, float)
return Vector(operator.div(self.x, other),
operator.div(self.y, other))
#
def __idiv__(self, other):
assert type(other) in (int, int, float)
self.x = operator.div(self.x, other)
self.y = operator.div(self.y, other)
return self
#
def __neg__(self):
return Vector(-self.x, -self.y)
#
def __pos__(self):
return Vector(self.x, self.y)
#
def __str__(self):
return "Vector("+str(self.x)+", "+str(self.y)+")"
#
@staticmethod
def zero():
return Vector(0.0, 0.0)
|
StarcoderdataPython
|
3257998
|
import unittest
import time
from minjob.jobs import JobManager
def run_process(with_exception, name="charlie", code="bravo"):
elapsed = 0
while True:
elapsed += 5
time.sleep(5)
if elapsed >= 5:
if with_exception:
raise Exception(f"Terminating process {name}-{code} abruptly")
else:
break
def run_thread(with_exception, name="hawk", code="alpha"):
elapsed = 0
while True:
elapsed += 5
time.sleep(5)
if elapsed >= 5:
if with_exception:
raise Exception(f"Terminating thread {name}-{code} abruptly")
else:
break
class MinJobTests(unittest.TestCase):
with_exceptions = [True, False]
def setUp(self):
self.names = ["test_process", "test_thread"]
def _stop_jobs(self, with_exceptions):
manager = JobManager()
manager.add_process(self.names[0], run_process, with_exceptions, daemonize=True)
manager.add_thread(self.names[1], run_thread, with_exceptions, daemonize=True)
stop = False
manager.start_all()
available_jobs = manager.available_jobs()
self.assertListEqual(available_jobs, self.names)
while not stop:
time.sleep(2)
manager.stop_all()
stop = True
job_alive = [s.is_alive() for s in manager.jobs]
job_alive.append(manager.supervisor.is_alive())
self.assertFalse(all(job_alive))
def _monitor_jobs(self, with_exceptions):
manager = JobManager()
manager.add_process(self.names[0], run_process, with_exceptions, daemonize=True)
manager.add_thread(self.names[1], run_thread, with_exceptions, daemonize=True)
manager.start_all()
available_jobs = manager.available_jobs()
self.assertListEqual(available_jobs, self.names)
stop = False
while not stop:
time.sleep(9)
job_alive = [s.is_alive() for s in manager.jobs]
self.assertTrue(all(job_alive))
stop = True
manager.stop_all()
job_alive = [s.is_alive() for s in manager.jobs]
job_alive.append(manager.supervisor.is_alive())
self.assertFalse(all(job_alive))
# FIXME: migrate to pytest to use better test parametrization
def test_stop_jobs(self):
for ex in self.with_exceptions:
self._stop_jobs(ex)
def test_monitor_jobs(self):
for ex in self.with_exceptions:
self._monitor_jobs(ex)
|
StarcoderdataPython
|
3217096
|
<filename>simulate.py
import numpy as np
import numpy.random as rng
#rng.seed(0)
NUM_CHANNELS = 5
TOL = 1E-6
def Phi(x):
"""
Softening function
"""
return (x + 1)**(1.0/3.0)
def PhiInv(x):
"""
Inverse
"""
return x**3 - 1.0
def update(m, w):
"""
One iteration of the procedure to compute the multipliers.
Returns True if converged.
"""
# Compute y from the current ms
y = np.empty(NUM_CHANNELS)
for j in range(NUM_CHANNELS):
terms = m*w[:, j]
terms[j] = w[j, j]
y[j] = np.sum(terms)
# Normalise y and compute updated m
y *= np.sum(np.sum(w, axis=0))/np.sum(y)
mnew = Phi(y)
converged = np.mean(np.abs(mnew - m)) < TOL
return mnew, converged
# Matrix of supports. Start with diagonal
w = np.zeros((NUM_CHANNELS, NUM_CHANNELS))
w += np.diag(np.exp(3.0 + rng.randn(NUM_CHANNELS)))
# Add a few signed supports (if they land off-diagonal that's what they are)
for r in range(1 + rng.randint(10)):
i = rng.randint(NUM_CHANNELS)
j = rng.randint(NUM_CHANNELS)
w[i, j] += np.exp(3.0 + rng.randn())
print("Support matrix:")
print("---------------")
print(w, end="\n\n")
print("Raw staked LBC:")
print("---------------")
print(np.sum(w, axis=0), end="\n\n")
# Credibilities
print("Calculating multipliers...", flush=True, end="")
m = np.ones(NUM_CHANNELS)
iterations = 0
while True:
m, converged = update(m, w)
iterations += 1
if converged:
break
print(f"converged after {iterations} iterations.", end="\n\n")
print("Final credibility scores on LBC grade:")
print("-----------------------------------")
y = PhiInv(m)
print(y)
|
StarcoderdataPython
|
3218859
|
<filename>tools_py3/setup.py
import setuptools
from os import listdir
from os.path import isfile, join
with open("README.md", "r") as fh:
long_description = fh.read()
script_path='./bin'
stretch_scripts={script_path+'/'+f for f in listdir(script_path) if isfile(join(script_path, f))}
setuptools.setup(
name="hello_robot_stretch_body_tools_py3",
version="0.0.8",
author="Hello Robot Inc.",
author_email="<EMAIL>",
description="Stretch Body Py3 Tools",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/hello-robot/stretch_body",
scripts = stretch_scripts,
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"License :: OSI Approved :: Apache Software License"
],
install_requires=['trimesh==3.6.38', 'urdfpy', 'numba', 'opencv-python-inference-engine', 'rospkg', 'scipy']
)
#classifiers = [
# "Programming Language :: Python :: 2",
# "License :: OSI Approved :: BSD License",
# "Operating System :: OS Independent",
|
StarcoderdataPython
|
81181
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
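# Digits are stored in reverse order; walk both lists, summing digit pairs plus the carry
# and appending one output digit per step.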
result = ListNode(0)
result_tail = result
carry = 0
while l1 or l2 or carry:
val1 = (l1.val if l1 else 0)
val2 = (l2.val if l2 else 0)
carry, out = divmod(val1+val2 + carry, 10)
result_tail.next = ListNode(out)
result_tail = result_tail.next
l1 = (l1.next if l1 else None)
l2 = (l2.next if l2 else None)
return result.next
|
StarcoderdataPython
|
1626479
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# <NAME>, <EMAIL>
#
#
from .basehandler import BaseHandler
from ppmessage.api.error import API_ERR
from ppmessage.db.models import DeviceUser
from ppmessage.db.models import AppUserData
from ppmessage.core.genericupdate import generic_update
from ppmessage.core.constant import API_LEVEL
import json
import copy
import hashlib
import logging
class PPUpdateUserHandler(BaseHandler):
def _update(self):
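# Validates the app/user uuids, optionally updates the distributor flag and the password
# (checking the old one first), then applies any remaining fields to the DeviceUser record.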
_redis = self.application.redis
_request = json.loads(self.request.body)
_app_uuid = _request.get("app_uuid")
_user_uuid = _request.get("user_uuid")
_is_distributor_user = _request.get("is_distributor_user")
if _user_uuid == None or _app_uuid == None:
self.setErrorCode(API_ERR.NO_PARA)
return
if _is_distributor_user != None:
_key = AppUserData.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid." + _user_uuid + ".is_service_user.True"
_uuid = _redis.get(_key)
if _uuid != None:
_updated = generic_update(_redis, AppUserData, _uuid, {"is_distributor_user": _is_distributor_user})
if not _updated:
self.setErrorCode(API_ERR.GENERIC_UPDATE)
return
_old_password = _request.get("old_password")
_user_password = _request.get("user_password")
if _old_password != None and _user_password != None:
_key = DeviceUser.__tablename__ + ".uuid." + _user_uuid
_ex_password = _redis.hget(_key, "user_password")
if _ex_password != _old_password:
self.setErrorCode(API_ERR.MIS_ERR)
return
# remove not table fields
_data = copy.deepcopy(_request)
del _data["app_uuid"]
del _data["user_uuid"]
if _is_distributor_user != None:
del _data["is_distributor_user"]
if _old_password != None:
del _data["old_password"]
if len(_data) > 0:
_updated = generic_update(_redis, DeviceUser, _user_uuid, _data)
if not _updated:
self.setErrorCode(API_ERR.GENERIC_UPDATE)
return
return
def initialize(self):
self.addPermission(app_uuid=True)
self.addPermission(api_level=API_LEVEL.PPCOM)
self.addPermission(api_level=API_LEVEL.PPKEFU)
self.addPermission(api_level=API_LEVEL.PPCONSOLE)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_KEFU)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_CONSOLE)
return
def _Task(self):
super(PPUpdateUserHandler, self)._Task()
self._update()
return
|
StarcoderdataPython
|
4827234
|
from flask import request, jsonify
from api.index import home_blu
from api.index.utils.handle_json import handle_json
@home_blu.route('/home')
def home():
path = request.args.get('path')
json_dict = handle_json(path)
return jsonify(json_dict)
|
StarcoderdataPython
|
3368675
|
__author__ = '<EMAIL>'
|
StarcoderdataPython
|
44534
|
# Simple XML against XSD Validator for Python 2.7 - 3.2
# to run this script you need additionally: lxml (http://lxml.de)
# author: <NAME>, 2013
import sys
from lxml import etree
xsd_files = []
xml_files = []
def usage():
print("Usage: ")
print("python XSDValidator.py <list of xml files> <list of xsd files>")
print("At least one .xml and one .xsd file is required.")
def validate_files():
""" validates every xml file against every schema file"""
for schema in xsd_files:
xmlschema = etree.XMLSchema(file=schema)
for file in xml_files:
xml_file = etree.parse(file)
if xmlschema.validate(xml_file):
print(file + " is valid against " + schema)
else:
log = xmlschema.error_log
print(file + " is not valid against " + schema)
for error in iter(log):
print("\tReason: " + error.message)
def main():
if(len(sys.argv) < 3):
usage()
sys.exit()
for arg in sys.argv[1:]:
if arg.lower().endswith(".xml"):
xml_files.append(arg)
elif arg.lower().endswith(".xsd"):
xsd_files.append(arg)
if len(xsd_files) < 1 or len(xml_files) < 1:
usage()
sys.exit()
validate_files()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1735687
|
CACHE_PREFIX = "chunk_"
|
StarcoderdataPython
|
44404
|
from system import System
from src.basic.sessions.cmd_session import CmdSession
class CmdSystem(System):
def __init__(self):
super(CmdSystem, self).__init__()
@classmethod
def name(cls):
return 'cmd'
def new_session(self, agent, kb):
return CmdSession(agent, kb)
|
StarcoderdataPython
|
1705286
|
import subprocess
import logging
from subprocess import PIPE
import tempfile
import json, os, re
from github import Github, GithubException
"""
private-www Integration Test CI Hook for Uncle Archie
This hook should be installed into all submodules of private-www.
When a pull request in any of these submodules is updated, and that
pull request has a label "Run private-www integration test",
we run a CI test and update the status of the head commit on the PR.
If the build succeeds, the commit is marked as having succeeded.
Otherwise the commit is marked as failed.
"""
def process_payload(payload, meta, config):
"""
Look for events that are pull requests being opened or updated.
Clone the repo (recursively).
Find the head commit and check it out.
Build the docs.
Examine output for failures.
Mark the commit pass/fail.
"""
# Set parameters for the PR builder
params = {
'repo_whitelist' : ['dcppc/internal','dcppc/organize','dcppc/nih-demo-meetings'],
'task_name' : 'Uncle Archie private-www Integration Test',
'pass_msg' : 'The private-www integration test passed!',
'fail_msg' : 'The private-www integration test failed.',
}
# This must be a pull request
if ('pull_request' not in payload.keys()) or ('action' not in payload.keys()):
return
# We are only interested in PRs that have the label
# ""Run private-www integration test"
pr_labels = [d['name'] for d in payload['pull_request']['labels']]
if 'Run private-www integration test' not in pr_labels:
logging.debug("Skipping private-www integration test: this PR is not labeled with \"Run private-www integration test\"")
return
# We are only interested in PRs that are
# being opened or updated
if payload['action'] not in ['opened','synchronize']:
logging.debug("Skipping private-www integration test: this is not opening/updating a PR")
return
# This must be a whitelisted repo
repo_name = payload['repository']['name']
full_repo_name = payload['repository']['full_name']
if full_repo_name not in params['repo_whitelist']:
logging.debug("Skipping private-www integration test: this is not a whitelisted repo")
return
# Keep it simple:
# get the head commit
head_commit = payload['pull_request']['head']['sha']
pull_number = payload['number']
# Use Github access token to get API instance
token = config['github_access_token']
g = Github(token)
r = g.get_repo(full_repo_name)
c = r.get_commit(head_commit)
# -----------------------------------------------
# start private-www integration test build
logging.info("Starting private-www integration test build for submodule %s"%(full_repo_name))
# Strategy:
# * This will _always_ use private-www as the build repo
# * This will _always_ clone recursively
# * This will _only_ update the submodule of interest,
# to the head commit of the pull request.
# * This will run mkdocs on the entire private-www site.
######################
# logic. noise.
######################
# Fail by default!
build_status = "fail"
build_msg = "" # if blank at the end, use the default
######################
# make space.
######################
scratch_dir = tempfile.mkdtemp()
FNULL = open(os.devnull, 'w')
######################
# build.
######################
# Remember: you can only read() the output
# of a PIPEd process once.
abort = False
# This is always the repo we clone
ghurl = "[email protected]:dcppc/private-www"
clonecmd = ['git','clone','--recursive',ghurl]
logging.debug("Runing clone cmd %s"%(' '.join(clonecmd)))
cloneproc = subprocess.Popen(
clonecmd,
stdout=PIPE,
stderr=PIPE,
cwd=scratch_dir
)
if check_for_errors(cloneproc):
build_status = "fail"
abort = True
if not abort:
# We are always using the latest version of private-www,
# of the default branch, so no need to check out any version.
# However, we do want to check out the correct submodule commit
# in the docs/ folder before we test the mkdocs build command.
# That's what triggered this test in the first place - one of the
# submodules was updated in a PR. Make the submodule point
# to the head commit of that PR.
        # Assemble submodule directory by determining which submodule
# was updated from the payload (repo_name)
repo_dir = os.path.join(scratch_dir, "private-www")
docs_dir = os.path.join(repo_dir,'docs')
submodule_dir = os.path.join(docs_dir,repo_name)
cocmd = ['git','checkout',head_commit]
logging.debug("Runing checkout cmd %s from %s"%(' '.join(cocmd), submodule_dir))
coproc = subprocess.Popen(
cocmd,
stdout=PIPE,
stderr=PIPE,
cwd=submodule_dir
)
if check_for_errors(coproc):
build_status = "fail"
abort = True
if not abort:
buildcmd = ['mkdocs','build']
logging.debug("Runing build command %s"%(' '.join(buildcmd)))
buildproc = subprocess.Popen(
buildcmd,
stdout=PIPE,
stderr=PIPE,
cwd=repo_dir
)
if check_for_errors(buildproc):
build_status = "fail"
abort = True
else:
# the only test that mattered, passed
build_status = "pass"
# end mkdocs build
# -----------------------------------------------
if build_status == "pass":
if build_msg == "":
build_msg = params['pass_msg']
commit_status = c.create_status(
state = "success",
description = build_msg,
context = params['task_name']
)
logging.info("private-www integration test succes:")
logging.info(" Commit %s"%head_commit)
logging.info(" PR %s"%pull_number)
logging.info(" Repo %s"%full_repo_name)
return
elif build_status == "fail":
if build_msg == "":
build_msg = params['fail_msg']
commit_status = c.create_status(
state = "failure",
description = build_msg,
context = params['task_name']
)
logging.info("private-www integration test failure:")
logging.info(" Commit %s"%head_commit)
logging.info(" PR %s"%pull_number)
logging.info(" Repo %s"%full_repo_name)
return
def check_for_errors(proc):
out = proc.stdout.read().decode('utf-8').lower()
err = proc.stderr.read().decode('utf-8').lower()
if "exception" in out or "exception" in err:
return True
if "error" in out or "error" in err:
return True
return False
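# Hedged illustration (not part of the original hook): the minimal shape of a
# GitHub pull_request webhook payload that process_payload() inspects. Every
# value below is a placeholder; only the keys actually read above are shown.
_EXAMPLE_PR_PAYLOAD = {
    "action": "synchronize",
    "number": 123,
    "pull_request": {
        "labels": [{"name": "Run private-www integration test"}],
        "head": {"sha": "0123456789abcdef0123456789abcdef01234567"},
    },
    "repository": {
        "name": "internal",
        "full_name": "dcppc/internal",
    },
}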
|
StarcoderdataPython
|
31429
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
Code written by <NAME> with modifications by <NAME> and <NAME>
This file produces plots comparing our first order sensitivity with BS vega.
"""
# %%
# To run the stuff, you need the package plotly in your anaconda "conda install plotly"
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.io as pio
init_notebook_mode()
pio.renderers.default='svg'
import numpy as np
import numpy.random
import pandas as pd
from scipy.stats import norm, multivariate_normal
from scipy.optimize import minimize
import time
_tstart_stack = []
def tic():
_tstart_stack.append(time.time())
def toc(fmt="Elapsed: %s s"):
print(fmt % (time.time() - _tstart_stack.pop()))
# %%
# We first provide the computation of a call option according to BS (we assume Log normal distribution)
# definition of the dplus and minus functions
# and the BS formula.
def dplus(S, K, T, sigma):
sigmaT = sigma * T ** 0.5
return np.log(S/K)/sigmaT + sigmaT/2
def dminus(S, K, T, sigma):
sigmaT = sigma * T ** 0.5
return np.log(S/K)/sigmaT - sigmaT/2
def BS(S, K, T, sigma, Type = 1):
factor1 = S * norm.cdf(Type * dplus(S, K, T, sigma))
factor2 = K * norm.cdf(Type * dminus(S, K, T, sigma))
return Type * (factor1 - factor2)
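# %%
# Hedged sanity check (illustrative values, not part of the original script):
# an at-the-money one-year call with 20% volatility and no discounting should
# price at roughly S * (N(0.1) - N(-0.1)) ~= 0.0797 with the helper above.
print("Example ATM BS call price:", BS(1.0, 1.0, 1.0, 0.2))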
# Now we provide the computation for the exact call according to the computations in BDT
# We take p = 2
def Robust_Call_Exact_fun(S, K, T, sigma, delta):
def fun(v): #v[0] = a, v[1] = lambda
price = BS(S,max(K - (2 * v[0] + 1)/ (2 * v[1]),0.000001), T, sigma)
return price + v[0] ** 2 / (2 * v[1]) + 0.5 * v[1] * delta ** 2
def cons_fun(v): # the value of v[0] should be constrained to keep strike positive
tmp = K - (2 * v[0] + 1)/ (2 * v[1])
return tmp
cons = ({'type': 'ineq', 'fun' : cons_fun})
guess = np.array([0, 1])
bounds = ((-np.Inf, np.Inf), (0, np.Inf))
res = minimize(fun, guess,
constraints=cons,
method='SLSQP',
bounds=bounds
)
return res.fun
Robust_Call_Exact = np.vectorize(Robust_Call_Exact_fun)
# Now we provide the computation for the first order model uncertainty sensitivity (Upsilon)
# and the resulting BS robust price approximation
# We take p = 2
def Robust_Call_Upsilon(S, K, T, sigma, delta):
muK = norm.cdf(dminus(S, K, T, sigma))
correction = np.sqrt(muK * (1-muK))
return correction
def Robust_Call_Approximation(S, K, T, sigma, delta):
price = BS(S, K, T, sigma)
correction = Robust_Call_Upsilon(S, K, T, sigma, delta)
return price + correction * delta
# %%
# Plotting the robust call and FO approximation for a given strike and increasing uncertainty radius
S = 1
K = 1.2
T = 1
sigma = 0.2
Delta = np.linspace(0, 0.2, 50)
Y0 = BS(S, K, T, sigma)
Y1 = Robust_Call_Approximation(S, K, T, sigma, Delta)
Y2 = Robust_Call_Exact(S, K, T, sigma, Delta)
fig = go.Figure()
fig.add_scatter(x = Delta, y = Y1, name = 'FO')
fig.add_scatter(x = Delta, y = Y2, name = 'RBS')
#fig.layout.title = "Exact Robust Call vs First Order Approx: Strike K="+str(K)+", BS Price="+str(np.round(Y0,4))
fig.layout.xaxis.title = "delta"
fig.layout.yaxis.title = "Price"
iplot(fig)
# %%
# Plotting the robust call and FO approximation for a given radius of uncertainty and a range of strikes
S = 1
K = np.linspace(0.6, 1.4, 100)
T = 1
sigma = 0.2
delta = 0.05
Y0 = Robust_Call_Approximation(S, K, T, sigma, delta)
Y1 = Robust_Call_Exact(S, K, T, sigma, delta)
Y2 = BS(S, K, T, sigma)
fig = go.Figure()
fig.add_scatter(x = K, y = Y0, name = 'FO')
fig.add_scatter(x = K, y = Y1, name = 'Exact')
fig.add_scatter(x = K, y = Y2, name = 'BS')
fig.layout.title = "Call Price vs Exact Robust Call and First Order Approx : delta ="+str(delta)
fig.layout.xaxis.title = "Strike"
fig.layout.yaxis.title = "Price"
iplot(fig)
# %%
# Run a plot to compare BS Vega and BS Upsilon (Uncertainty Sensitivity)
# Plots show the sensitivities
S = 1
K = np.linspace(0.4 * S, 2 * S, 100)
T = 1
sigma = 0.2
delta = 0.02 #is irrelevant here
Y1 = S * (norm.pdf(dplus(S, K , T, sigma)))
Y0 = S * (Robust_Call_Upsilon(S, K, T, sigma, delta))
fig = go.Figure()
fig.add_scatter(x = K, y = Y0, name = 'BS Upsilon')
fig.add_scatter(x = K, y = Y1, name = 'BS Vega')
#fig.layout.title = "Call Price Sensitivity: Vega vs Upsilon, sigma= "+str(sigma)
fig.layout.xaxis.title = "Strike"
fig.layout.yaxis.title = "Price"
iplot(fig)
# %%
# Run a plot to compare BS Vega and BS Upsilon (Uncertainty Sensitivity)
# Plots show the sensitivities
S = 1
K = np.linspace(0.6 * S, 1.4 * S, 100)
T = 1
sigma = 0.2
delta = 0.02 #is irrelevant here
Y0 = S * (norm.pdf(dplus(S, K * np.exp(T * sigma ** 2), T, sigma)) + 1/2-1/np.sqrt(2 * np.pi))
Y1 = S * (Robust_Call_Upsilon(S, K, T, sigma, delta))
fig = go.Figure()
fig.add_scatter(x = K, y = Y0, name = 'BS Vega (shifted) + const')
fig.add_scatter(x = K, y = Y1, name = 'BS Upsilon')
fig.layout.title = "Call Price Sensitivity: Vega vs Upsilon, sigma= "+str(sigma)
fig.layout.xaxis.title = "Strike"
fig.layout.yaxis.title = "Price"
iplot(fig)
|
StarcoderdataPython
|
3344684
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import logging
import time
class BufferingHandler(logging.StreamHandler):
def __init__(self):
logging.StreamHandler.__init__(self)
self.buffered_log_records = []
self.level = None
def setLevel(self, level):
self.level = level
def set_level_by_name(self, new_level_name):
try:
self.level = logging._nameToLevel[new_level_name] # pylint: disable=protected-access
except:
pass
def emit(self, record):
if self.level <= record.levelno:
event = {
'timestamp': record.asctime,
'level': record.levelname,
'filename': record.filename,
'line': record.lineno,
'function': record.funcName,
'message': record.message.replace("'", '"'),
'exception_text': record.exc_text.replace("'", '"') if record.exc_text is not None else None
}
self.buffered_log_records.append(event)
def get_records(self, clear_buffer=False):
records = self.buffered_log_records
if clear_buffer:
self.buffered_log_records = []
return records
def create_logger(logger_name, create_console_handler=True, create_file_handler=False, create_buffering_handler=False, logging_level=logging.INFO):
"""Create a new logger.
Parameters
----------
logger_name : str
Name for the new logger.
create_console_handler : boolean, default=True
Whether to create a stream handler and add to the logger.
create_file_handler : boolean, default=False
Whether to add a file handler. If True, logs are stored in ``<logger_name>.log``.
create_buffering_handler : boolean, default=False
Whether to add a buffering handler. If True, return value will be
logger, buffering_handler instead of just the logger.
logging_level : int, default=logging.INFO
Log level.
"""
logger = logging.getLogger(logger_name)
logger.setLevel(logging_level)
logger.propagate = False
formatter = logging.Formatter('%(asctime)s - %(name)26s - %(levelname)7s - [%(filename)20s:%(lineno)4s - %(funcName)25s() ] %(message)s')
formatter.converter = time.gmtime
formatter.datefmt = '%m/%d/%Y %H:%M:%S'
if create_console_handler:
console_handler = logging.StreamHandler()
console_handler.setLevel(logging_level)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
if create_file_handler:
file_handler = logging.FileHandler(logger_name + ".log")
file_handler.setLevel(logging_level)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
buffering_handler = None
if create_buffering_handler:
buffering_handler = BufferingHandler()
buffering_handler.setLevel(logging_level)
buffering_handler.setFormatter(formatter)
logger.addHandler(buffering_handler)
# TODO: Fix this, as sometimes we are returning a tuple + logger and sometimes just the logger.
if create_buffering_handler:
return logger, buffering_handler
return logger
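# Hedged usage sketch (illustrative only, not part of the original module):
# create a logger with an attached buffering handler and read the captured
# records back out. Note that the console handler formats each record first,
# which populates the asctime/message attributes the buffering handler reads.
if __name__ == "__main__":
    example_logger, example_buffer = create_logger(
        "buffering_example",
        create_console_handler=True,
        create_buffering_handler=True,
        logging_level=logging.INFO,
    )
    example_logger.info("hello from the buffering handler example")
    print(example_buffer.get_records(clear_buffer=True))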
|
StarcoderdataPython
|
153654
|
<gh_stars>0
"""Utilities to call Git commands."""
import os
import re
import shutil
from contextlib import suppress
import log
from . import common, settings
from .exceptions import ShellError
from .shell import call, pwd
def git(*args, **kwargs):
return call("git", *args, **kwargs)
def gitsvn(*args, **kwargs):
return call("git", "svn", *args, **kwargs)
def clone(type, repo, path, *, cache=settings.CACHE, sparse_paths=None, rev=None):
"""Clone a new Git repository."""
log.debug("Creating a new repository...")
if type == "git-svn":
        # just the preparation for the svn deep clone / checkout here;
        # the clone itself is made in the update function to simplify sources.py
os.makedirs(path)
return
assert type == "git"
name = repo.split("/")[-1]
if name.endswith(".git"):
name = name[:-4]
normpath = os.path.normpath(path)
reference = os.path.join(cache, name + ".reference")
sparse_paths_repo = repo if settings.CACHE_DISABLE else reference
if not settings.CACHE_DISABLE and not os.path.isdir(reference):
git("clone", "--mirror", repo, reference)
if sparse_paths and sparse_paths[0]:
os.mkdir(normpath)
git("-C", normpath, "init")
git("-C", normpath, "config", "core.sparseCheckout", "true")
git("-C", normpath, "remote", "add", "-f", "origin", sparse_paths_repo)
with open(
"%s/%s/.git/info/sparse-checkout" % (os.getcwd(), normpath), "w"
) as fd:
fd.write("\n".join(sparse_paths))
with open(
"%s/%s/.git/objects/info/alternates" % (os.getcwd(), normpath), "w"
) as fd:
fd.write("%s/objects" % sparse_paths_repo)
# We use directly the revision requested here in order to respect,
# that not all repos have `master` as their default branch
git("-C", normpath, "pull", "origin", rev)
elif settings.CACHE_DISABLE:
git("clone", repo, normpath)
else:
git("clone", "--reference", reference, repo, normpath)
def is_sha(rev):
"""Heuristically determine whether a revision corresponds to a commit SHA.
Any sequence of 7 to 40 hexadecimal digits will be recognized as a
commit SHA. The minimum of 7 digits is not an arbitrary choice, it
is the default length for short SHAs in Git.
"""
return re.match("^[0-9a-f]{7,40}$", rev) is not None
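# Illustrative behaviour of is_sha() (assumed examples, not from the source):
#   is_sha("a94a8fe")                                  -> True   (7 hex digits)
#   is_sha("a94a8fe5ccb19ba61c4c0873d391e987982fbbd3") -> True   (full SHA-1)
#   is_sha("master")                                   -> False  (branch name)
#   is_sha("abc")                                      -> False  (too short)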
def fetch(type, repo, path, rev=None): # pylint: disable=unused-argument
"""Fetch the latest changes from the remote repository."""
if type == "git-svn":
# deep clone happens in update function
return
assert type == "git"
git("remote", "set-url", "origin", repo)
args = ["fetch", "--tags", "--force", "--prune", "origin"]
if rev:
if is_sha(rev):
pass # fetch only works with a SHA if already present locally
elif "@" in rev:
pass # fetch doesn't work with rev-parse
else:
args.append(rev)
git(*args)
def valid():
"""Confirm the current directory is a valid working tree.
    Checks both that the git working tree exists and that the top level
directory path matches the current directory path.
"""
log.debug("Checking for a valid working tree...")
try:
git("rev-parse", "--is-inside-work-tree", _show=False)
except ShellError:
return False
log.debug("Checking for a valid git top level...")
gittoplevel = os.path.normpath(
os.path.normcase(git("rev-parse", "--show-toplevel", _show=False)[0])
)
currentdir = os.path.normpath(os.path.normcase(pwd(_show=False)))
status = False
if gittoplevel == currentdir:
status = True
else:
log.debug(
"git top level: %s != current working directory: %s",
gittoplevel,
currentdir,
)
status = False
return status
def rebuild(type, repo): # pylint: disable=unused-argument
"""Rebuild a missing repo .git directory."""
if type == "git-svn":
# ignore rebuild in case of git-svn
return
assert type == "git"
common.show("Rebuilding mising git repo...", color="message")
git("init", _show=True)
git("remote", "add", "origin", repo, _show=True)
common.show("Rebuilt git repo...", color="message")
def changes(type, include_untracked=False, display_status=True, _show=False):
"""Determine if there are changes in the working tree."""
status = False
if type == "git-svn":
# ignore changes in case of git-svn
return status
assert type == "git"
try:
# Refresh changes
git("update-index", "-q", "--refresh", _show=False)
# Check for uncommitted changes
git("diff-index", "--quiet", "HEAD", _show=_show)
# Check for untracked files
lines = git(
"ls-files", "--others", "--exclude-standard", _show=_show, _stream=False
)
except ShellError:
status = True
else:
status = bool(lines) and include_untracked
if status and display_status:
with suppress(ShellError):
lines = git("status", _show=True, _stream=False)
common.show(*lines, color="git_changes")
return status
def update(
type, repo, path, *, clean=True, fetch=False, rev=None
): # pylint: disable=redefined-outer-name,unused-argument
if type == "git-svn":
# make deep clone here for simplification of sources.py
# and to realize consistent readonly clone (always forced)
        # completely empty the current directory (also remove hidden content)
for root, dirs, files in os.walk("."):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
# clone specified svn revision
gitsvn("clone", "-r", rev, repo, ".")
return
assert type == "git"
# Update the working tree to the specified revision.
hide = {"_show": False, "_ignore": True}
git("stash", **hide)
if clean:
git("clean", "--force", "-d", "-x", _show=False)
rev = _get_sha_from_rev(rev)
git("checkout", "--force", rev, _stream=False)
git("branch", "--set-upstream-to", "origin/" + rev, **hide)
if fetch:
# if `rev` was a branch it might be tracking something older
git("pull", "--ff-only", "--no-rebase", **hide)
def get_url(type):
"""Get the current repository's URL."""
if type == "git-svn":
return git("config", "--get", "svn-remote.svn.url", _show=False)[0]
assert type == "git"
return git("config", "--get", "remote.origin.url", _show=False)[0]
def get_hash(type, _show=False):
"""Get the current working tree's hash."""
if type == "git-svn":
return "".join(filter(str.isdigit, gitsvn("info", _show=_show)[4]))
assert type == "git"
return git("rev-parse", "HEAD", _show=_show, _stream=False)[0]
def get_tag():
"""Get the current working tree's tag (if on a tag)."""
return git("describe", "--tags", "--exact-match", _show=False, _ignore=True)[0]
def is_fetch_required(type, rev):
if type == "git-svn":
return False
assert type == "git"
return rev not in (get_branch(), get_hash(type), get_tag())
def get_branch():
"""Get the current working tree's branch."""
return git("rev-parse", "--abbrev-ref", "HEAD", _show=False)[0]
def _get_sha_from_rev(rev):
"""Get a rev-parse string's hash."""
if "@{" in rev:
parts = rev.split("@")
branch = parts[0]
date = parts[1].strip("{}")
git("checkout", "--force", branch, _show=False)
rev = git(
"rev-list",
"-n",
"1",
"--before={!r}".format(date),
"--first-parent",
branch,
_show=False,
)[0]
return rev
|
StarcoderdataPython
|
50317
|
<reponame>UOC/dlkit
"""AuthZ Adapter implementations of osid sessions."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from ..osid.osid_errors import IllegalState, Unimplemented
from ..osid.osid_errors import PermissionDenied
from ..primitives import Id
from ..utilities import raise_null_argument
from dlkit.abstract_osid.osid import sessions as abc_osid_sessions
COMPARATIVE = 0
PLENARY = 1
FEDERATED = 0
ISOLATED = 1
class OsidSession(abc_osid_sessions.OsidSession):
"""Adapts underlying OsidSession methodswith authorization checks."""
def __init__(self, provider_session, authz_session, override_lookup_session=None, proxy=None, **kwargs):
self._provider_session = provider_session
self._authz_session = authz_session
self._override_lookup_session = override_lookup_session
self._proxy = proxy
if 'hierarchy_session' in kwargs:
self._hierarchy_session = kwargs['hierarchy_session']
else:
self._hierarchy_session = None
if 'query_session' in kwargs:
self._query_session = kwargs['query_session']
else:
self._query_session = None
self._object_catalog_session = None
self._id_namespace = None
self._qualifier_id = None
self._authz_cache = dict() # Does this want to be a real cache???
self._overriding_catalog_ids = None
self._object_view = COMPARATIVE
self._catalog_view = FEDERATED
def _get_function_id(self, func_name):
return Id(
identifier=func_name,
namespace=self._id_namespace,
authority='ODL.MIT.EDU')
def _can(self, func_name, qualifier_id=None):
"""Tests if the named function is authorized with agent and qualifier.
        Also, caches authz's in a dict. It is expected that this will not grow too big, as
there are typically only a small number of qualifier + function combinations to
store for the agent. However, if this becomes an issue, we can switch to something
like cachetools.
"""
function_id = self._get_function_id(func_name)
if qualifier_id is None:
qualifier_id = self._qualifier_id
agent_id = self.get_effective_agent_id()
try:
return self._authz_cache[str(agent_id) + str(function_id) + str(qualifier_id)]
except KeyError:
authz = self._authz_session.is_authorized(agent_id=agent_id,
function_id=function_id,
qualifier_id=qualifier_id)
self._authz_cache[str(agent_id) + str(function_id) + str(qualifier_id)] = authz
return authz
def _can_for_object(self, func_name, object_id, method_name):
"""Checks if agent can perform function for object"""
can_for_session = self._can(func_name)
if (can_for_session or
self._object_catalog_session is None or
self._override_lookup_session is None):
return can_for_session
override_auths = self._override_lookup_session.get_authorizations_for_agent_and_function(
self.get_effective_agent_id(),
self._get_function_id(func_name))
if not override_auths.available():
return False
if self._object_catalog_session is not None:
catalog_ids = list(getattr(self._object_catalog_session, method_name)(object_id))
for auth in override_auths:
if auth.get_qualifier_id() in catalog_ids:
return True
return False
def _get_overriding_catalog_ids(self, func_name):
if self._overriding_catalog_ids is None and self._override_lookup_session is not None:
cat_id_list = []
function_id = Id(
identifier=func_name,
namespace=self._id_namespace,
authority='ODL.MIT.EDU')
auths = self._override_lookup_session.get_authorizations_for_agent_and_function(
self.get_effective_agent_id(),
function_id)
for auth in auths:
cat_id_list.append(auth.get_qualifier_id())
self._overriding_catalog_ids = cat_id_list
return self._overriding_catalog_ids
def _check_lookup_conditions(self):
if ((self._is_plenary_object_view() or self._is_isolated_catalog_view()) and
not self._get_overriding_catalog_ids('lookup') or
self._query_session is None):
raise PermissionDenied()
def _check_search_conditions(self):
if (self._is_federated_catalog_view() and
self._get_overriding_catalog_ids('search')):
return
raise PermissionDenied()
def _use_comparative_object_view(self):
self._object_view = COMPARATIVE
def _use_plenary_object_view(self):
self._object_view = PLENARY
def _is_comparative_object_view(self):
return not bool(self._object_view)
def _is_plenary_object_view(self):
return bool(self._object_view)
def _use_federated_catalog_view(self):
self._catalog_view = FEDERATED
def _use_isolated_catalog_view(self):
self._catalog_view = ISOLATED
def _is_federated_catalog_view(self):
return not bool(self._catalog_view)
def _is_isolated_catalog_view(self):
return bool(self._catalog_view)
def get_locale(self):
pass
locale = property(fget=get_locale)
def is_authenticated(self):
if self._proxy is None:
return False
elif self._proxy.has_authentication():
return self._proxy.get_authentication().is_valid()
else:
return False
def get_authenticated_agent_id(self):
if self.is_authenticated():
return self._proxy.get_authentication().get_agent_id()
else:
raise IllegalState()
authenticated_agent_id = property(fget=get_authenticated_agent_id)
def get_authenticated_agent(self):
if self.is_authenticated():
return self._proxy.get_authentication().get_agent()
else:
raise IllegalState()
authenticated_agent = property(fget=get_authenticated_agent)
def get_effective_agent_id(self):
if self.is_authenticated():
if self._proxy.has_effective_agent():
return self._proxy.get_effective_agent_id()
else:
return self._proxy.get_authentication().get_agent_id()
else:
return Id(
identifier='<EMAIL>',
namespace='agent.Agent',
authority='MIT-OEIT')
effective_agent_id = property(fget=get_effective_agent_id)
def get_effective_agent(self):
# effective_agent_id = self.get_effective_agent_id()
# This may want to be extended to get the Agent directly from the Authentication
# if available and if not effective agent is available in the proxy
# return Agent(
# identifier=effective_agent_id.get_identifier(),
# namespace=effective_agent_id.get_namespace(),
# authority=effective_agent_id.get_authority())
raise Unimplemented()
effective_agent = property(fget=get_effective_agent)
def get_date(self):
raise Unimplemented()
date = property(fget=get_date)
def get_clock_rate(self):
raise Unimplemented()
clock_rate = property(fget=get_clock_rate)
def get_format_type(self):
raise Unimplemented()
format_type = property(fget=get_format_type)
def supports_transactions(self):
raise Unimplemented()
def start_transaction(self):
raise Unimplemented()
|
StarcoderdataPython
|
3341036
|
<reponame>ChrisGCampbell/FlyRight
from django.db import models
from django.utils import timezone
class Clearance(models.Model):
clearance_id = models.TextField(primary_key=True)
created_by = models.TextField()
state = models.TextField()
message = models.TextField()
date = models.DateTimeField(default=timezone.now)
def as_dict(self):
return {
"id": self.clearance_id,
"created_by": self.created_by,
"state": self.state,
"message": self.message,
"date": self.date.isoformat()
}
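# Hedged usage sketch (illustrative only; all values are made up and this
# assumes a configured Django project): build an unsaved Clearance instance
# and serialize it for an API response.
if __name__ == "__main__":
    example = Clearance(
        clearance_id="example-id",
        created_by="pilot@example.com",
        state="PENDING",
        message="Requesting clearance for a test flight",
    )
    print(example.as_dict())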
|
StarcoderdataPython
|
108665
|
def gcd(a, b):
while b: a, b = b, a % b
return a
def isPrimeMR(n):
d = n - 1
d = d // (d & -d)
L = [2]
for a in L:
t = d
y = pow(a, t, n)
if y == 1: continue
while y != n - 1:
y = (y * y) % n
if y == 1 or t == n - 1: return 0
t <<= 1
return 1
def findFactorRho(n):
m = 1 << n.bit_length() // 8
for c in range(1, 99):
f = lambda x: (x * x + c) % n
y, r, q, g = 2, 1, 1, 1
while g == 1:
x = y
for _ in range(r):
y = f(y)
k = 0
while k < r and g == 1:
ys = y
for _ in range(min(m, r - k)):
y = f(y)
q = q * abs(x - y) % n
g = gcd(q, n)
k += m
r <<= 1
if g == n:
g = 1
while g == 1:
ys = f(ys)
g = gcd(abs(x - ys), n)
if g < n:
if isPrimeMR(g): return g
elif isPrimeMR(n // g): return n // g
return findFactorRho(g)
def primeFactor(n):
i = 2
ret = {}
rhoFlg = 0
while i*i <= n:
k = 0
while n % i == 0:
n //= i
k += 1
if k: ret[i] = k
i += 1 + i % 2
if i == 101 and n >= 2 ** 20:
while n > 1:
if isPrimeMR(n):
ret[n], n = 1, 1
else:
rhoFlg = 1
j = findFactorRho(n)
k = 0
while n % j == 0:
n //= j
k += 1
ret[j] = k
if n > 1: ret[n] = 1
if rhoFlg: ret = {x: ret[x] for x in sorted(ret)}
return ret
if __name__ == "__main__":
print(primeFactor(1234567891012354))
|
StarcoderdataPython
|
3334143
|
#ind internal-node for-internal-using inherit-from-dict
#anl attr_name_list
import efuntool.efuntool as eftl
from eobj.primitive import *
import types
class Node(dict):
def __init__(self,anl,*args,**kwargs):
'''
nd = Node(['pl'],[['a','b']])
>>> nd.pl
['a', 'b']
nd = Node(['pl'])
>>> nd.pl
undefined
'''
args_lngth = len(args)
dfltl= [undefined]*len(anl) if(args_lngth == 0) else args[0]
eftl.self_kwargs(self,anl,dfltl,**kwargs)
def add_method_to_inst(inst,func,*args):
fname = eftl.optional_arg(func.__name__,*args)
f = types.MethodType(func,inst)
inst.__setattr__(fname,f)
return(inst)
def add_method_to_cls(cls,func,*args):
return(add_method_to_inst(cls,func,*args))
def shcmmn(ele,attrs):
for i in range(len(attrs)):
v = ele.__getattribute__(attrs[i])
print(attrs[i]," . ",v)
|
StarcoderdataPython
|
1725542
|
<reponame>hengchu/fuzzi-impl
import json
import numpy as np
import pkg_resources
import random
import re
import scipy.misc as m
import sys
from mnist import MNIST
from optparse import OptionParser
ORIG_SIZE = 28
NEW_SIZE = 28
NUM_PARTITIONS = 10
#Total number of available samples, can set to less if we don't need all of them
#TRAINING_EXAMPLES = 12665;
TRAINING_EXAMPLES = 1000;
TEST_EXAMPLES = 100
# Resizes images down given a single vector for image
# Also scales values from -1 to 1
def resize(imageVector):
vec = np.array(imageVector)
a = np.resize(vec, (ORIG_SIZE, ORIG_SIZE))
new_a = m.imresize(a, (NEW_SIZE, NEW_SIZE))
new_vec = new_a.flatten()
l = new_vec.tolist()
scaled = [(l[i] -128) /128.0 for i in range(len(l))]
return scaled
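# Hedged illustration (not part of the original script) of the value scaling
# used in resize() above: raw MNIST intensities in [0, 255] are shifted by 128
# and divided by 128.0 so the output lies roughly in [-1, 1):
#   pixel   0 -> (0   - 128) / 128.0 = -1.0
#   pixel 128 -> (128 - 128) / 128.0 =  0.0
#   pixel 255 -> (255 - 128) / 128.0 ~= 0.992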
def main():
mndata = MNIST(pkg_resources.resource_filename('fuzzi', 'data/MNIST'))
#Each row of images is 28x28 represented as one vector
#60,000 total examples
images, labels = mndata.load_training()
#5924 zero labels
zero_indices = [i for i, x in enumerate(labels) if x == 0]
#6724 one labels
one_indices = [i for i, x in enumerate(labels) if x == 1]
#Reconstructing the data (total 12665 samples now) - random assortment of 1's and 0's
all_indices = zero_indices + one_indices
random.shuffle(all_indices)
binary_images = [images[i] for i in all_indices]
binary_labels = [labels[i] for i in all_indices]
binary_labels = [binary_labels[i]*2.0 - 1 for i in range(len(binary_labels))]
#binary_images = [images[i] for i in zero_indices] + [images[i] for i in one_indices]
#binary_labels = [0]*len(zero_indices) + [1]*len(one_indices)
#Number of samples we will take (leq 12665)
N = TRAINING_EXAMPLES
smaller_images = []
rand = []
def preview(img):
"""
Render a image list into visible image
"""
img_data = np.array(img)
img_data = np.reshape(img_data, newshape=((NEW_SIZE, NEW_SIZE)))
plt.imshow(img_data, cmap=mpl.cm.Greys)
plt.show()
for i in range(N + TEST_EXAMPLES):
smaller_images.append(resize(binary_images[i]))
rand.append(random.randint(0, NUM_PARTITIONS-1))
# Uncomment this line to look at the pixels
# preview(smaller_images[101])
data = {}
#Create one array with images and labels
images_with_labels = [smaller_images[i] + [binary_labels[i]] for i in range(N)]
images_with_labels_test = [smaller_images[i] + [binary_labels[i]] for i in range(N, N+TEST_EXAMPLES)]
data['db'] = images_with_labels
data['db_test'] = images_with_labels_test
data['trow1'] = [1.0] + (NEW_SIZE * NEW_SIZE + 1)*[0.0]
data['db1'] = []
data['wout'] = (NEW_SIZE * NEW_SIZE + 1)*[0.0]
data['dws'] = []
data['dws_j'] = []
data['w'] = (NEW_SIZE * NEW_SIZE + 1)*[0.0]
data['w_total'] = []
data['j_sum'] = 0.0
data_json_str = json.dumps(data, indent=4)
data_json_str = re.sub(r'(\d),\s+', r'\1, ', data_json_str)
with open(pkg_resources.resource_filename('fuzzi',
'data/MNIST/mnist784.json'),
'w') as outfile:
outfile.write(data_json_str)
|
StarcoderdataPython
|
1648053
|
# The seek information for our encode
class Seek:
def __init__(self, source_file, ss, to, output_name):
self.source_file = source_file
self.ss = ss
self.to = to
self.output_name = output_name
# The seek string arguments for our encode
def get_seek_string(self):
if len(self.ss) > 0 and len(self.to) > 0:
return f'-ss {self.ss} -to {self.to} -i "{self.source_file.file}"'
elif len(self.ss) > 0:
return f'-ss {self.ss} -i "{self.source_file.file}"'
elif len(self.to) > 0:
return f'-i "{self.source_file.file}" -to {self.to}'
else:
return f'-i "{self.source_file.file}"'
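# Hedged usage sketch (illustrative only): the real caller passes a source
# file object; here a stand-in class is assumed that only exposes `.file`.
if __name__ == "__main__":
    class _ExampleSourceFile:
        def __init__(self, file):
            self.file = file

    seek = Seek(
        _ExampleSourceFile("input.mkv"),
        ss="00:01:00",
        to="00:02:30",
        output_name="clip.mkv",
    )
    # Prints: -ss 00:01:00 -to 00:02:30 -i "input.mkv"
    print(seek.get_seek_string())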
|
StarcoderdataPython
|
1760301
|
import os
import re
import subprocess
from contextlib import contextmanager
from os.path import expandvars
from pathlib import Path
from typing import Optional, Union
import cpuinfo
import psutil
from codecarbon.external.logger import logger
def is_jetson():
return os.path.isdir('/sys/bus/i2c/drivers/ina3221x/0-0041/iio_device/')
@contextmanager
def suppress(*exceptions):
try:
yield
except exceptions:
logger.warning("graceful shutdown. Exceptions:")
logger.warning(
exceptions if len(exceptions) != 1 else exceptions[0], exc_info=True
)
logger.warning("stopping.")
pass
def resolve_path(path: Union[str, Path]) -> Path:
"""
Fully resolve a path:
resolve env vars ($HOME etc.) -> expand user (~) -> make absolute
Args:
path (Union[str, Path]): Path to a file or repository to resolve as
string or pathlib.Path
Returns:
pathlib.Path: resolved absolute path
"""
return Path(expandvars(str(path))).expanduser().resolve()
def backup(file_path: Union[str, Path], ext: Optional[str] = ".bak") -> None:
"""
Resolves the path to a path then backs it up, adding the extension provided.
Args:
file_path (Union[str, Path]): Path to a file to backup.
ext (Optional[str], optional): extension to append to the filename when
backing it up. Defaults to ".bak".
"""
file_path = resolve_path(file_path)
if not file_path.exists():
return
assert file_path.is_file()
idx = 0
parent = file_path.parent
file_name = f"{file_path.name}{ext}"
backup = parent / file_name
while backup.exists():
file_name = f"{file_path.name}_{idx}{ext}"
backup = parent / file_name
idx += 1
file_path.rename(backup)
def detect_cpu_model() -> str:
cpu_info = cpuinfo.get_cpu_info()
if cpu_info:
cpu_model_detected = cpu_info.get("brand_raw", "")
return cpu_model_detected
else:
return None
def count_cpus() -> int:
if os.environ.get("SLURM_JOB_ID") is None:
return psutil.cpu_count()
try:
scontrol = subprocess.check_output(
["scontrol show job $SLURM_JOBID"], shell=True
).decode()
except subprocess.CalledProcessError:
logger.warning(
"Error running `scontrol show job $SLURM_JOBID` "
+ "to count SLURM-available cpus. Using the machine's cpu count."
)
return psutil.cpu_count()
num_cpus_matches = re.findall(r"NumCPUs=\d+", scontrol)
if len(num_cpus_matches) == 0:
logger.warning(
"Could not find NumCPUs= after running `scontrol show job $SLURM_JOBID` "
+ "to count SLURM-available cpus. Using the machine's cpu count."
)
return psutil.cpu_count()
if len(num_cpus_matches) > 1:
logger.warning(
"Unexpected output after running `scontrol show job $SLURM_JOBID` "
+ "to count SLURM-available cpus. Using the machine's cpu count."
)
return psutil.cpu_count()
num_cpus = num_cpus_matches[0].replace("NumCPUs=", "")
return int(num_cpus)
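# Hedged usage sketch (illustrative only, not part of the original module):
if __name__ == "__main__":
    print("Detected CPU model:", detect_cpu_model())
    print("Available CPU count:", count_cpus())
    print("Resolved path example:", resolve_path("$HOME/./example"))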
|
StarcoderdataPython
|
191176
|
<filename>tankmonitor.py
from threading import Lock, Thread
from tornado.web import Application, RequestHandler, HTTPError
from tornado.httpserver import HTTPServer
from tornado.template import Template
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.gen import coroutine
from tornado.concurrent import run_on_executor
from sockjs.tornado import SockJSRouter, SockJSConnection
import logging
from tanklogger import TankLogger, TankLogRecord, TankAlert
from functools import partial
from datetime import datetime
from time import time
from serial import Serial
from email.mime.text import MIMEText
from concurrent.futures import ThreadPoolExecutor
import smtplib
import base64
import settings as appconfig
from PIL import Image, ImageDraw, ImageFont
import pcd8544.lcd as lcd
import netifaces as ni
import wiringpi2 as wiringpi
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
listen_port = 4242
disp_contrast_on = 0xB0
disp_contrast_off = 0x80
disp_font = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf", 34)
disp_font_sm = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf", 9)
BTN_IN = 2 # wiringpi pin ID
BTN_OUT = 3 # wiringpi pin ID
VALVE_GPIO = 6 # wiringpi pin ID
thread_pool = ThreadPoolExecutor(2)
class EventConnection(SockJSConnection):
event_listeners = set()
def on_open(self, request):
self.event_listeners.add(self)
def on_close(self):
self.event_listeners.remove(self)
@classmethod
def notify_all(cls, msg_dict):
import json
for event_listener in EventConnection.event_listeners:
event_listener.send(json.dumps(msg_dict))
class MainPageHandler(RequestHandler):
def get(self, *args, **kwargs):
self.render('main.html')
logger_map = {
'10': 'tensec_logger',
'60': 'minute_logger',
'3600': 'hour_logger'
}
class LogDownloadHandler(RequestHandler):
def get(self, logger_interval):
fmt = self.get_argument('format', 'nvd3') # or tsv
deltas = self.get_argument('deltas', False)
logger = getattr(self.application, logger_map[logger_interval], None)
if logger:
records = logger.deltas if deltas else logger.records
if fmt == 'nvd3':
self.finish({'key': 'Tank Level',
'values': list(records)})
elif fmt == 'tsv':
self.set_header('Content-Type', 'text/plain')
if deltas:
self.write('"Timestamp"\t"Rate of Change (%s/min)"\n' % appconfig.LOG_UNIT)
else:
self.write('"Timestamp"\t"%s"\n' % appconfig.LOG_UNIT)
self.write_tsv(records)
self.finish()
def write_tsv(self, records):
for record in records:
timestamp = datetime.fromtimestamp(record.timestamp).strftime('%Y-%m-%d %H:%M:%S')
self.write(str(timestamp))
self.write('\t')
self.write(str(record.depth))
self.write('\n')
class ValveHandler(RequestHandler):
"""Callers can use the GET method to get the status of the creek intake valve and use the
POST method to toggle the status of the creek intake valve.
In both cases the response is a json dict like so:
{
"valve": 0,
"transition_time": "2015-03-18T12:00:12"
}
Indicating the current status of the valve: 0 means that the IO pin is low (the valve is
normally-open, so the valve will be open). 1 means that the IO pin is high and the valve is
closed. transition_time is the time of the most recent state change, in the server's time
zone, or null if the transition time is not known."""
_valve_state = False
_transition_time = None
def get(self, *args, **kwargs):
self.finish(ValveHandler.get_state())
def post(self, *args, **kwargs):
auth_header = self.request.headers.get('Authorization')
if auth_header is None or not auth_header.startswith('Basic '):
self.set_status(401, reason="Valve control requires authentication")
self.set_header('WWW-Authenticate', 'Basic realm=Restricted')
self.finish()
return
else:
auth_decoded = base64.decodestring(auth_header[6:])
hdr_auth = dict()
hdr_auth['username'], hdr_auth['password'] = auth_decoded.split(':', 2)
if hdr_auth != appconfig.CREDENTIALS:
raise HTTPError(403, reason="Valve control credentials invalid")
ValveHandler._valve_state = not ValveHandler._valve_state
ValveHandler._transition_time = datetime.now().isoformat()[:19]
wiringpi.digitalWrite(VALVE_GPIO, int(ValveHandler._valve_state))
self.finish(ValveHandler.get_state())
@staticmethod
def get_state():
return {
'valve': ValveHandler._valve_state,
'transition_time': ValveHandler._transition_time
}
class TankMonitor(Application):
def __init__(self, handlers=None, **settings):
super(TankMonitor, self).__init__(handlers, **settings)
rate_threshold = appconfig.ALERT_RATE_THRESHOLD
self.level_threshold = appconfig.ALERT_LEVEL_THRESHOLD
self.tensec_logger = TankLogger(10, alert_rate_threshold=rate_threshold)
self.minute_logger = TankLogger(60, alert_rate_threshold=rate_threshold)
self.hour_logger = TankLogger(3600, alert_rate_threshold=rate_threshold)
self.latest_raw_val = None
self.display_expiry = 0
def log_tank_depth(self, tank_depth):
"""This method can be called from outside the app's IOLoop. It's the
only method that can be called like that"""
log.debug("Logging depth: " + str(tank_depth))
IOLoop.current().add_callback(partial(self._offer_log_record, time(),
tank_depth))
@coroutine
def _offer_log_record(self, timestamp, depth):
log_record = TankLogRecord(timestamp=timestamp, depth=depth)
if depth < self.level_threshold:
yield AlertMailer.offer(TankAlert(timestamp=timestamp, depth=depth, delta=None))
for logger in self.tensec_logger, self.minute_logger, self.hour_logger:
alert = logger.offer(log_record)
if alert:
yield AlertMailer.offer(alert)
EventConnection.notify_all({
'event': 'log_value',
'timestamp': timestamp,
'value': depth
})
def update_display(self):
ip_addr = ni.ifaddresses('eth0')[ni.AF_INET][0]['addr']
now = time()
if now < self.display_expiry:
im = Image.new('1', (84, 48))
draw = ImageDraw.Draw(im)
draw.text((0, 5), self.latest_raw_val, font=disp_font, fill=1)
draw.text((0, 0), ip_addr, font=disp_font_sm, fill=1)
draw.text((5, 36), "mm to surface", font=disp_font_sm, fill=1)
lcd.show_image(im)
# clean up
del draw
del im
lcd.set_contrast(disp_contrast_on)
else:
lcd.set_contrast(disp_contrast_off)
lcd.cls()
def poll_display_button(self):
btn_down = wiringpi.digitalRead(BTN_IN)
if btn_down:
self.display_expiry = time() + 60
def _set_latest_raw_val(self, val):
self.latest_raw_val = val
def set_latest_raw_val(self, val):
"""This method can be called from any thread."""
IOLoop.instance().add_callback(self._set_latest_raw_val, val)
class MaxbotixHandler():
def __init__(self, tank_monitor, **kwargs):
"""kwargs will be passed through to the serial port constructor"""
self.port_lock = Lock()
self.serial_port = None
self.set_serial_port(**kwargs)
self.stop_reading = False
self.tank_monitor = tank_monitor
self.calibrate_m = 1
self.calibrate_b = 0
def read(self):
log.info("Starting MaxbotixHandler read")
val = None
while not self.stop_reading:
try:
with self.port_lock:
val = self.serial_port.read()
if val == 'R':
val = self.serial_port.read(4)
self.tank_monitor.set_latest_raw_val(val)
self.tank_monitor.log_tank_depth(self.convert(val))
except:
print "Unable to convert value '" + str(val) + "'"
import traceback
traceback.print_exc()
def calibrate(self, m, b):
""" Defines the parameters for a linear equation y=mx+b, which is used
to convert the output of the sensor to whatever units are specified in the settings file.
"""
log.info("Calibrating Maxbotix interface with m=%2.4f, b=%2.4f" % (m, b))
self.calibrate_m = float(m)
self.calibrate_b = float(b)
def convert(self, val):
converted = self.calibrate_m * float(val) + self.calibrate_b
if log.isEnabledFor(logging.DEBUG):
log.debug("Raw value %2.4f converted to %2.4f" % (float(val), converted))
return converted
def shutdown(self):
self.stop_reading = True
def set_serial_port(self, **kwargs):
with self.port_lock:
self.serial_port = Serial(**kwargs)
class AlertMailer(object):
last_alert = None
alert_mail = Template(open('templates/tanklevel.txt', 'rb').read())
@staticmethod
def send_message(alert_text, tank_alert):
msg = MIMEText(alert_text)
msg[
'Subject'] = "[TWUC Alert] Tank Level Warning" if not tank_alert.delta else "[TWUC Alert] Tank Delta Warning"
msg['From'] = appconfig.EMAIL['sending_address']
msg['To'] = ', '.join(appconfig.EMAIL['distribution'])
conn = None
try:
conn = smtplib.SMTP(
"%s:%d" % (appconfig.EMAIL['smtp_server'], appconfig.EMAIL['smtp_port']))
if appconfig.EMAIL['smtp_tls']:
conn.starttls()
conn.login(appconfig.EMAIL['sending_address'], appconfig.EMAIL['sending_password'])
conn.sendmail(appconfig.EMAIL['sending_address'], appconfig.EMAIL['distribution'],
msg.as_string())
finally:
if conn:
conn.quit()
@staticmethod
@coroutine
def offer(tank_alert):
offer_time = time()
if AlertMailer.last_alert is None or \
(offer_time - AlertMailer.last_alert) > appconfig.EMAIL['period']:
alert_text = AlertMailer.alert_mail.generate(alert=tank_alert)
log.warn("Sending e-mail alert due to " + str(tank_alert))
log.warn(alert_text)
AlertMailer.last_alert = offer_time
yield thread_pool.submit(lambda: AlertMailer.send_message(alert_text, tank_alert))
if __name__ == "__main__":
event_router = SockJSRouter(EventConnection, '/event')
handlers = [
(r'/', MainPageHandler),
(r'/logger/(.*)', LogDownloadHandler), # arg is log interval
(r'/valve', ValveHandler)
]
handlers += event_router.urls
tornado_settings = {
'static_path': 'static',
'template_path': 'templates',
'debug': True
}
lcd.init()
lcd.gotoxy(0, 0)
lcd.set_contrast(disp_contrast_on)
lcd.cls()
lcd.text("LCD Init")
wiringpi.pinMode(BTN_OUT, 1)
wiringpi.digitalWrite(BTN_OUT, 1)
wiringpi.pinMode(VALVE_GPIO, 1)
wiringpi.digitalWrite(VALVE_GPIO, 0)
wiringpi.pinMode(BTN_IN, 0)
app = TankMonitor(handlers, **tornado_settings)
maxbotix = MaxbotixHandler(tank_monitor=app, port='/dev/ttyAMA0', timeout=10)
maxbotix.calibrate(appconfig.MAXBOTICS['calibrate_m'],
appconfig.MAXBOTICS['calibrate_b'])
ioloop = IOLoop.instance()
disp_print_cb = PeriodicCallback(app.update_display, callback_time=500, io_loop=ioloop)
disp_print_cb.start()
button_poll_cb = PeriodicCallback(app.poll_display_button, callback_time=100, io_loop=ioloop)
button_poll_cb.start()
http_server = HTTPServer(app)
http_server.listen(listen_port)
log.info("Listening on port " + str(listen_port))
maxbotix_thread = Thread(target=maxbotix.read)
maxbotix_thread.daemon = True
maxbotix_thread.start()
ioloop.start()
|
StarcoderdataPython
|
3328618
|
<filename>examples/polygon_plot.py
#!/usr/bin/env python
# This script plots a polygon created from points.
import pdb
import sys
import warnings
import numpy as np
from cornish import ASTPolygon
from cornish import ASTICRSFrame, ASTFrameSet, ASTBox, ASTFITSChannel, ASTCircle, ASTCompoundRegion
import astropy.units as u
from astropy.io import fits
import matplotlib.pyplot as plt
import starlink.Grf as Grf
import starlink.Ast as Ast
import starlink.Atl as Atl
points = np.array([[ 24.9220814, -2.32553877e-01],
[ 24.8690619, -2.13198227e-01],
[ 24.8080071, -1.56379062e-01],
[ 24.7961841, -1.32038075e-01],
[ 24.7603950, -3.85297093e-02],
[ 24.7542463, 1.17204538e-02],
[ 24.7542463, 1.17204538e-02],
[ 24.7769168, 8.05598701e-02],
[ 24.8216773, 1.66088007e-01],
[ 24.8332202, 1.83192204e-01],
[ 24.8724133, 2.11948177e-01],
[ 24.9190898, 2.36432081e-01],
[ 25.0443598, 2.38795506e-01],
[ 25.0694520, 2.34736352e-01],
[ 25.1083355, 2.26095876e-01],
[ 25.1263480, 2.15632984e-01],
[ 25.1730281, 1.80042404e-01],
[ 25.2055145, 1.40829694e-01],
[ 25.2459929, 1.54015514e-02],
[ 25.2459929, 1.54015514e-02],
[ 25.2426565, -3.86550958e-02],
[ 25.2219908, -9.67569213e-02],
[ 25.1986233, -1.49820068e-01],
[ 25.0872686, -2.32297073e-01]])
icrs_frame = ASTICRSFrame()
polygon = ASTPolygon(frame=icrs_frame, points=points)
galex_circle = ASTCircle(frame=icrs_frame, center=[24.617269485878584,0.2727299618460874], radius=1.1312250143591236)
compound_region = ASTCompoundRegion(regions=[polygon, galex_circle], operation=Ast.AND)
# define the extent of the plot
#bounding_circle = polygon.boundingCircle()
bounding_circle = compound_region.boundingCircle()
# -------------------------------------------------------
# Create frame set that will map the position in the plot
# (i.e. pixel coordinates) to the sky (i.e. WCS)
fits_chan = ASTFITSChannel()
cards = {
"CRVAL1":bounding_circle.center[0], # reference point (image center) in sky coords
"CRVAL2":bounding_circle.center[1],
"CTYPE1":"RA---TAN", #"GLON-TAN", # projection type
"CTYPE2":"DEC--TAN", #"GLAT-TAN",
"CRPIX1":50.5, # reference point (image center) point in pixel coords
"CRPIX2":50.5,
"CDELT1":2.1*bounding_circle.radius.to_value(u.deg)/100,
"CDELT2":2.1*bounding_circle.radius.to_value(u.deg)/100,
"NAXIS1":100,
"NAXIS2":100,
"NAXES":2,
}
print(cards)
naxis1 = cards['NAXIS1']
naxis2 = cards['NAXIS2']
pix2sky_mapping = ASTFrameSet.fromFITSHeader(fits_header=cards)
# -------------------------------------------------------
#pix2sky_mapping.system = "Galactic"
print(bounding_circle.center)
# Create a matplotlib figure, 12x12 inches in size.
dx=12.0
dy=12.0
fig = plt.figure( figsize=(dx,dy) )
fig_aspect_ratio = dy/dx
# Set up the bounding box of the image in pixel coordinates, and get
# the aspect ratio of the image.
naxis1 = int(cards["NAXIS1"])
naxis2 = int(cards["NAXIS2"])
bbox = (0.5, 0.5, naxis1 + 0.5, naxis2 + 0.5)
fits_aspect_ratio = ( bbox[3] - bbox[1] )/( bbox[2] - bbox[0] )
#fits_aspect_ratio = 1
# Set up the bounding box of the image as fractional offsets within the
# figure. The hx and hy variables hold the horizontal and vertical half
# widths of the image, as fractions of the width and height of the figure.
# Shrink the image area by a factor of 0.7 to leave room for annotated axes.
if fig_aspect_ratio > fits_aspect_ratio :
hx = 0.5
hy = 0.5*fits_aspect_ratio/fig_aspect_ratio
else:
hx = 0.5*fig_aspect_ratio/fits_aspect_ratio
hy = 0.5
hx *= 0.7
hy *= 0.7
gbox = ( 0.5 - hx, 0.5 - hy, 0.5 + hx, 0.5 + hy )
# Add an Axes structure to the figure and display the image within it,
# scaled between data values zero and 100. Suppress the axes as we will
# be using AST to create axes.
ax_image = fig.add_axes( [ gbox[0], gbox[1], gbox[2] - gbox[0],
gbox[3] - gbox[1] ], zorder=1 )
ax_image.xaxis.set_visible( False )
ax_image.yaxis.set_visible( False )
#ax_image.imshow( hdu_list[0].data, vmin=0, vmax=200, cmap=plt.cm.gist_heat,
# origin='lower', aspect='auto')
# Add another Axes structure to the figure to hold the annotated axes
# produced by AST. It is displayed on top of the previous Axes
# structure. Make it transparent so that the image will show through.
ax_plot = fig.add_axes( [ 0, 0, 1, 1 ], zorder=2 )
ax_plot.xaxis.set_visible(False)
ax_plot.yaxis.set_visible(False)
ax_plot.patch.set_alpha(0.0)
# Create a drawing object that knows how to draw primitives
# (lines, marks and strings) into this second Axes structure.
grf = Grf.grf_matplotlib( ax_plot )
#print(f"gbox: {gbox}")
#print(f"bbox: {bbox}")
# box in graphics coordinates (area to draw on, dim of plot)
#plot = Ast.Plot( frameset.astObject, gbox, bbox, grf )
plot = Ast.Plot( pix2sky_mapping.astObject, gbox, bbox, grf, options="Uni1=ddd:mm:ss" )
#, options="Grid=1" )
#plot.set( "Colour(border)=2, Font(textlab)=3" );
plot.Grid = True # can change the line properties
plot.Format_1 = "dms"
# colors:
# 1 = black
# 2 = red
# 3 = lime
# 4 = blue
# 5 =
# 6 = pink
plot.grid()
plot.Width_Border = 2
#plot.Colour_Border = "#0099cc"
#plot.regionoutline(bounding_circle.astObject)
plot.Colour_Border = "#106942"
plot.regionoutline(polygon.astObject)
plot.Colour_Border = "blue"
plot.regionoutline(galex_circle.astObject)
#plt.plot(galex_circle.center[0],galex_circle.center[1],'ro')
plot.Colour_Border = "red"
plot.Style = 3
plot.regionoutline(bounding_circle.astObject)
plt.show()
|
StarcoderdataPython
|
27337
|
<reponame>luceatnobis/yt_handle
#!/usr/bin/env python3
from __future__ import print_function
import os
import sys
import shutil
import httplib2
import oauth2client
try:
import apiclient as googleapiclient
except ImportError:
import googleapiclient
from oauth2client.file import Storage, Credentials
from oauth2client.client import flow_from_clientsecrets
CS = "client_secrets.json"
CREDS = "credentials.json"
YOUTUBE_DATA_ROOT = '~/.youtube'
YOUTUBE_READ_WRITE_SSL_SCOPE = (
"https://www.googleapis.com/auth/youtube.force-ssl")
def return_handle(id_name):
identity_root = os.path.expanduser(YOUTUBE_DATA_ROOT)
identity_folder = os.path.join(identity_root, id_name)
if not os.path.exists(identity_folder):
n = input("Identity %s is not known; create it? [Y|n] " % id_name)
if not n or n.lower().startswith('y'):
create_identity(id_name)
else:
sys.exit()
identity = _retrieve_files(identity_folder)
c = Credentials().new_from_json(identity['credentials'])
handle = c.authorize(http=httplib2.Http())
return googleapiclient.discovery.build(
"youtube", "v3", http=handle)
def create_identity(id_name, cs_location=None):
if cs_location is None:
n = input("Please specify the location of the client_secrets file: ")
cs_location = os.path.abspath(os.path.expanduser(n))
if os.path.isdir(cs_location):
cs_location = os.path.join(cs_location, CS)
identity_root = os.path.expanduser(YOUTUBE_DATA_ROOT)
identity_folder = os.path.join(identity_root, id_name)
if os.path.exists(identity_folder):
return
id_cs_location = os.path.join(identity_root, id_name, CS)
id_cred_location = os.path.join(identity_root, id_name, CREDS)
storage = Storage(id_cred_location)
credentials = storage.get()
if credentials and not credentials.invalid:
return credentials # credentials exist
flow = flow_from_clientsecrets(
cs_location, scope=YOUTUBE_READ_WRITE_SSL_SCOPE)
flow.redirect_uri = oauth2client.client.OOB_CALLBACK_URN
authorize_url = flow.step1_get_authorize_url()
code = _console_auth(authorize_url)
if code:
credential = flow.step2_exchange(code, http=None)
os.makedirs(identity_folder)
storage.put(credential)
credential.set_store(storage)
shutil.copyfile(cs_location, id_cs_location)
return credential
else:
print("Invalid input, exiting", file=sys.stderr)
sys.exit()
def _console_auth(authorize_url):
"""Show authorization URL and return the code the user wrote."""
message = "Check this link in your browser: {0}".format(authorize_url)
sys.stderr.write(message + "\n")
try:
        input = raw_input # For Python2 compatibility
except NameError:
        # For Python3 on Windows compatibility
try:
from builtins import input as input
except ImportError:
pass
return input("Enter verification code: ")
def _retrieve_files(folder):
cs_f = os.path.join(folder, CS)
creds_f = os.path.join(folder, CREDS)
with open(cs_f) as sec, open(creds_f) as cred:
secrets = sec.read()
credentials = cred.read()
return dict(secrets=secrets, credentials=credentials)
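# Hedged usage sketch (illustrative only; "my_identity" is a made-up identity
# name): build an authenticated YouTube Data API handle and list the channels
# the stored credential can access.
if __name__ == "__main__":
    youtube = return_handle("my_identity")
    response = youtube.channels().list(part="snippet", mine=True).execute()
    print(response)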
|
StarcoderdataPython
|
3314244
|
class cc_language:
cc_wrong_arguments = "[USER_ID] you must have forgotten the arguments?"
cc_wrong_game_command = "[USER_ID] you must have mistyped?"
cc_shutdown_bot = "Shut down bot..."
cc_game_already_running = "[USER_ID] there is already a game running. Please wait until this one is over."
cc_cards_per_player_set_to = "Each player gets [COUNTER] cards."
cc_no_game_running = "[USER_ID] currently no game running!"
cc_user_already_joined = "[USER_ID] you have already joined the game! Wait until it starts..."
cc_user_joined_game = "[USER_ID] you have joined the game. [[PLAYER_JOINED_COUNT] player]"
cc_more_players_needed = "[USER_ID] more players are needed for a start! At least [MIN_PLAYER_COUNT] players are needed for a start."
cc_user_started_game = "[USER_NAME] has started a new game..."
cc_user_not_part = "[USER_ID] you are not a player in the current round! Wait until this round is finished and then join a new round."
cc_player_won = "#########################\n\n🏆 **[USER_NAME]** has won the game. 🏆"
cc_user_leave_no_part = "[USER_ID] you are not a player in the current round!"
cc_game_end_because_user_left = "The game was ended prematurely because [USER_NAME] left the game and now there are not enough players."
cc_user_left = "[USER_NAME] has left the game."
cc_user_cant_leave_his_turn = "[USER_ID] you can't leave the game right now! Place another card first. After that you can leave the game."
cc_user_no_turn = "[USER_ID] please wait your turn."
cc_card_not_exist = "[USER_ID] this card does not exist! Choose another one."
cc_user_cant_lay_card = "[USER_ID] you can't lay this card! Choose another one."
cc_user_your_turn = "[USER_ID] is now on the move."
cc_wish_without_color = "[USER_ID] you forgot the color."
cc_wish_unknown_color = "[USER_ID] the desired color does not exist."
cc_input_only_numbers = "[USER_ID] you must use a card number and no letters or special characters!"
cc_input_no_number_arg = "[USER_ID] please provide a card number."
cc_game_stopped_by = "The game was terminated prematurely by [USER_NAME]!"
cc_game_cant_stopped = "[USER_ID] you can't finish the game because you're not playing."
cc_game_player_has_cc = "\n**[PLAYER_NAME]** has only one card left! **#CC**"
cc_game_player_can_lay = " it's your turn to lay.\nLay one of your cards with '!card X'."
cc_game_player_cant_lay = " it's your turn and you can't lay.\nPick up a new card with '!getnewcard'."
cc_please_choose_wish_color_react = "[USER_ID] please select a color via the Reactions."
cc_please_choose_card_color_react = "[USER_ID] please select a color via the Reactions. After that you will be able to choose one of the cards of that color from your hand."
cc_please_choose_card_num_react = "[USER_ID] please select a card via the Reactions. Back to the color selection? Use the X"
cc_false_choose_color_react = "[USER_ID] this color does not exist!"
cc_false_choose_number_react = "[USER_ID] you have no card of this color with this number!"
cc_no_kick_user = "[USER_ID] the player was not found."
cc_kick_user_isnt_player = "[USER_ID] the user does not play at all."
cc_cant_kick_current_player = "[USER_ID] the player cannot be removed right now because he has to explain terms. Try again when he doesn't have to explain any more terms."
cc_user_kicked = "The player was successfully removed from the game."
cc_suspend_player_cant_lay_direct_chat = " you'll have to sit this round out, unfortunately, since you can't counter. Wait a moment. We'll continue in a moment..."
cc_suspend_player_cant_lay = "You'll have to sit this round out, unfortunately, since you can't counter. Wait a moment. We'll continue in a moment..."
cc_suspend_player_false_card = "[USER_ID] you have laid the wrong card. Play a sitout card to counter or sit out with '!sitout'!"
cc_suspend_player_must_counter = " Play a sitout card to counter or sit out with '!sitout'!"
cc_suspend_player_counter_cant_get_new_cards = "[USER_ID] you can't pick up cards! Play a sitout card to counter or sit out with '!sitout'."
cc_suspend_player_cant_get_new_cards = "[USER_ID] you can't pick up cards! Wait a moment. We'll be right back..."
cc_suspend_player_want_sit_out = "[USER_ID] you are now sitting out."
cc_suspend_player_cant_sit_out = "[USER_ID] you can't sit out!"
cc_suspend_player_cant_skip = "[USER_ID] you're already sitting out! Wait a moment until it continues..."
cc_plus_card_player_can_lay = " counter the plus card by also laying one, or take the [PLUS_AMOUNT] cards with '!take'."
cc_plus_card_player_cant_lay = " you can't counter and pick up the [PLUS_AMOUNT] cards. Wait a moment. We'll continue in a moment..."
cc_plus_card_player_lay_false_card = "[USER_ID] counter the plus card by also laying one, or take the [PLUS_AMOUNT] cards with '!take'."
cc_plus_card_player_cant_lay_false_card = "[USER_ID] you can't counter and pick up the [PLUS_AMOUNT] cards. Wait a moment. We'll continue in a moment..."
cc_plus_card_player_cant_take = "[USER_ID] there are no plus cards you can take!"
    cc_plus_card_player_take = "[USER_ID] you have picked up the plus cards."
    cc_plus_card_player_cant_skip = "[USER_ID] you have already automatically picked up the plus cards. Wait a moment until the game continues..."
cc_plus_card_player_cant_get_new_cards = "[USER_ID] you cannot pick up any cards! Counter the plus card by laying one as well, or pick up the [PLUS_AMOUNT] cards with '!take'."
cc_plus_card_player_counter_cant_get_new_cards = "[USER_ID] you cannot pick up any cards! Wait a moment. We'll be right back..."
#Generate card-str
cc_timer_action_sit_out = " had to sit out."
cc_timer_action_take_plus_cards = " has picked up the cards."
cc_your_cards = "Your cards"
cc_current_mid_card = "Current deck of cards"
cc_player_sequence = "Round sequence"
cc_players_turn = " is now on the move."
cc_player_laid_card = "put this card"
cc_player_picked_up_card = "has picked up a card"
#Voice
cc_voice_players_turn = "[USER_NAME] is now on the move."
cc_voice_player_won = "[USER_NAME] has won the game!"
cc_voice_player_sit_out = "[USER_NAME] must sit out this round."
|
StarcoderdataPython
|
3310558
|
# runs BF on data and saves the best RPN expressions in results.dat
# all the .dat files are created after I run this script
# the .scr are needed to run the fortran code
import csv
import os
import shutil
import subprocess
import sys
from subprocess import call
import numpy as np
import sympy as sp
from sympy.parsing.sympy_parser import parse_expr
from resources import _get_resource
def brute_force_number(filename):
try_time = 2
file_type = "10ops.txt"
    for leftover in ("results.dat", "brute_solutions.dat", "brute_formulas.dat"):
        try:
            os.remove(leftover)
        except OSError:
            pass  # files from a previous run may not exist
print("Trying to solve mysteries with brute force...")
print("Trying to solve {}".format(filename))
shutil.copy2(filename, "mystery.dat")
data = "'{}' '{}' mystery.dat results.dat".format(_get_resource(file_type),
_get_resource("arity2templates.txt"))
with open("args.dat", 'w') as f:
f.write(data)
    try:
        subprocess.call(["feynman_sr1"], timeout=try_time)
    except (subprocess.TimeoutExpired, OSError):
        pass  # the brute-force run is allowed to time out or be unavailable
return 1
|
StarcoderdataPython
|
3261304
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import string
import unicodedata
import codecs
import csv
import cPickle as pickle
fin = codecs.open("olam-enml.csv", "rb", "utf-8")
malayalam_dict = dict()
pre_data = ""
definition = ""
a=0
for row in fin:
a+=1
print a
data = row.split('\t')
data[3] = data[3].replace('\r',";")
if data[1] != pre_data:
malayalam_dict[data[1].lower()] = data[3]
definition = ""
else:
malayalam_dict[data[1].lower()] += data[3]
pre_data = data[1]
for item in malayalam_dict:
malayalam_dict[item] = malayalam_dict[item].replace('\n',';')
malayalam_dict[item] = malayalam_dict[item].replace(';;',';')
splitml = malayalam_dict[item].split(";")
uniqml = set(splitml)
listml = list(uniqml)
malayalam_dict[item] = ";".join(listml)
malayalam_dict[item] = malayalam_dict[item].replace(';;',';')
if malayalam_dict[item][0] == ";":
malayalam_dict[item] = malayalam_dict[item][1:]
if malayalam_dict[item][len(malayalam_dict[item])-1] == ";":
malayalam_dict[item] = malayalam_dict[item][:-1]
out1 = codecs.open("database.dat", "wb", "utf-8")
out2 = codecs.open("pickledatabase.dat", "wb")
pickle.dump(malayalam_dict, out2)
for key in malayalam_dict:
out1.write("%s\t%s\n" % (key,malayalam_dict[key]))
out1.close()
out2.close()
fin.close()
|
StarcoderdataPython
|
73848
|
<gh_stars>0
from django.urls import path
from .views import IndexView
app_name = 'home'
urlpatterns = [
path('', IndexView.as_view(), name='indexView')
]
|
StarcoderdataPython
|
3395936
|
<reponame>Dloar/stocks_games
from datetime import datetime
cur_time = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
print("Currently is " + cur_time)
|
StarcoderdataPython
|
1617506
|
#!/usr/bin/env python3
"""Squid helper for authenticating basic auth against bcrypt hashes.
See Authenticator > Basic Scheme here:
https://wiki.squid-cache.org/Features/AddonHelpers
Designed to work with bcrypt hash files created with htpasswd:
EXAMPLE: htpasswd -cbB -C 10 /path/to/password_file username password
This program loads the password file content into memory based on the
assumption the underlying host is ephemeral and the password file is
populated when the host is bootstrapped.
"""
import sys
import bcrypt
def load_hashes_to_memory(filename: str) -> dict:
"""Return dictionary of usernames and bcrypt hashes.
Ex: {'myusername': <PASSWORD>'}
"""
password_kv = {}
with open(filename, 'r') as f:
for line in f:
sections = line.strip().split(':')
            try:
                user = sections[0].strip().lower()
                pw_hash = sections[1].strip()  # avoid shadowing the built-in hash()
            except IndexError:
                raise RuntimeError("password file has invalid content")
            else:
                password_kv[user] = pw_hash
return password_kv
def write_stdout(response: str) -> None:
"""Write to stdout and flush. Make sure one and
only one newline exists before writing."""
response = response.strip()
sys.stdout.write(f'{response}\n')
sys.stdout.flush()
def run_loop(password_kv: dict) -> None:
"""Validate username and passwords from the squid proxy
using bcrypt."""
while True:
try:
line = sys.stdin.readline()
line = line.strip()
if line == '':
                write_stdout('BH message="empty line from proxy"')
continue
parts = line.split(' ', 1) # setting maxsplit to 1 makes sure we handle passwords with spaces in them
try:
username = parts[0].strip()
password = parts[1].strip()
except IndexError:
                    write_stdout('BH message="stdin message invalid format"')
continue
password_hash = password_kv.get(username.lower(), None)
if password_hash is None:
                    write_stdout('ERR message="invalid credentials"')
continue
authenticated = bcrypt.checkpw(password.encode('utf-8'), password_hash.encode('utf-8'))
if authenticated:
write_stdout('OK')
continue
write_stdout('ERR message="invalid credentials"')
continue
except Exception:
write_stdout('BH message="unknown error"')
continue
def main():
"""Load hashes from file into memory and start the
bcrypt validation service."""
password_file = sys.argv[1]
user_hash_kv = load_hashes_to_memory(password_file)
run_loop(user_hash_kv)
if __name__ == "__main__":
main()
exit(0)
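# ---------------------------------------------------------------------------
# Hedged usage sketch (assumption, not part of the original helper): squid is
# normally pointed at a basic-auth helper from squid.conf with something like
#
#   auth_param basic program /usr/local/bin/bcrypt_auth_helper.py /etc/squid/passwords
#   auth_param basic children 5
#   acl authenticated proxy_auth REQUIRED
#   http_access allow authenticated
#
# The helper path, password file location, and child count above are
# illustrative placeholders only.
# ---------------------------------------------------------------------------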
|
StarcoderdataPython
|
870
|
import enum
from typing import Union
@enum.unique
class PPT(enum.Enum):
# Source: https://docs.microsoft.com/en-us/office/vba/api/powerpoint.ppsaveasfiletype
AnimatedGIF = 40
BMP = 19
Default = 11
EMF = 23
External = 64000
GIF = 16
JPG = 17
META = 15
MP4 = 39
OpenPresentation = 35
PDF = 32
PNG = 18
Presentation = 1
RTF = 6
SHOW = 7
Template = 5
TIF = 21
WMV = 37
XPS = 33
app = 'Powerpoint.Application'
extensions = ('.ppt', '.pptx')
@enum.unique
class WORD(enum.Enum):
# Source: https://docs.microsoft.com/en-us/office/vba/api/word.wdsaveformat
DosText = 4
DosTextLineBreaks = 5
FilteredHTML = 10
FlatXML = 19
OpenDocumentText = 23
HTML = 8
RTF = 6
Template = 1
Text = 2
TextLineBreaks = 3
UnicodeText = 7
WebArchive = 9
XML = 11
Document97 = 0
DocumentDefault = 16
PDF = 17
XPS = 18
app = 'Word.Application'
extensions = ('.doc', '.docx')
@enum.unique
class XL(enum.Enum):
# Source: https://docs.microsoft.com/en-us/office/vba/api/excel.xlfixedformattype
# TODO: Implement "SaveAs" methods, see: https://docs.microsoft.com/en-us/office/vba/api/excel.workbook.saveas
PDF = 0
XPS = 1
app = 'Excel.Application'
extensions = ('.xls', '.xlsx')
enum_types = Union[PPT, WORD, XL]
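# Hedged usage sketch (not part of the original module): the enum members above double
# as lookup data, e.g. PPT.extensions.value is ('.ppt', '.pptx') and PPT.PDF.value is the
# numeric SaveAs code. A hypothetical helper that picks the enum class for a file path
# might look like this; the function name is an assumption for illustration only.
def _format_class_for(path: str):
    """Return the enum class (PPT, WORD or XL) whose extensions match the file suffix."""
    import pathlib
    suffix = pathlib.Path(path).suffix.lower()
    for office_format in (PPT, WORD, XL):
        if suffix in office_format.extensions.value:
            return office_format
    raise ValueError("unsupported extension: {}".format(suffix))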
|
StarcoderdataPython
|
1657292
|
"""
np_rw_buffer.buffer
SeaLandAire Technologies
@author: jengel
Numpy circular buffer to help store audio data.
"""
import numpy as np
import threading
from .utils import make_thread_safe
from .circular_indexes import get_indexes
__all__ = ["UnderflowError", "get_shape_columns", "get_shape", "reshape", "RingBuffer", "RingBufferThreadSafe"]
UnderflowError = ValueError
def get_shape(shape):
"""Return rows, columns for the shape."""
try:
return (shape[0], shape[1]) + shape[2:]
except IndexError:
return (shape[0], 0) + shape[2:]
except TypeError:
return int(shape), 0
# get_shape
def get_shape_columns(shape):
"""Return the number of columns for the shape."""
try:
return shape[1]
except (IndexError, TypeError):
return 0
# end get_shape_columns
def reshape(ring_buffer, shape):
"""Safely reshape the data.
Args:
ring_buffer (RingBuffer/np.ndarray/np.array): Array to reshape
shape (tuple): New shape
"""
try:
buffer = ring_buffer._data
except AttributeError:
buffer = ring_buffer
new_shape = get_shape(shape)
myshape = get_shape(buffer.shape)
if new_shape[1] == 0:
new_shape = (new_shape[0], 1) + new_shape[2:]
if new_shape[0] == -1:
try: # Only change the column shape
buffer.shape = new_shape
except ValueError: # Change the entire array shape
rows = int(np.ceil(myshape[0]/new_shape[1]))
new_shape = (rows, ) + new_shape[1:]
buffer.resize(new_shape, refcheck=False)
else:
# Force proper sizing
buffer.resize(new_shape, refcheck=False)
# Clear the buffer if it did anything but grow in length
# if not (new_shape[0] > myshape[0] and new_shape[1:] == myshape[1:]):
try:
ring_buffer.clear()
except AttributeError:
pass
def format_write_data(data, mydtype):
"""Format the given data to the proper shape that can be written into this buffer."""
try:
len(data) # Raise TypeError if no len
dshape = data.shape
except TypeError:
# Data has no length
data = np.asarray(data, dtype=mydtype)
dshape = data.shape
except AttributeError:
# Data is not a numpy array
data = np.asarray(data, dtype=mydtype)
dshape = data.shape
# Force at least 1 column
if get_shape_columns(dshape) == 0:
data = np.reshape(data, (-1, 1))
dshape = data.shape
return data, dshape
# end format_write_data
class RingBuffer(object):
"""Numpy circular buffer to help store audio data.
Args:
shape (tuple/int): Length of the buffer.
columns (int)[1]: Columns for the buffer.
dtype (numpy.dtype)[numpy.float32]: Numpy data type for the buffer.
"""
def __init__(self, shape, columns=None, dtype=np.float32):
self._start = 0
self._end = 0
self._length = 0
self.lock = threading.RLock()
# Configure the shape (check if the given length was really the shape)
        if isinstance(shape, (tuple, list)):
            if columns is not None and columns > 0:
                shape = (shape[0], columns) + shape[2:]
else:
if columns is None:
columns = 1
shape = (shape, columns)
# Force columns
if get_shape_columns(shape) == 0:
shape = (shape[0], 1) + shape[2:]
# Create the data buffer
shape = tuple((int(np.ceil(i)) for i in shape))
self._data = np.zeros(shape=shape, dtype=dtype)
# end constructor
def clear(self):
"""Clear the data."""
self._start = 0
self._end = 0
self._length = 0
# end clear
def get_data(self):
"""Return the data in the buffer without moving the start pointer."""
idxs = self.get_indexes(self._start, self._length, self.maxsize)
return self._data[idxs].copy()
# end get_data
def set_data(self, data):
"""Set the data."""
self.dtype = data.dtype
self.shape = data.shape
self.clear()
self.expanding_write(data)
# end set_data
def _write(self, data, length, error, move_start=True):
"""Actually write the data to the numpy array.
Args:
data (np.array/np.ndarray): Numpy array of data to write. This should already be in the correct format.
length (int): Length of data to write. (This argument needs to be here for error purposes).
error (bool): Error on overflow else overrun the start pointer or move the start pointer to prevent
overflow (Makes it circular).
move_start (bool)[True]: If error is false should overrun occur or should the start pointer move.
Raises:
OverflowError: If error is True and more data is being written then there is space available.
"""
idxs = self.get_indexes(self._end, length, self.maxsize)
self.move_end(length, error, move_start)
self._data[idxs] = data
def expanding_write(self, data, error=True):
"""Write data into the buffer. If the data is larger than the buffer expand the buffer.
Args:
data (numpy.array): Data to write into the buffer.
error (bool)[True]: Error on overflow else overrun the start pointer or move the start pointer to prevent
overflow (Makes it circular).
Raises:
            ValueError: If data shape does not match this shape. Arrays without a column will be converted to 1 column
Example: (5,) will become (5, 1) and will not error if there is 1 column
OverflowError: If the written data will overflow the buffer.
"""
data, shape = format_write_data(data, self.dtype)
length = shape[0]
if shape[1:] != self.shape[1:]:
msg = "could not broadcast input array from shape {:s} into shape {:s}".format(str(shape), str(self.shape))
raise ValueError(msg)
elif length > self.maxsize:
self.shape = (length, ) + self.shape[1:]
self._write(data, length, error)
# end expanding_write
def growing_write(self, data):
"""Write data into the buffer. If there is not enough available space then grow the buffer.
Args:
data (numpy.array): Data to write into the buffer.
Raises:
            ValueError: If data shape does not match this shape. Arrays without a column will be converted to 1 column
Example: (5,) will become (5, 1) and will not error if there is 1 column
OverflowError: If the written data will overflow the buffer.
"""
data, shape = format_write_data(data, self.dtype)
length = shape[0]
available = self.get_available_space()
if shape[1:] != self.shape[1:]:
msg = "could not broadcast input array from shape {:s} into shape {:s}".format(str(shape), str(self.shape))
raise ValueError(msg)
elif length > available:
# Keep the old data and reshape
old_data = self.get_data()
self.shape = (self.maxsize + (length - available),) + self.shape[1:]
if len(old_data) > 0:
self._write(old_data, len(old_data), False)
self._write(data, length, error=True)
    # end growing_write
def write_value(self, value, length, error=True, move_start=True):
"""Write a value into the buffer for the given length. This is more efficient then creating and writing an array of a single value.
Args:
value (int/float/object): Value to put in the buffer
length (int): Number of blank samples
error (bool)[True]: Error on overflow else overrun the start pointer or move the start pointer to prevent
overflow (Makes it circular).
move_start (bool)[True]: If error is false should overrun occur or should the start pointer move.
Raises:
OverflowError: If the written data will overflow the buffer.
"""
if not error and length > self.maxsize:
length = self.maxsize
idxs = self.get_indexes(self._end, length, self.maxsize)
self.move_end(length, error, move_start)
self._data[idxs] = value
def write_zeros(self, length, error=True, move_start=True):
"""Write zeros into the buffer for the specified length.
Args:
length (int): Number of blank samples
error (bool)[True]: Error on overflow else overrun the start pointer or move the start pointer to prevent
overflow (Makes it circular).
move_start (bool)[True]: If error is false should overrun occur or should the start pointer move.
Raises:
OverflowError: If the written data will overflow the buffer.
"""
self.write_value(0, length, error=error, move_start=move_start)
def write(self, data, error=True):
"""Write data into the buffer.
Args:
data (numpy.array): Data to write into the buffer.
error (bool)[True]: Error on overflow else overrun the start pointer or move the start pointer to prevent
overflow (Makes it circular).
Raises:
            ValueError: If data shape does not match this shape. Arrays without a column will be converted to 1 column
Example: (5,) will become (5, 1) and will not error if there is 1 column
OverflowError: If the written data will overflow the buffer.
"""
data, shape = format_write_data(data, self.dtype)
length = shape[0]
if shape[1:] != self.shape[1:]:
msg = "could not broadcast input array from shape {:s} into shape {:s}".format(str(shape), str(self.shape))
raise ValueError(msg)
elif not error and length > self.maxsize:
data = data[-self.maxsize:]
length = self.maxsize
self._write(data, length, error)
# end write
def read(self, amount=None):
"""Read the data and move the start/read pointer, so that data is not read again.
This method reads empty if the amount specified is greater than the amount in the buffer.
Args:
amount (int)[None]: Amount of data to read
"""
if amount is None:
amount = self._length
# Check available read size
if amount == 0 or amount > self._length:
return self._data[0:0].copy()
idxs = self.get_indexes(self._start, amount, self.maxsize)
self.move_start(amount)
return self._data[idxs].copy()
# end read
def read_remaining(self, amount=None):
"""Read the data and move the start/read pointer, so that the data is not read again.
This method reads the remaining data if the amount specified is greater than the amount in the buffer.
Args:
amount (int)[None]: Amount of data to read
"""
if amount is None or amount > self._length:
amount = self._length
# Check available read size
if amount == 0:
return self._data[0:0].copy()
idxs = self.get_indexes(self._start, amount, self.maxsize)
self.move_start(amount)
return self._data[idxs].copy()
# end read_remaining
def read_overlap(self, amount=None, increment=None):
"""Read the data and move the start/read pointer.
This method only increments the start/read pointer the given increment amount. This way the same data can be
read multiple times.
This method reads empty if the amount specified is greater than the amount in the buffer.
Args:
amount (int)[None]: Amount of data to read
increment (int)[None]: Amount to move the start/read pointer allowing overlap if increment is less than the
given amount.
"""
if amount is None:
amount = self._length
if increment is None:
increment = amount
# Check available read size
if amount == 0 or amount > self._length:
return self._data[0:0].copy()
idxs = self.get_indexes(self._start, amount, self.maxsize)
self.move_start(increment)
return self._data[idxs].copy()
# end read_overlap
def read_last(self, amount=None, update_rate=None):
"""Read the last amount of data and move the start/read pointer.
This is an odd method for FFT calculations. It reads the newest data moving the start pointer by the
update_rate amount that it was given. The returned skips number is the number of update_rate values.
Example:
.. code-block :: python
>>> buffer = RingBuffer(11, 1)
>>> buffer.write([0, 1, 2, 3, 4, 5, 6, 7 ,8, 9, 10])
>>> buffer.read_last(6, 2))
(array([[4.],
[5.],
[6.],
[7.],
[8.],
[9.]], dtype=float32), 3)
>>> # Note must read in a multiple of the amount and moves by a multiple of the update rate.
Args:
amount (int)[None]: Amount of data to read. NFFT value.
update_rate (int)[None]: The fft update rate value. How many samples to move the pointer by
to cause overlap.
Returns:
data (np.array/np.ndarray) [None]: Data that is of length amount.
updates (int) [0]: Number of updates (Total number of update rates until the end of the data was
found including the data that was returned).
"""
if amount is None:
amount = self._length
if update_rate is None:
update_rate = amount
# Check available read size
if amount == 0 or amount > self._length:
return None, 0
skips = (self._length - amount) // update_rate
if skips > 0:
self.move_start(update_rate * skips)
idxs = self.get_indexes(self._start, amount, self.maxsize)
self.move_start(update_rate)
return self._data[idxs].copy(), skips + 1
# end read_last
def __len__(self):
"""Return the current size of the buffer."""
return self._length
def __str__(self):
return self.get_data().__str__()
get_indexes = staticmethod(get_indexes)
def move_start(self, amount, error=True, limit_amount=True):
"""This is an internal method and should not need to be called by the user.
Move the start pointer the given amount (+/-).
Raises:
UnderflowError: If the amount is > the length.
Args:
amount (int): Amount to move the start pointer by.
error (bool)[True]: Raise a ValueError else sync the end pointer and length.
limit_amount (bool)[True]: If True force the amount to be less than or equal to the amount in the buffer.
"""
if amount == 0:
return
elif amount > self._length:
if error:
raise UnderflowError("Not enough data in the buffer " + repr(self))
if limit_amount:
# You cannot read more than what you have
amount = self._length
# end error
stop = self._start + amount
try:
self._start = stop % self.maxsize
except ZeroDivisionError:
self._start = stop
        self.sync_length(amount < 0)  # Length grows if the amount was negative.
# end move_start
def move_end(self, amount, error=True, move_start=True):
"""This is an internal method and should not need to be called by the user.
Move the end pointer the given amount (+/-).
Raises:
OverflowError: If the amount is > the available buffer space.
Args:
amount (int): Amount to move the end pointer by.
error (bool)[True]: Raise an OverflowError else sync the start pointer and length.
move_start (bool)[True]: If True and amount > available move the start pointer with the end pointer.
"""
# Check for overflow
        available = self.maxsize - self._length
if amount == 0:
return
        elif amount > 0 and amount > available:
if error:
raise OverflowError("Not enough space in the buffer " + repr(self) +
" " + repr(len(self)) + " < " + repr(amount))
if move_start:
# Move the start to make it a circular
                make_available = amount - available
self.move_start(make_available, False) # Needs to move for sync_length
if amount > self.maxsize:
self.move_start(-(amount - self.maxsize) - 1, False) # Needs to move for sync_length
stop = self._end + amount
try:
self._end = stop % self.maxsize
except ZeroDivisionError:
self._end = stop
        self.sync_length(amount >= 0)  # Length shrinks if the amount was negative.
# end move_end
def sync_length(self, should_grow=True):
"""Sync the length with the start and end pointers.
Args:
should_grow (int): Determines if start and end equal means full or empty.
Writing can make full, reading empty.
"""
try:
self._length = (self._end - self._start) % self.maxsize
except ZeroDivisionError:
self._length = 0
if self._length == 0 and should_grow:
self._length = self.maxsize
# end sync_length
@property
def maxsize(self):
"""Return the maximum buffer size."""
return len(self._data)
@maxsize.setter
def maxsize(self, maxsize):
"""Set the maximum size."""
self.shape = (int(maxsize), ) + self.shape[1:]
self.clear()
def get_available_space(self):
"""Return the available space."""
return self.maxsize - len(self)
@property
def columns(self):
"""Return the number of columns/columns."""
try:
return self._data.shape[1] or 1
except (AttributeError, IndexError):
return 1
@columns.setter
def columns(self, columns):
"""Set the columns."""
self.shape = (self.maxsize, columns) + self.shape[2:]
self.clear()
@property
def shape(self):
"""Return the shape of the data."""
return self._data.shape
@shape.setter
def shape(self, new_shape):
"""Set the shape."""
reshape(self, new_shape)
@property
def dtype(self):
"""Return the dtype of the data."""
return self._data.dtype
@dtype.setter
def dtype(self, dtype):
try:
self._data = self._data.astype(dtype)
except (AttributeError, ValueError, TypeError, Exception):
self._data = np.zeros(shape=self.shape, dtype=dtype)
self.clear()
# end class RingBuffer
class RingBufferThreadSafe(RingBuffer):
"""Standard numpy circular buffer.
Args:
        shape (tuple/int): Length (or shape) of the buffer.
columns (int)[1]: Columns for the buffer.
dtype (numpy.dtype)[numpy.float32]: Numpy data type for the buffer.
"""
def __init__(self, shape, columns=None, dtype=np.float32):
self.lock = threading.RLock()
super().__init__(shape=shape, columns=columns, dtype=dtype)
# end constructor
clear = make_thread_safe(RingBuffer.clear)
get_data = make_thread_safe(RingBuffer.get_data)
set_data = make_thread_safe(RingBuffer.set_data)
expanding_write = make_thread_safe(RingBuffer.expanding_write)
growing_write = make_thread_safe(RingBuffer.growing_write)
write_value = make_thread_safe(RingBuffer.write_value)
write_zeros = make_thread_safe(RingBuffer.write_zeros)
write = make_thread_safe(RingBuffer.write)
read = make_thread_safe(RingBuffer.read)
read_remaining = make_thread_safe(RingBuffer.read_remaining)
read_overlap = make_thread_safe(RingBuffer.read_overlap)
read_last = make_thread_safe(RingBuffer.read_last)
__len__ = make_thread_safe(RingBuffer.__len__)
__str__ = make_thread_safe(RingBuffer.__str__)
move_start = make_thread_safe(RingBuffer.move_start)
move_end = make_thread_safe(RingBuffer.move_end)
sync_length = make_thread_safe(RingBuffer.sync_length)
get_available_space = make_thread_safe(RingBuffer.get_available_space)
maxsize = make_thread_safe(RingBuffer.maxsize)
columns = make_thread_safe(RingBuffer.columns)
shape = make_thread_safe(RingBuffer.shape)
dtype = make_thread_safe(RingBuffer.dtype)
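# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal write/read
# round trip with the plain RingBuffer. The sizes and values are arbitrary.
if __name__ == "__main__":
    buf = RingBuffer(8, columns=1)             # 8-sample, single-column float32 buffer
    buf.write(np.arange(5))                    # write 5 samples; stored as shape (5, 1)
    print(len(buf))                            # -> 5 samples currently held
    print(buf.read(3))                         # read 3 samples; start pointer advances
    print(buf.get_available_space())           # -> 6 free slots (8 total - 2 still held)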
|
StarcoderdataPython
|
173014
|
<gh_stars>1-10
import logging
from google.appengine.api.taskqueue import Task
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext.db import GeoPt
from google.appengine.ext.db import TransactionFailedError
from google.appengine.ext.db import Timeout
from google.appengine.runtime import DeadlineExceededError
from google.appengine.ext.webapp.util import run_wsgi_app
from data_model import StopLocation
from data_model import StopLocationLoader
from data_model import RouteListing
from data_model import RouteListingLoader
from data_model import DestinationListing
CRAWL_URLBASE = "http://webwatch.cityofmadison.com/tmwebwatch/LiveADADepartureTimes"
#
# a collection of handlers that will transform RouteListingLoader entities
# into routes and destinations
#
# note that order matters. the Stop transformation has to happen first.
#
class RouteTransformationStart(webapp.RequestHandler):
def get(self) :
# shove a task in the queue because we need more time then
# we may be able to get in the browser
for route in range(1, 100):
task = Task(url="/gtfs/port/routes/task",params={'route':route})
task.add('crawler')
self.response.out.write('done. spawned a task to go do the route transformations')
## end RouteTransformationStart
class RouteTransformationTask(webapp.RequestHandler):
def post(self):
try:
routeID = self.request.get('route')
if len(routeID) == 1:
routeID = '0' + routeID
q = RouteListingLoader.all()
q.filter("routeID = ", routeID)
for r in q.run(keys_only=True):
logging.debug('launch key query %s', r)
task = Task(url="/gtfs/port/routes/transform/task",params={'rll_key':r})
task.add('crawler')
self.response.set_status(200)
except Timeout:
logging.error('FAIL : timeout getting the route loader tasks spawned')
self.response.set_status(200)
self.response.out.write("timeout")
return
## end RouteTransformationParent
class RouteTransformationChildTask(webapp.RequestHandler):
def post(self):
route_loader_key = self.request.get('rll_key')
logging.debug('work on %s' % self.request.get('rll_key'))
route_loader = RouteListingLoader.get(route_loader_key)
if route_loader is None:
logging.error('total fail. unable to find %s' % route_loader_key)
else:
logging.debug(route_loader.routeID)
# find the corresponding stop details
stop = db.GqlQuery("SELECT * FROM StopLocation WHERE stopID = :1", route_loader.stopID).get()
if stop is None:
logging.error("Missing stop %s which should be impossible",route_loader.stopID);
try:
url = CRAWL_URLBASE + '?r=' + route_loader.routeCode + '&d=' + route_loader.directionCode + '&s=' + route_loader.stopCode
logging.debug(url)
route = RouteListing()
route.route = route_loader.routeID
route.routeCode = route_loader.routeCode
route.direction = route_loader.directionCode
route.stopID = route_loader.stopID
route.stopCode = route_loader.stopCode
route.scheduleURL = url
route.stopLocation = stop
route.put()
logging.info("added new route listing entry to the database!")
DestinationListing.get_or_insert(route_loader.direction, id=route_loader.directionCode, label=route_loader.direction)
except TransactionFailedError:
                logging.error('FAIL : unable to store RouteListing for route %s, stop %s', route_loader.routeID, route_loader.stopID)
self.response.set_status(2)
self.response.out.write('transaction fail')
return
## end RouteTransformationChild
#
# This handler is designed to port the GTFS Stops loaded via the bulk loader
# It simply creates a number of background tasks to do the grunt work
#
class PortStopsHandler(webapp.RequestHandler):
def get(self):
# query the StopLocationLoader for all stops
stops = StopLocationLoader.all()
for s in stops:
# create a new task for each stop
task = Task(url='/gtfs/port/stop/task/',
params={'stopID':s.stopID,
'name':s.name,
'description':s.description,
'lat':str(s.lat),
'lon':str(s.lon),
'direction':s.direction,
})
task.add('crawler')
logging.debug('Finished spawning StopLocationLoader tasks!')
self.response.out.write('done spawning porting tasks!')
## end PortStopsHandler
#
# This handler is the task handler for porting an individual GTFS stop
#
class PortStopTask(webapp.RequestHandler):
def post(self):
stop_list = []
stopID = self.request.get('stopID')
if len(stopID) == 1:
stopID = "000" + stopID
if len(stopID) == 2:
stopID = "00" + stopID
if len(stopID) == 3:
stopID = "0" + stopID
name = self.request.get('name')
description = self.request.get('description')
lat = self.request.get('lat')
lon = self.request.get('lon')
direction = self.request.get('direction')
s = StopLocation()
s.stopID = stopID
s.intersection = name.split('(')[0].rstrip()
s.direction = direction
s.description = description
s.location = GeoPt(lat,lon)
s.update_location()
stop_list.append(s)
# put the new stop in the datastore
db.put(stop_list)
logging.info('done updating stop locations for stopID %s' % stopID)
self.response.set_status(200)
## end PortStopTask
application = webapp.WSGIApplication([('/gtfs/port/stops', PortStopsHandler),
('/gtfs/port/stop/task/', PortStopTask),
('/gtfs/port/routes', RouteTransformationStart),
('/gtfs/port/routes/task', RouteTransformationTask),
('/gtfs/port/routes/transform/task', RouteTransformationChildTask)
],
debug=True)
def main():
logging.getLogger().setLevel(logging.DEBUG)
run_wsgi_app(application)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1720736
|
import math
import numpy as np
import logging
logger = logging.getLogger('cwl')
class CWLMetric(object):
def __init__(self):
self.expected_utility = 0.0
self.expected_cost = 0.0
self.expected_total_utility = 0.0
self.expected_total_cost = 0.0
self.expected_items = 0.0
self.residual_expected_utility = None
self.residual_expected_cost = None
self.residual_expected_total_utility = None
self.residual_expected_total_cost = None
self.residual_expected_items = None
self.residuals = False
self.metric_name = "Undefined"
self.ranking = None
self.bibtex = ""
def name(self):
return self.metric_name
def c_vector(self, ranking, worse_case=True):
"""
Create a vector of C probabilities (i.e. probability of continuing from position i to position i+1)
        Note: when defining a metric it is best/easiest to re-implement this function.
:param ranking: CWL Ranking object
        :param worse_case: Boolean, denoting whether to estimate assuming the
        worse case (True: unjudged items are treated as zero gain and max cost) or the
        best case (worse_case=False: unjudged items are treated as max gain and min cost).
Note that the Ranking object handles what is returned in the gain and cost vectors.
:return: returns the C vector probabilities
"""
cvec = np.ones(len(ranking.get_gain_vector(worse_case)))
return cvec
def l_vector(self, ranking, worse_case=True):
"""
Create a vector of L probabilities (i.e. the Likelihoods of stopping at position i given the C vector)
:param ranking: CWL Ranking object
        :param worse_case: Boolean, to denote whether to estimate assuming the worse case (True) or the best case (False)
:return: returns the L vector probabilities
"""
cvec = self.c_vector(ranking, worse_case)
logger.debug("{0} {1} {2} {3}".format(ranking.topic_id, self.name(), "cvec", cvec[0:11]))
cshift = np.append(np.array([1.0]), cvec[0:-1])
lvec = np.cumprod(cshift)
lvec = np.multiply(lvec, (np.subtract(np.ones(len(cvec)), cvec)))
logger.debug("{0} {1} {2} {3}".format(ranking.topic_id, self.name(), "lvec", lvec[0:11]))
return lvec
def w_vector(self, ranking, worse_case=True):
"""
        Create a vector of W probabilities (i.e. the probability of examining item i)
        Note: when defining a metric it is best/easiest to re-implement this function.
:param ranking: CWL Ranking object
        :param worse_case: Boolean, to denote whether to estimate assuming the worse case (True) or the best case (False)
:return: returns the W vector probabilities
"""
cvec = self.c_vector(ranking, worse_case)
cvec = cvec[0:-1]
cvec_prod = np.cumprod(cvec)
cvec_prod = np.pad(cvec_prod, (1, 0), 'constant', constant_values=1.0)
w1 = np.divide(1.0, np.sum(cvec_prod))
w_tail = np.multiply(cvec_prod[1:len(cvec_prod)], w1)
wvec = np.append(w1, w_tail)
logger.debug("{0} {1} {2} {3}".format(ranking.topic_id, self.name(), "wvec", wvec[0:11]))
return wvec
def measure(self, ranking):
"""
Given the ranking, measure estimates the various measurements given the CWL framework
if residuals are required, these are also computed.
:param ranking: CWL Ranking object
:return: the expected utility per item
"""
self.ranking = ranking
# score based on worse case - lower bounds
(eu, etu, ec, etc, ei) = self._do_score(ranking, True)
self.expected_utility = eu
self.expected_total_utility = etu
self.expected_cost = ec
self.expected_total_cost = etc
self.expected_items = ei
if self.residuals:
# score based on best case - upper bounds
(eu, etu, ec, etc, ei) = self._do_score(ranking, False)
# compute the residual i.e. the difference between the upper and lower bounds
self.residual_expected_utility = eu - self.expected_utility
self.residual_expected_total_utility = etu - self.expected_total_utility
self.residual_expected_cost = ec - self.expected_cost
self.residual_expected_total_cost = etc - self.expected_total_cost
self.residual_expected_items = ei - self.expected_items
# return the rate of gain per document
return self.expected_utility
def _do_score(self, ranking, worse_case=True):
"""
An internal function that handles the scoring of a ranking given the CWL machinery.
:param ranking: CWL Ranking object
        :return: a tuple of (expected utility per item, expected total utility, expected cost,
            expected total cost, expected items)
"""
wvec = self.w_vector(ranking, worse_case)
lvec = self.l_vector(ranking, worse_case)
gain_vec = ranking.get_gain_vector(worse_case)
cost_vec = ranking.get_cost_vector(worse_case)
cum_gains = np.cumsum(gain_vec)
cum_costs = np.cumsum(cost_vec)
expected_utility = np.sum(np.dot(wvec, gain_vec))
expected_total_utility = np.sum(np.dot(lvec, cum_gains))
expected_cost = np.sum(np.dot(wvec, cost_vec))
expected_total_cost = np.sum(np.dot(lvec, cum_costs))
expected_items = 1.0 / wvec[0]
return expected_utility, expected_total_utility, expected_cost, expected_total_cost, expected_items
def report(self):
if self.residuals:
print("{0}\t{1}\t{2:.4f}\t{3:.4f}\t{4:.4f}\t{5:.4f}\t{6:.4f}\t{7:.4f}\t{8:.4f}\t{9:.4f}\t{10:.4f}\t{11:.4f}".format(
self.ranking.topic_id, self.name(), self.expected_utility, self.expected_total_utility,
self.expected_cost, self.expected_total_cost, self.expected_items,
self.residual_expected_utility, self.residual_expected_total_utility,
self.residual_expected_cost, self.residual_expected_total_cost, self.residual_expected_items
))
else:
print("{0}\t{1}\t{2:.4f}\t{3:.4f}\t{4:.4f}\t{5:.4f}\t{6:.4f}".format(
self.ranking.topic_id, self.name(), self.expected_utility, self.expected_total_utility,
self.expected_cost, self.expected_total_cost, self.expected_items,
))
def csv(self):
return ("{0},{1:.3f},{2:.3f},{3:.3f},{4:.3f},{5:.3f}".format(
self.name(), self.expected_utility, self.expected_total_utility, self.expected_cost,
self.expected_total_cost, self.expected_items))
def get_scores(self):
"""
:return: list with values of each measurement for the previously measured ranking
"""
scores = [
self.expected_utility,
self.expected_total_utility,
self.expected_cost,
self.expected_total_cost,
self.expected_items]
return scores
def _pad_vector(self, vec1, n, val):
"""
Pads vector 1 up to size n, with the value val
:param vec1: np array
:param n: size of the desired array
:param val: the value to be inserted if padding is required
:return: the padded vector
"""
if len(vec1) < n:
vec1 = np.pad(vec1, (0, n-len(vec1)), 'constant', constant_values=val)
return vec1
def validate_gain_range(self, min_allowed_gain, max_allowed_gain, gain_vec):
"""
Checks that the gain vector does not violate any metric assumptions
These assumptions (about the min or max gain) should be provided by
the calling metric class.
"""
if np.min(gain_vec) < min_allowed_gain:
raise ValueError("Supplied gain values violate metric assumptions: Metric = {}.\n "
"The minimum allowable gain for this metric is: {}.".format(self.name(), min_allowed_gain))
if np.max(gain_vec) > max_allowed_gain:
raise ValueError("Supplied gain values ({}) violate metric assumptions: Metric = {}.\n "
"The maximum allowable gain for this "
"metric is: {}.".format(np.max(gain_vec), self.name(), max_allowed_gain))
|
StarcoderdataPython
|
183377
|
# -*- coding: utf-8 -*-
from time import sleep
from requests import get
import utils
print("2bTracker\nuses https://2bqueue.info/\n")
utils.init()
print("Getting 2b2t player lists...")
oldQueuePlayerList = get("https://2bqueue.info/players").json()["queue"]["players"]
oldMainPlayerList = get("https://2bqueue.info/players").json()["server"]["players"]
print("Done.")
while True:
sleep(5)
newQueuePlayerList = get("https://2bqueue.info/players").json()["queue"]["players"]
newMainPlayerList = get("https://2bqueue.info/players").json()["server"]["players"]
utils.compare(oldQueuePlayerList, newQueuePlayerList, oldMainPlayerList, newMainPlayerList)
oldQueuePlayerList = newQueuePlayerList
oldMainPlayerList = newMainPlayerList
|
StarcoderdataPython
|
3234655
|
#!/usr/bin/python
from __future__ import print_function
import sys
import os
from roundup import instance
dir = os.getcwd ()
tracker = instance.open (dir)
db = tracker.open ('admin')
for id in db.user_dynamic.getnodeids (retired = False) :
dyn = db.user_dynamic.getnode (id)
if dyn.exemption is None :
db.user_dynamic.set (id, exemption = False)
db.commit()
|
StarcoderdataPython
|
3221188
|
"""Tests for the Bulb API with a socket."""
from typing import AsyncGenerator
import pytest
from pywizlight import wizlight
from pywizlight.bulblibrary import BulbClass, BulbType, Features, KelvinRange
from pywizlight.tests.fake_bulb import startup_bulb
@pytest.fixture()
async def socket() -> AsyncGenerator[wizlight, None]:
shutdown, port = await startup_bulb(
module_name="ESP10_SOCKET_06", firmware_version="1.16.71"
)
bulb = wizlight(ip="127.0.0.1", port=port)
yield bulb
await bulb.async_close()
shutdown()
@pytest.mark.asyncio
async def test_model_description_socket(socket: wizlight) -> None:
"""Test fetching the model description of a socket is None."""
bulb_type = await socket.get_bulbtype()
assert bulb_type == BulbType(
features=Features(
color=False,
color_tmp=False,
effect=False,
brightness=False,
dual_head=False,
),
name="ESP10_SOCKET_06",
kelvin_range=KelvinRange(max=6500, min=2700),
bulb_type=BulbClass.SOCKET,
fw_version="1.16.71",
white_channels=2,
white_to_color_ratio=20,
)
|
StarcoderdataPython
|
21591
|
<filename>Desafio 46.py<gh_stars>0
print('====== DESAFIO 46 ======')
import time
for c in range(10,-1,-1):
time.sleep(1)
print(c)
|
StarcoderdataPython
|
1721059
|
from dbacademy.dbrest import DBAcademyRestClient
class ScimServicePrincipalsClient:
def __init__(self, client: DBAcademyRestClient, token: str, endpoint: str):
self.client = client # Client API exposing other operations to this class
self.token = token # The authentication token
self.endpoint = endpoint # The API endpoint
self.base_url = f"{self.endpoint}/api/2.0/preview/scim/v2/ServicePrincipals"
def list(self):
response = self.client.execute_get_json(f"{self.base_url}")
all_items = response.get("Resources", [])
total = response.get("totalResults")
        while len(all_items) < total:
            # Assumes the endpoint supports standard SCIM 2.0 paging via a 1-based
            # startIndex; without it, the original loop re-requested the same first page.
            response = self.client.execute_get_json(f"{self.base_url}?startIndex={len(all_items) + 1}")
            total = response.get("totalResults")
            items = response.get("Resources", [])
            if not items:
                break  # defensive: stop if the server returns an empty page
            all_items.extend(items)
return all_items
def get_by_id(self, service_principle_id: str):
return self.client.execute_get_json(f"{self.base_url}/{service_principle_id}")
def get_by_name(self, display_name):
all_items = self.list()
for item in all_items:
if item.get("displayName") == display_name:
return item
return None
    def create(self, display_name: str, group_ids: list = None, entitlements: list = None):
params = {
"displayName": display_name,
"entitlements": [],
"groups": [],
"schemas": ["urn:ietf:params:scim:schemas:core:2.0:ServicePrincipal"],
"active": True
}
group_ids = group_ids if group_ids is not None else list()
for group_id in group_ids:
value = {"value": group_id}
params["groups"].append(value)
entitlements = entitlements if entitlements is not None else list()
for entitlement in entitlements:
value = {"value": entitlement}
params["entitlements"].append(value)
return self.client.execute_post_json(f"{self.base_url}", params, expected=201)
|
StarcoderdataPython
|