id (string, 1-7 chars) | text (string, 6-1.03M chars) | dataset_id (string, 1 distinct value)
---|---|---|
1746851
|
<reponame>kstaken/salt
try:
from mock import MagicMock, patch
has_mock = True
except ImportError:
has_mock = False
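# Fallback when mock is unavailable: a no-op decorator factory so the @patch
# decorations below do not raise at import time (the tests themselves are
# skipped by the skipIf guard).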
patch = lambda x: lambda y: None
from saltunittest import TestCase, skipIf
from salt.modules import pip
pip.__salt__ = {"cmd.which_bin":lambda _:"pip"}
@skipIf(has_mock is False, "mock python module is unavailable")
class PipTestCase(TestCase):
def test_fix4361(self):
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all':mock}):
pip.install(requirements="requirements.txt")
expected_cmd = 'pip install --requirement "requirements.txt" '
mock.assert_called_once_with(expected_cmd, runas=None, cwd=None)
@patch('salt.modules.pip._get_cached_requirements')
def test_failed_cached_requirements(self, get_cached_requirements):
get_cached_requirements.return_value = False
ret = pip.install(requirements='salt://my_test_reqs')
self.assertEqual(False, ret['result'])
self.assertIn('my_test_reqs', ret['comment'])
@patch('salt.modules.pip._get_cached_requirements')
def test_cached_requirements_used(self, get_cached_requirements):
get_cached_requirements.return_value = 'my_cached_reqs'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(requirements="salt://requirements.txt")
expected_cmd = 'pip install --requirement "my_cached_reqs" '
mock.assert_called_once_with(expected_cmd, runas=None, cwd=None)
@patch('os.path')
def test_fix_activate_env(self, mock_path):
mock_path.is_file.return_value = True
mock_path.isdir.return_value = True
def join(*args):
return '/'.join(args)
mock_path.join = join
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install('mock', bin_env='/test_env', activate=True)
expected_cmd = '. /test_env/bin/activate && /test_env/bin/pip install mock '
mock.assert_called_once_with(expected_cmd, runas=None, cwd=None)
|
StarcoderdataPython
|
3247766
|
<gh_stars>0
from fabric.context_managers import hide
import re
from calyptos.plugins.debugger.debuggerplugin import DebuggerPlugin
class CheckComputeRequirements(DebuggerPlugin):
def debug(self):
# Supported CentOS/RHEL OS version for each component
self.os_version = 6
# Default clock skew allowed for cloud components
self.clock_skew_sec = 20
all_hosts = self.component_deployer.all_hosts
roles = self.component_deployer.get_roles()
self._verify_os_proc(all_hosts)
self._verify_clocks(all_hosts)
self._check_virtualization(roles['node-controller'])
return (self.passed, self.failed)
def _verify_os_proc(self, all_hosts):
"""
Verifies supported OS, correct chip architecture and
recommended minimum number of processors on all cloud components.
:param all_hosts: a set of Eucalyptus cloud components
"""
self.info('Operating System and Processor verification on all hosts')
os_output = 'cat /etc/system-release'
with hide('everything'):
os_version = self.run_command_on_hosts(os_output, all_hosts)
os_search_string = '(CentOS|Red).*(' + str(self.os_version) + '.\w+)'
for host in all_hosts:
if re.search(os_search_string, os_version[host]):
self.success(host + ': Correct OS Version')
else:
self.failure(host + ': Incorrect OS Version')
arch_output = 'uname -m'
with hide('everything'):
arch_version = self.run_command_on_hosts(arch_output, all_hosts)
for host in all_hosts:
if re.search('x86_64', arch_version[host]):
self.success(host + ': Correct chip architecture')
else:
self.failure(host + ': Incorrect chip architecture')
cpu_output = 'cat /proc/cpuinfo | grep processor'
cputype_output = 'cat /proc/cpuinfo | grep \"model name\"'
with hide('everything'):
cpu_count = self.run_command_on_hosts(cpu_output, all_hosts)
cpu_type = self.run_command_on_hosts(cputype_output, all_hosts)
for host in all_hosts:
cpus = re.findall('processor', cpu_count[host])
if len(cpus) >= 2:
self.success(host + ': Passed minimum number of'
+ ' processors requirement')
else:
self.failure(host + ': Failed minimum number of'
+ ' processors requirement')
proc_type = re.findall('(model).*((?:Intel|AMD).*)\w+',
cpu_type[host])
if len(cpus) == len(proc_type):
self.success(host + ': Passed requirement of '
+ 'Intel/AMD processor support')
else:
self.failure(host + ': Failed requirement of '
+ 'Intel/AMD processor support')
def _verify_clocks(self, all_hosts):
"""
Verifies that ntp and ntpdate are installed on each
cloud component. In addition, confirms that the components' clock
skew isn't greater than the default maximum clock skew allowed for
the cloud.
:param all_hosts: a set of Eucalyptus cloud components
"""
self.info('NTP/NTPD Test on all hosts')
packages = ['ntp', 'ntpdate']
for package in packages:
with hide('everything'):
# Use rpm --query --all to confirm packages exist
rpm_output = self.run_command_on_hosts('rpm '
+ '--query --all ' + package,
all_hosts)
for host in all_hosts:
if re.search(package, rpm_output[host]):
self.success(host + ':Package found - ' + package)
else:
self.failure(host + ':Package not found - ' + package)
service_output = 'service ntpd status'
with hide('everything'):
ntpd_output = self.run_command_on_hosts(service_output, all_hosts)
for host in all_hosts:
if re.search('running', ntpd_output[host]):
self.success(host + ':ntpd running')
else:
self.failure(host + ':ntpd not running')
# Check to see if ntpd is set to default-start runlevel
chkconfig_output = 'chkconfig --list ntpd | awk \'{print $4,$5,$6,$7}\''
with hide('everything'):
runlevel_output = self.run_command_on_hosts(chkconfig_output,
all_hosts)
for host in all_hosts:
if re.search('off', runlevel_output[host]):
self.failure(host + ':runlevel for ntpd'
+ ' has not been set to default-start')
else:
self.success(host + ':runlevel for ntpd'
+ ' has been set to default-start')
"""
Compare date across all cloud components. Date is compared in UTC
format. In addition, compare time on each cloud component, and
confirm there isn't more than the clock skew (in seconds) between
all components.
"""
date_output = 'date --utc +%m%d%y'
time_output = 'date --utc +%H%M%S'
with hide('everything'):
date_stamp = self.run_command_on_hosts(date_output, all_hosts)
time_stamp = self.run_command_on_hosts(time_output, all_hosts)
host_dates = []
host_times = []
for host in all_hosts:
if not date_stamp[host] or not time_stamp[host]:
self.failure(host + ': No date returned. Make sure machine clock'
+ ' is set and synced across all nodes')
return
else:
host_dates.append(date_stamp[host])
host_times.append(time_stamp[host])
if all(date == host_dates[0] for date in host_dates):
self.success('All cloud components are using the same date')
else:
self.failure('Date is not consistent across cloud components')
max_time = max(host_times)
for time in host_times:
if abs(int(max_time) - int(time)) > int(self.clock_skew_sec):
self.failure('Clock skew is greater than ' + str(self.clock_skew_sec)
+ ' seconds across hosts.'
+ ' Please confirm clocks are synced across all hosts.')
return
self.success('Clocks are synced within allowed threshold')
def _check_virtualization(self, nodes):
"""
Confirm that node controller(s) have hardware virtualization
enabled on either Intel or AMD chips.
:param nodes: a set of Eucalyptus Node Controller components
"""
self.info('Confirm virtualization is enabled on Node Controllers')
virt_output = 'egrep -m1 -w \'^flags[[:blank:]]*:\' /proc/cpuinfo |' \
' egrep -wo \'(vmx|svm)\''
with hide('everything'):
virt_test = self.run_command_on_hosts(virt_output, nodes)
for host in nodes:
if re.match('(vmx|svm)', virt_test[host]):
self.success(host + ': Passed requirement of '
+ 'Intel/AMD hardware virtualization support')
else:
self.failure(host + ': Failed requirement of '
+ 'Intel/AMD hardware virtualization support')
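# Hedged sketch, not part of the original plugin: the HHMMSS comparison in
# _verify_clocks can misreport skew across minute or hour boundaries. Assuming
# the hosts provide GNU date, comparing epoch seconds avoids that. 'plugin' is
# any DebuggerPlugin instance; the helper name is illustrative and never called.
def _clock_skew_via_epoch_sketch(plugin, all_hosts):
    with hide('everything'):
        epochs = plugin.run_command_on_hosts('date --utc +%s', all_hosts)
    values = [int(epochs[host]) for host in all_hosts if epochs[host]]
    if values and max(values) - min(values) > int(plugin.clock_skew_sec):
        plugin.failure('Clock skew exceeds ' + str(plugin.clock_skew_sec)
                       + ' seconds across hosts')
    else:
        plugin.success('Clocks are synced within allowed threshold')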
|
StarcoderdataPython
|
89023
|
<filename>constants.py<gh_stars>1-10
for name in 'channel pitch time duration velocity'.split():
globals()[name.upper()] = name
|
StarcoderdataPython
|
42606
|
<filename>app/moisturechecker/app.py
import paho.mqtt.client as mqtt
from gpiozero import Button as Sensor
import os
def on_event(client, topics, message):
def func():
for topic in topics:
client.publish(topic, message)
return func
if __name__ == '__main__':
mqtt_url = os.environ['growmemoisturechecker_mqtturl']
mqtt_topics = os.environ['growmemoisturechecker_mqtttopics'].split(",")
sensor_pin = int(os.environ['growmemoisturechecker_sensor'])
client = mqtt.Client("moisture_54263841")
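# The splits below assume mqtt_url has the form user:password@host:port
# (an inference from this parsing, not documented elsewhere in the app).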
client.username_pw_set(mqtt_url.split("@")[0].split(":")[0], mqtt_url.split("@")[0].split(":")[1])
client.connect(mqtt_url.split("@")[1].split(":")[0], int(mqtt_url.split("@")[1].split(":")[1]))
sensor = Sensor(sensor_pin)
sensor.when_pressed = on_event(client, mqtt_topics, "wet")
sensor.when_released = on_event(client, mqtt_topics, "dry")
client.loop_forever()
|
StarcoderdataPython
|
3367688
|
"""Constants for the Harmony component."""
DOMAIN = "harmony"
SERVICE_SYNC = "sync"
SERVICE_CHANGE_CHANNEL = "change_channel"
PLATFORMS = ["remote", "switch"]
UNIQUE_ID = "unique_id"
ACTIVITY_POWER_OFF = "PowerOff"
HARMONY_OPTIONS_UPDATE = "harmony_options_update"
ATTR_DEVICES_LIST = "devices_list"
ATTR_LAST_ACTIVITY = "last_activity"
ATTR_ACTIVITY_STARTING = "activity_starting"
PREVIOUS_ACTIVE_ACTIVITY = "Previous Active Activity"
|
StarcoderdataPython
|
25923
|
<filename>src/cuda_ai/mean.py
# -*- coding: utf-8 -*-
"""
Created on Sun May 21 15:35:38 2017
@author: Liron
"""
import numpy as np
np.set_printoptions(threshold=np.inf)  # np.nan is no longer accepted as a threshold
data = np.genfromtxt("cuda_times.csv", delimiter=",", usecols=(0,1), max_rows=97, skip_header=96+96+96)
print(data[0])
print(data[data.shape[0]-1])
mean = np.zeros((16,2))
for i in np.arange(mean.shape[0]):
x = data[6*i + 1:6*(i+1) + 1,1]
mean[i,1] = np.mean(x)
mean[i,0] = data[6*i + 1,0]
print(mean)
#np.savetxt("cuda_1000threads.txt", mean, delimiter=',')
|
StarcoderdataPython
|
3313809
|
# -*- coding:utf8 -*-
# File : progress.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 2/26/17
#
# This file is part of TensorArtist.
from tartist.core.utils.thirdparty import get_tqdm_defaults
import tqdm
import numpy as np
def enable_epoch_progress(trainer):
pbar = None
def epoch_progress_on_iter_after(trainer, inp, out):
nonlocal pbar
if pbar is None:
pbar = tqdm.tqdm(total=trainer.epoch_size, leave=False, initial=trainer.iter % trainer.epoch_size,
**get_tqdm_defaults())
desc = 'Iter={}'.format(trainer.iter)
if 'error' in trainer.runtime:
desc += ', error={:.4f}'.format(trainer.runtime['error'])
for k in sorted(out.keys()):
v = out[k]
if isinstance(v, (str, int, float, np.ndarray, np.float32, np.float64, np.int32, np.int64)):
try:
v = float(v)
desc += ', {}={:.4f}'.format(k, v)
except ValueError:
pass
pbar.set_description(desc)
pbar.update()
def epoch_progress_on_epoch_after(trainer):
nonlocal pbar
pbar.close()
pbar = None
trainer.register_event('iter:after', epoch_progress_on_iter_after, priority=25)
trainer.register_event('epoch:after', epoch_progress_on_epoch_after, priority=5)
|
StarcoderdataPython
|
159374
|
<gh_stars>0
"""! @file
# Class Documenter
@package src """
## build a profile of each function and include in the documentation
INCLUDE_FUNCTION_PROFILE = True
## pull inline comments up to function docstring
INCLUDE_INLINE_COMMENTS = False
import sys, os
from util.log import setup_logging
logger = setup_logging()
from util.config import GetGlobalConfig
config = GetGlobalConfig()
CLASS_TEMPLATE = config['CLASS_TEMPLATE']
from python_code.CodeLine import CodeLine
from function_documenter import DocumentFunction
def doClassInit(codeblock):
# make sure first argument is 'self'. Not a functional requirement, but necessary for doxygen to identify public members
args = codeblock.getArguments()
if args[0] != 'self':
logger.warning('function {} does not have self as first argument in __init__. This will prevent doxygen from finding members.'.format(codeblock.getFunctionName()))
members = codeblock.getMembers(args[0])
# TODO: we need the line #s and then to stick a new comment line above each...
for member in members:
word = "{}.{}".format(args[0], member)
# we'll loop through the lines here, but this is not good style. Should make this a class function.
for item, i in zip(codeblock.block, range(0, len(codeblock.block))):
if item.find(word) > -1:
break
if i < len(codeblock.block):
# check previous line, is it a comment already?
if CodeLine.RemoveLeadingWhitespace(codeblock.block[i-1])[:2] != '##':
ind = codeblock.block[i].indent()
codeblock.block[i:i] = [CodeLine("## TODO_DOC: what is {} class member variable?".format(member)).indent(ind)]
# codeblock.indent()
def DocumentClasses(filename, FORCE):
with open(filename, 'r') as f:
raw = f.read()
from python_code.CodeBlock import CodeBlock
code_lines = CodeBlock.ParsePython(raw)
logger.info('read {} bytes over {} blocks of code'.format(len(raw),len(code_lines)))
write_it = False
for cb in code_lines:
if cb.isClass():
# do all the methods
methods = cb.getAllFunctions()
for method in methods:
if method.getFunctionName() == '__init__':
doClassInit(method)
if method.hasDocumentation():
if FORCE:
method.removeDocumentation()
else: continue
# document each function
DocumentFunction(method, ignore_args=['self'])
write_it = True
if cb.hasDocumentation():
if FORCE:
cb.removeDocumentation()
else: continue
# need special handling of __init__ function to document public members
docs = CLASS_TEMPLATE.split('\n')
docs = [CodeLine(d) for d in docs]
cb.addDocumentation(docs)
write_it = True
if write_it:
code_lines.indent() # makes changes in place
# move original
os.rename(filename, filename + '.old')
logger.info('moved original file to {}'.format(filename + '.old'))
with open(filename, 'w') as f:
f.write( str(code_lines) )
logger.info('added new class documentation to {}'.format(filename))
else:
logger.info('no changes were made.')
if __name__ == "__main__":
if len(sys.argv) < 2:
logger.error('missing required path to file argument')
exit()
filename = sys.argv[1]
FORCE = '-force' in sys.argv
DocumentClasses(filename, FORCE)
|
StarcoderdataPython
|
113283
|
import os
from flask import Flask, jsonify, send_file
from flask_cors import CORS
from flask_jwt_extended import JWTManager
from api.login import blueprint as login_blueprint
from api.odoo import blueprint as odoo_blueprint
app = Flask(__name__)
ALLOW_ALL_ORIGINS = os.getenv("ALLOW_ALL_ORIGINS")
if ALLOW_ALL_ORIGINS:
CORS(app)
app.config["JWT_SECRET_KEY"] = os.environ.get("JWT_SECRET_KEY")
jwt = JWTManager(app)
app.register_blueprint(login_blueprint)
app.register_blueprint(odoo_blueprint)
@app.route("/api/ping")
def ping():
return jsonify({"name": "auto-conso", "status": "ok"})
@app.route("/")
def main():
index_path = os.path.join(app.static_folder, "index.html")
return send_file(index_path)
# Everything not declared before (not a Flask route / API endpoint)...
@app.route("/<path:path>")
def route_frontend(path):
# ...could be a static file needed by the front end that
# doesn't use the `static` path (like in `<script src="bundle.js">`)
file_path = os.path.join(app.static_folder, path)
if os.path.isfile(file_path):
return send_file(file_path)
# ...or should be handled by the SPA's "router" in front end
else:
index_path = os.path.join(app.static_folder, "index.html")
return send_file(index_path)
if __name__ == "__main__":
app.run(debug=True)
|
StarcoderdataPython
|
3276961
|
<gh_stars>1-10
if __name__ == '__main__':
from compute_realizability import *
else:
from contracts.compute_realizability import *
if len(sys.argv) > 2:
i = int(sys.argv[1])
j = int(sys.argv[2])
synthesize_by_ij(i,j)
elif len(sys.argv) > 1 and sys.argv[1] == 'all':
check_all()
else:
print('Checking stale contract...')
|
StarcoderdataPython
|
3316910
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2012 <NAME> <<EMAIL>>
"""extracts the openrave version from the root CMakeLists.txt file and returns it
"""
import sys
if __name__=='__main__':
data = open(sys.argv[1],'r').read()
indices = [data.find('OPENRAVE_VERSION_MAJOR'), data.find('OPENRAVE_VERSION_MINOR'), data.find('OPENRAVE_VERSION_PATCH')]
versions = []
for index in indices:
startindex = data.find(' ',index)
endindex = data.find(')',startindex)
versions.append(data[startindex:endindex].strip())
print('.'.join(versions))
|
StarcoderdataPython
|
1720860
|
from traitlets import Bool, validate
from .Material_autogen import Material as MaterialAutogen
class Material(MaterialAutogen):
# Do not sync this automatically:
needsUpdate = Bool(False)
@validate('needsUpdate')
def onNeedsUpdate(self, proposal):
if proposal.value:
content = {
"type": "needsUpdate",
}
self.send(content=content, buffers=None)
# Never actually set value
return False
|
StarcoderdataPython
|
3303240
|
from django.test import override_settings
from django.urls import reverse
class TestSettings:
@override_settings(COMPRESS_ENABLED=True)
def test_compression(self, django_app, volunteer):
response = django_app.get(reverse("core:index"), user=volunteer)
assert response.status_code == 200
|
StarcoderdataPython
|
1614232
|
import logging
from aiohttp import web
from eth_typing import BLSSignature
from eth_utils import decode_hex, encode_hex, humanize_hash
from lahja.base import EndpointAPI
from ssz.tools.dump import to_formatted_dict
from ssz.tools.parse import from_formatted_dict
from eth2.beacon.chains.base import BaseBeaconChain
from eth2.beacon.types.attestations import Attestation, AttestationData
from eth2.beacon.types.blocks import BeaconBlock, BeaconBlockBody
from eth2.beacon.typing import Bitfield, CommitteeIndex, Slot
from eth2.api.http.validator import Paths as APIEndpoint
from trinity._utils.version import construct_trinity_client_identifier
from trinity.http.apps.base_handler import BaseHandler, get, post
class ValidatorAPIHandler(BaseHandler):
logger = logging.getLogger("trinity.http.apps.validator_api.ValidatorAPIHandler")
def __init__(
self, chain: BaseBeaconChain, event_bus: EndpointAPI, genesis_time: int
):
self._chain = chain
self._event_bus = event_bus
self._genesis_time = genesis_time
self._client_identifier = construct_trinity_client_identifier()
@get(APIEndpoint.node_version)
async def _get_client_version(self, request: web.Request) -> web.Response:
return web.json_response(self._client_identifier)
@get(APIEndpoint.genesis_time)
async def _get_genesis_time(self, request: web.Request) -> web.Response:
return web.json_response(self._genesis_time)
@get(APIEndpoint.sync_status)
async def _get_sync_status(self, request: web.Request) -> web.Response:
# TODO: get actual status in real time
status = {
"is_syncing": False,
"sync_status": {"starting_slot": 0, "current_slot": 0, "highest_slot": 0},
}
return web.json_response(status)
@get(APIEndpoint.validator_duties)
async def _get_validator_duties(self, request: web.Request) -> web.Response:
public_keys = tuple(
map(decode_hex, request.query["validator_pubkeys"].split(","))
)
# epoch = Epoch(request.query["epoch"])
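# The duty values below are hard-coded placeholders (cf. the TODOs elsewhere in
# this handler); a real implementation would derive them from the epoch query above.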
duties = tuple(
{
"validator_pubkey": encode_hex(public_key),
"attestation_slot": 2222,
"attestation_shard": 22,
"block_proposal_slot": 90,
}
for public_key in public_keys
)
return web.json_response(duties)
@get(APIEndpoint.block_proposal)
async def _get_block_proposal(self, request: web.Request) -> web.Response:
slot = Slot(int(request.query["slot"]))
randao_reveal = BLSSignature(
decode_hex(request.query["randao_reveal"]).ljust(96, b"\x00")
)
block = BeaconBlock.create(
slot=slot, body=BeaconBlockBody.create(randao_reveal=randao_reveal)
)
return web.json_response(to_formatted_dict(block))
@post(APIEndpoint.block_proposal)
async def _post_block_proposal(self, request: web.Request) -> web.Response:
block_data = await request.json()
block = from_formatted_dict(block_data, BeaconBlock)
self.logger.info(
"broadcasting block with root %s", humanize_hash(block.hash_tree_root)
)
# TODO: the actual broadcast
return web.Response()
@get(APIEndpoint.attestation)
async def _get_attestation(self, request: web.Request) -> web.Response:
# _public_key = BLSPubkey(decode_hex(request.query["validator_pubkey"]))
slot = Slot(int(request.query["slot"]))
committee_index = CommitteeIndex(int(request.query["committee_index"]))
attestation = Attestation.create(
aggregation_bits=Bitfield([True, False, False]),
data=AttestationData.create(index=committee_index, slot=slot),
)
return web.json_response(to_formatted_dict(attestation))
@post(APIEndpoint.attestation)
async def _post_attestation(self, request: web.Request) -> web.Response:
attestation_data = await request.json()
attestation = from_formatted_dict(attestation_data, Attestation)
self.logger.info(
"broadcasting attestation with root %s",
humanize_hash(attestation.hash_tree_root),
)
# TODO: the actual broadcast
return web.Response()
|
StarcoderdataPython
|
185896
|
<filename>models/models.py
from __future__ import print_function
def create_model(opt):
if opt.model == 'sr_resnet':
from .sr_resnet_model import SRResNetModel
model = SRResNetModel()
elif opt.model == 'sr_resnet_test':
from .sr_resnet_test_model import SRResNetTestModel
model = SRResNetTestModel()
elif opt.model == 'sr_gan':
from .sr_gan_model import SRGANModel
model = SRGANModel()
else:
raise NotImplementedError('Model [%s] not recognized.' % opt.model)
model.initialize(opt)
print('Model [%s] is created.' % model.name())
return model
|
StarcoderdataPython
|
3220855
|
<filename>Beginnings.py
'''
* * *
Sheet music generator using ABC notation
* * *
'''
# The bread and butter of proc-gen
import random as r
# Some basic values related to ABC:
# Song number
X='1' # First song, increase this to make an opus
# Title
T="Beginnings" # Song title
# Key
keys=['C','C# ','D','D# ','E','F','F# ','G','G# ','A','A# ','B']
K=r.choice(keys) # Random key, they're all the same mostly
K='C'
# and will only be used at the end for transposition.
scales=['Penta','Major','Minor','Dev']
# Dev is my personal scale that drops the seventh altogether, because I dislike the B sound in C
scalesDic={
'Penta':[1,4,6,8,11,13,16,18,20,23,-1],
'Major':[1,3,4,6,8,9,11,13,15,16,18,20,21,23,-1],
'Minor':[1,3,4,6,7,9,11,13,15,16,18,19,21,23,-1],
'Dev':[1,4,6,8,9,11,13,16,18,20,21,23,-1]
}
# 'BC'D'EF'G'A'BC
scale=r.choice(scales)
# Meter
M=4 # Beats per bar
M2=4 # note that is said beat (inverted)
# Tempo/rate
Q=180 # 180 beats per minute (3 beats per second)
# Default note length
L=1 # eighths
L2=8
# Voice
V=3 # Number of voices: the main, the bass, and the arp (not used)
# These are important in some way. The variables are what makes ABC
# But there are also a few non-standard variables that'll help
instrument='80' # Square wave
sequence=1 # track 1
# Make the basic output
output='X:'+X+'\nT:'+T+'\nK:'+K+'\nM:'+str(M)+'/'+str(M2)+'\nQ:'+str(Q)+'\nL:'+str(L)+'/'+str(L2)+'\n%%MIDI instrument%%:'+instrument+'\nV:1\n'
# All these variables are set differently in the ABC, but that'll sort itself out.
# Introducing the brand new, all improved, 2.0 version of chord progressions
# Featuring your all time favorites such as majors, minors, 7ths and power chords
# palette=['C','D_','D','E_','E','F','G_','G','A_','A','B_','B']
flavor=['','m','7','M7','dim','aug','-5','sus4','sus2']
flavorDic={'':[0,4,7],'m':[0,3,7],'7':[0,4,7,10],'M7':[0,4,7,11],'dim':[0,3,6],'aug':[0,4,8],'-5':[0,7],'sus4':[0,6,7],'sus2':[0,3,7]}
# The concept here is to randomly generate a sequence of
# - Notes, represented by a-g,A-G
# - Note lengths, represented by numbers after the note
# - Chords in the second track
# Then end it all with the end token, which looks like |]
# all the valid notes in a neat 5x5 grid
notes=['_A,','A,' ,'_B,','B,','C',
'_D','D' ,'_E' ,'E' ,'F',
'_G','G' ,'_A' ,'A' ,'_B',
'B' ,'c,','_d' ,'d' ,'_e',
'e' ,'f' ,'_g' ,'g' ,'z']
# A routine that generates a bar
def bar():
# The result list where everything is added to
result=[]
# A counter that counts the number of notes in a bar
# It comes from M/L; in standard time with eighths, that's (4/4)/(1/8)=8
x=(M/M2)/(L/L2)
while(x>0):
# Pick a random note from the chosen scale
note=notes[r.choice(scalesDic[scale])]
# This is to make sure the note fits the bar
fitting=False
# Pick notes until something fits.
while(not fitting):
# Random note length, because only eighths is boring as all fuck.
notelength=int(abs(r.gauss(0,1.5))+1)
# Check to see if the note fits the bar.
if(notelength<=x):
# It does.
fitting=True
# If the bar isn't full yet, add the note.
if(notelength>1):
# If the note is actually longer than an eighth, it needs a number.
result.append(note+str(notelength))
else:
result.append(note)
# Another bit of the bar filled.
x-=notelength
# The bar is complete.
return result
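# Hedged illustration, not part of the original script: one bar from bar() is a
# list of ABC tokens such as ['C2', '_E', 'G', 'z4'] (values made up) whose
# lengths sum to (M/M2)/(L/L2) default notes, i.e. 8 eighths in 4/4. The helper
# below is an added sketch that checks this invariant; it is never called here.
def _bar_fills_measure_sketch():
    tokens = bar()
    total = 0
    for token in tokens:
        digits = ''.join(ch for ch in token if ch.isdigit())
        total += int(digits) if digits else 1
    return total == (M / M2) / (L / L2)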
# A routine that makes a song
def songify():
result=[]
# x bars per song
for k in range(90):
result.append(bar())
return result
# The bass consists of the first actual note of the bar and is made fun of.
# If no such note exists, because the whole bar is rests, send a C (might change this later on).
def bass(note):
# Output string of the routine
result=''
# These are the note lengths, 8=whole, 1=eights, you do the math.
lengths=[8,4,2,1]
# Choose a length
length=r.choice(lengths)
length=8
# This next bit is pretty blatantly proc-gen, literally random stuff built upon the previous random step.
if length!=8:
# Random boolean in Python is fun
alternating=bool(int(r.randint(0,1))) # Generate a random number, turn int into boolean
if alternating:
# Make a new note that's one octave higher or lower
altNotes=[note+'\'',note+',']
altNote=r.choice(altNotes)
# Fill up the bar
flag=True
for x in range(int(8/length)):
if flag:
# Add the input node to the output string, which is one bar's worth of bass note
result+=note+str(length)
# This routine is called every bar, so it's possible one bar has a whole note, then 8 alternating, then 4 non-alternating
if alternating:
# toggle the flag
flag=False
else:
# Add the alternated note instead.
result+=altNote+str(length)
flag=True
else:
result+=note+'8'
return result
# The bass writing routine
def bassify(songNotes):
result=[]
# Which is the first note of the bar
for x in songNotes:
# Unless it's a rest
for y in x:
# This is for edge cases where there is no note on the bar, it'll default to C
note2='C'
# A note is a name, a pitch modifier and a length (the latter is optional), if the name is 'z' it's a rest
if not y[0]=='z':
# I need the name, but the length is going to fit the whole bar eventually
# Check for accidentals
if y[0]=='_':
note2=bass(y[0]+y[1].upper())
else:
note2=bass(y[0].upper())
# A note is found, break the loop
break
# Whether we broke out the loop or found no note, add the note2 to the staff.
result.append(note2)
return result
# Every bar has a chord, which will be used for the arp and is based on the bass note. Eventually it can be replaced with an actual chord
def progression(note,length):
# Uppercase the note name, so it won't ever go out of bounds.
note=note.upper()
# The result string
result=''
# Choose a flavor
chordflavor=''
# The synergy of a dictionary and its index list is not to be understated
chord=flavorDic[chordflavor]
# Now it needs to be resolved to the 2 to 4 notes
# For playback purposes of course, but also for the arp
up=r.choice([True,False])
# Separate activity based on the chord length
# Length 2 should flip between the two notes, possibly with extra octave support later.
for x in range(length*2):
if len(chord)==2:
# Length 1 means the whole arp takes 1/1 note, write / for half time
# Length 4 means the whole arp takes 1/4 note, write /4 for eighth time
for y in chord:
result+=notes[y+notes.index(note)]+'/'+str(length)+' '
for y in chord:
result+=notes[y+notes.index(note)]+'/'+str(length)+' '
# Length 3 should oscillate between the notes.
if len(chord)==3:
# Oscillation goes one longer than the actual chord, doing the 2nd note again at the end.
for y in range(len(chord)+1):
if y>2:
result+=notes[chord[1]+notes.index(note)]+'/'+str(length)+' '
else:
result+=notes[chord[y]+notes.index(note)]+'/'+str(length)+' '
# Length 4 should go up or down the notes.
if len(chord)==4:
if up:
for y in chord:
result+=notes[y+notes.index(note)]+'/'+str(length)+' '
else:
for y in range(len(chord)):
result+=notes[chord[len(chord)-y-1]+notes.index(note)]+'/'+str(length)+' '
return result
# The arp writing routine
def progressify(bassNotes):
result=[]
# Choose a length, which is actually going to be really small and should be constant
length=r.choice([1,2])
# Take a bar from the bass notes
for i in bassNotes:
# Put each first note of it through the wrangler
# Since it's already sorted, bass notes is easier than sorting out the actual notes again.
if i[0]=='_':
result.append(progression((i[0]+i[1]),length))
else:
result.append(progression(i[0],length))
return result
'''Here be actual main'''
# Generate a song
# The notes
songNotes=songify()
# This is for outputting the song notes
for i in range(18):
for j in range(5):
for k in songNotes[j+i*5]:
# Add each bar of the note list to the output
output+=k
# And put a | delimiter at the end of it.
output+='|'
# End of the line
if i*j<17*4:
output+='\n'
# voice end
output+=']\n\n'
# For my next trick, I'll do the bass notes, which is the second voice
output+='V:2\n'
bassNotes=bassify(songNotes)
# This is for outputting the bass notes
for i in range(18):
for j in range(5):
for k in bassNotes[j+i*5]:
# Add each bar of the note list to the output
output+=k # This can be changed so instead it outputs alternating octaved notes in half, quarter, or eighths, which should be a def
# And put a | delimiter at the end of it.
output+='|'
# End of the line, unless end of song
if i*j<17*4:
output+='\n'
# voice end
output+=']\n\n'
# For my last trick, I'll do an arp
output+='V:3\n'
progNotes=progressify(bassNotes)
for i in range(18):
for j in range(5):
for k in progNotes[j+i*5]:
# Add each bar of the note list to the output
output+=k # This can be changed so instead it outputs alternating octaved notes in half, quarter, or eighths, which should be a def
# And put a | delimiter at the end of it.
output+='|'
# End of the line, unless end of song
if i*j<17*4:
output+='\n'
# voice end
output+=']\n\n'
fileStream=open(T+'.abc','w')
fileStream.write(output)
fileStream.close()
'''todo: actual chord progression
the reason it's not here is because this version bases itself on the notes, not the chords
i.e. it makes the notes first, then the chords from that.
Other ways of making a song proc-gen is starting with drums, a base note (i.e. not chords), or just a function for the pitch (e.g. sine)'''
# output is now a genuine, bonafide ABC file, write it to a .abc file and it will work.
# That's not the purpose, so instead parse it with the
import music21 as mu
# package for further processing
song=mu.converter.parse(output)
# one such process is transposing according to the key.
transpose={'C':0,'C# ':1,'D':2,'D# ':3,'E':4,'F':5,'F# ':6,'G':-5,'G# ':-4,'A':-3,'A# ':-2,'B':-1}
# This is where the key is being actually used.
song.transpose(transpose[K])
# print(song)
song.show()
# Now everything works.
|
StarcoderdataPython
|
3315792
|
<filename>lanedetect.py
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import os
from lanedetect_helpers import process_image
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def lane_detect_images():
test_data_dir = "test_images/"
# Read Test images
test_images = os.listdir(test_data_dir)
for test_image in test_images:
image = mpimg.imread(os.path.join(test_data_dir, test_image))
final_image = process_image(image)
def lane_detect_videos():
test_data_dir = "test_videos/"
video_out_dir = "test_videos_output/"
test_videos = os.listdir(test_data_dir)
for test_video in test_videos:
test_video_input = os.path.join(test_data_dir, test_video)
test_video_output = os.path.join(video_out_dir, test_video)
video_clip = VideoFileClip(test_video_input)
video_frame = video_clip.fl_image(process_image)
video_frame.write_videofile(test_video_output, audio=False)
if __name__ =="__main__":
lane_detect_images()
#lane_detect_videos()
|
StarcoderdataPython
|
3258953
|
from datetime import date
import factory
from factory import fuzzy
from dataactcore.models import domainModels
class SF133Factory(factory.Factory):
class Meta:
model = domainModels.SF133
sf133_id = None
agency_identifier = fuzzy.FuzzyText()
allocation_transfer_agency = fuzzy.FuzzyText()
availability_type_code = fuzzy.FuzzyText()
beginning_period_of_availa = fuzzy.FuzzyText()
ending_period_of_availabil = fuzzy.FuzzyText()
main_account_code = fuzzy.FuzzyText()
sub_account_code = fuzzy.FuzzyText()
tas = fuzzy.FuzzyText()
fiscal_year = fuzzy.FuzzyInteger(2010, 2040)
period = fuzzy.FuzzyInteger(1, 12)
line = fuzzy.FuzzyInteger(1, 9999)
amount = 0
class CGACFactory(factory.Factory):
class Meta:
model = domainModels.CGAC
cgac_id = None
cgac_code = fuzzy.FuzzyText()
agency_name = fuzzy.FuzzyText()
class FRECFactory(factory.Factory):
class Meta:
model = domainModels.FREC
frec_id = None
frec_code = fuzzy.FuzzyText()
agency_name = fuzzy.FuzzyText()
cgac = factory.SubFactory(CGACFactory)
class TASFactory(factory.Factory):
class Meta:
model = domainModels.TASLookup
tas_id = None
account_num = fuzzy.FuzzyInteger(1, 9999)
allocation_transfer_agency = fuzzy.FuzzyText()
agency_identifier = fuzzy.FuzzyText()
beginning_period_of_availa = fuzzy.FuzzyText()
ending_period_of_availabil = fuzzy.FuzzyText()
availability_type_code = fuzzy.FuzzyText()
fr_entity_description = fuzzy.FuzzyText()
fr_entity_type = fuzzy.FuzzyText()
main_account_code = fuzzy.FuzzyText()
sub_account_code = fuzzy.FuzzyText()
internal_start_date = fuzzy.FuzzyDate(date(2015, 1, 1), date(2015, 12, 31))
internal_end_date = None
class ProgramActivityFactory(factory.Factory):
class Meta:
model = domainModels.ProgramActivity
program_activity_id = None
fiscal_year_quarter = fuzzy.FuzzyText()
agency_id = fuzzy.FuzzyText()
allocation_transfer_id = fuzzy.FuzzyText()
account_number = fuzzy.FuzzyText()
program_activity_code = fuzzy.FuzzyText()
program_activity_name = fuzzy.FuzzyText()
class ObjectClassFactory(factory.Factory):
class Meta:
model = domainModels.ObjectClass
object_class_id = None
object_class_code = fuzzy.FuzzyText()
object_class_name = fuzzy.FuzzyText()
class CFDAProgramFactory(factory.Factory):
class Meta:
model = domainModels.CFDAProgram
cfda_program_id = None
program_number = fuzzy.FuzzyDecimal(0, 99, 3)
program_title = fuzzy.FuzzyText()
popular_name = fuzzy.FuzzyText()
federal_agency = fuzzy.FuzzyText()
authorization = fuzzy.FuzzyText()
objectives = fuzzy.FuzzyText()
types_of_assistance = fuzzy.FuzzyText()
uses_and_use_restrictions = fuzzy.FuzzyText()
applicant_eligibility = fuzzy.FuzzyText()
beneficiary_eligibility = fuzzy.FuzzyText()
credentials_documentation = fuzzy.FuzzyText()
preapplication_coordination = fuzzy.FuzzyText()
application_procedures = fuzzy.FuzzyText()
award_procedure = fuzzy.FuzzyText()
deadlines = fuzzy.FuzzyText()
range_of_approval_disapproval_time = fuzzy.FuzzyText()
website_address = fuzzy.FuzzyText()
formula_and_matching_requirements = fuzzy.FuzzyText()
length_and_time_phasing_of_assistance = fuzzy.FuzzyText()
reports = fuzzy.FuzzyText()
audits = fuzzy.FuzzyText()
records = fuzzy.FuzzyText()
account_identification = fuzzy.FuzzyText()
obligations = fuzzy.FuzzyText()
range_and_average_of_financial_assistance = fuzzy.FuzzyText()
appeals = fuzzy.FuzzyText()
renewals = fuzzy.FuzzyText()
program_accomplishments = fuzzy.FuzzyText()
regulations_guidelines_and_literature = fuzzy.FuzzyText()
regional_or_local_office = fuzzy.FuzzyText()
headquarters_office = fuzzy.FuzzyText()
related_programs = fuzzy.FuzzyText()
examples_of_funded_projects = fuzzy.FuzzyText()
criteria_for_selecting_proposals = fuzzy.FuzzyText()
url = fuzzy.FuzzyText()
recovery = fuzzy.FuzzyText()
omb_agency_code = fuzzy.FuzzyText()
omb_bureau_code = fuzzy.FuzzyText()
published_date = fuzzy.FuzzyText()
archived_date = fuzzy.FuzzyText()
class ZipsFactory(factory.Factory):
class Meta:
model = domainModels.Zips
zips_id = None
zip5 = fuzzy.FuzzyText()
zip_last4 = fuzzy.FuzzyText()
state_abbreviation = fuzzy.FuzzyText()
county_number = fuzzy.FuzzyText()
congressional_district_no = fuzzy.FuzzyText()
class SubTierAgencyFactory(factory.Factory):
class Meta:
model = domainModels.SubTierAgency
sub_tier_agency_id = None
sub_tier_agency_code = fuzzy.FuzzyText()
sub_tier_agency_name = fuzzy.FuzzyText()
cgac = factory.SubFactory(CGACFactory)
frec = factory.SubFactory(FRECFactory)
priority = fuzzy.FuzzyInteger(1, 2)
class OfficeFactory(factory.Factory):
class Meta:
model = domainModels.Office
office_id = None
office_code = fuzzy.FuzzyText()
office_name = fuzzy.FuzzyText()
sub_tier_code = fuzzy.FuzzyText()
agency_code = fuzzy.FuzzyText()
contract_awards_office = fuzzy.FuzzyChoice((False, True))
contract_funding_office = fuzzy.FuzzyChoice((False, True))
financial_assistance_awards_office = fuzzy.FuzzyChoice((False, True))
financial_assistance_funding_office = fuzzy.FuzzyChoice((False, True))
class StatesFactory(factory.Factory):
class Meta:
model = domainModels.States
states_id = None
state_code = fuzzy.FuzzyText()
state_name = fuzzy.FuzzyText()
class CountyCodeFactory(factory.Factory):
class Meta:
model = domainModels.CountyCode
county_code_id = None
county_number = fuzzy.FuzzyText()
county_name = fuzzy.FuzzyText()
state_code = fuzzy.FuzzyText()
class CityCodeFactory(factory.Factory):
class Meta:
model = domainModels.CityCode
city_code_id = None
feature_name = fuzzy.FuzzyText()
feature_class = fuzzy.FuzzyText()
city_code = fuzzy.FuzzyText()
state_code = fuzzy.FuzzyText()
county_number = fuzzy.FuzzyText()
county_name = fuzzy.FuzzyText()
latitude = fuzzy.FuzzyText()
longitude = fuzzy.FuzzyText()
class ZipCityFactory(factory.Factory):
class Meta:
model = domainModels.ZipCity
zip_city_id = None
zip_code = fuzzy.FuzzyText()
city_name = fuzzy.FuzzyText()
class CountryCodeFactory(factory.Factory):
class Meta:
model = domainModels.CountryCode
country_code_id = None
country_code = fuzzy.FuzzyText()
country_name = fuzzy.FuzzyText()
class DunsFactory(factory.Factory):
class Meta:
model = domainModels.DUNS
duns_id = None
awardee_or_recipient_uniqu = fuzzy.FuzzyText()
legal_business_name = fuzzy.FuzzyText()
dba_name = fuzzy.FuzzyText()
activation_date = fuzzy.FuzzyDate(date(2000, 1, 1), date(2020, 12, 31))
deactivation_date = fuzzy.FuzzyDate(date(2000, 1, 1), date(2020, 12, 31))
registration_date = fuzzy.FuzzyDate(date(2000, 1, 1), date(2020, 12, 31))
expiration_date = fuzzy.FuzzyDate(date(2000, 1, 1), date(2020, 12, 31))
last_sam_mod_date = fuzzy.FuzzyDate(date(2000, 1, 1), date(2020, 12, 31))
address_line_1 = fuzzy.FuzzyText()
address_line_2 = fuzzy.FuzzyText()
city = fuzzy.FuzzyText()
state = fuzzy.FuzzyText()
zip = fuzzy.FuzzyText()
zip4 = fuzzy.FuzzyText()
country_code = fuzzy.FuzzyText()
congressional_district = fuzzy.FuzzyText()
entity_structure = fuzzy.FuzzyText()
business_types_codes = ['A', 'B', 'C']
ultimate_parent_unique_ide = fuzzy.FuzzyText()
ultimate_parent_legal_enti = fuzzy.FuzzyText()
high_comp_officer1_full_na = fuzzy.FuzzyText()
high_comp_officer1_amount = fuzzy.FuzzyText()
high_comp_officer2_full_na = fuzzy.FuzzyText()
high_comp_officer2_amount = fuzzy.FuzzyText()
high_comp_officer3_full_na = fuzzy.FuzzyText()
high_comp_officer3_amount = fuzzy.FuzzyText()
high_comp_officer4_full_na = fuzzy.FuzzyText()
high_comp_officer4_amount = fuzzy.FuzzyText()
high_comp_officer5_full_na = fuzzy.FuzzyText()
high_comp_officer5_amount = fuzzy.FuzzyText()
last_exec_comp_mod_date = fuzzy.FuzzyDate(date(2000, 1, 1), date(2020, 12, 31))
class StateCongressionalFactory(factory.Factory):
class Meta:
model = domainModels.StateCongressional
state_congressional_id = None
state_code = fuzzy.FuzzyText()
congressional_district_no = fuzzy.FuzzyText()
census_year = fuzzy.FuzzyInteger(1990, 2040)
|
StarcoderdataPython
|
1722212
|
'''
LANGUAGE: Python
AUTHOR: <NAME>
GITHUB: https://github.com/Chandra-Sekhar-Bala
'''
print('Hello, World!')
|
StarcoderdataPython
|
80907
|
import math
opposite = int(input("Enter the opposite side: "))
adjacent = int(input("Enter the adjacent side: "))
hypotenuse = math.sqrt(math.pow(opposite, 2) + math.pow(adjacent, 2))
print(f'hypotenuse = {hypotenuse}')
|
StarcoderdataPython
|
1739491
|
<filename>mixpyBuild/FileDialog.py
from tkinter import *
import tkinter.filedialog
def getOneFile():
fn = tkinter.filedialog.askopenfilename()
return fn
def getManyFiles():
files = tkinter.filedialog.askopenfilenames()
if files:
ofiles = []
for filename in files:
ofiles.append(filename)
return ofiles
def getDirectory():
dr = tkinter.filedialog.askdirectory()
return dr
|
StarcoderdataPython
|
1743395
|
<reponame>crowdbotics-apps/nccaa-rfp-33947<filename>backend/inquiry/apps.py<gh_stars>0
from django.apps import AppConfig
class InquiryConfig(AppConfig):
name = 'inquiry'
|
StarcoderdataPython
|
3573
|
<reponame>startupgrind/mezzanine
__version__ = "4.3.1.post1"
|
StarcoderdataPython
|
3379911
|
"""
This module provides functions for time evolution of a state given as MPS,
MPO or PMPS via the tMPS algorithm.
This is based on functions which calculate the time evolution of an operator in
MPS, MPO or PMPS form from Hamiltonians acting on every single and every two
adjacent sites.
tMPS is a method to evolve a one dimensional quantum state, represented
as an MPS, MPO or PMPS, in time. It requires that the Hamiltonian is only
comprised of terms acting on single sites or pairs of adjacent sites of
the state. This allows the Hamiltonian :math:`H` to be written as
.. math::
H = \\sum_j h_{j, j+1},
where :math:`j` is the index of the respective site in the state.
These components can be grouped into those acting on even and those acting
on odd sites, leading to a time evolution operator
.. math::
U(\\tau) = \\text{e}^{\mathrm{i}(H_{\\text{even}}+H_{\\text{
odd}})\\tau},
with
.. math::
H_{\\text{even}} = \\sum_{j\\text{ even}} h_{j, j+1}
.. math::
H_{\\text{odd}} = \\sum_{j\\text{ odd}} h_{j, j+1}
This allows to perform Trotter-Suzuki decompositions of :math:`U`,
for example of second order:
.. math::
U(\\tau) = \\text{e}^{\mathrm{i} H_{\\text{odd}} \\tau/2} \\text{e}^{\mathrm{i}
H_{\\text{even}} \\tau} \\text{e}^{\mathrm{i} H_{\\text{odd}} \\tau/2}
+ \\mathcal{O}(\\tau^3).
These decompositions provide the advantage that :math:`U` does not need to
be calculated as a whole matrix, which could potentially become way too
big. Since the elements within :math:`H_{\\text{even}}` and those within
:math:`H_{\\text{odd}}` commute, :math:`U` can be broken up into smaller pieces
which a computer can handle even for very large systems.
For more information, see chapter 7 in Schollwöck’s paper Annals of
Physics 326, 96-192 (2011); doi: 10.1016/j.aop.2010.09.012
In this file, :func:`evolve` is the main function to be called to evolve a
state in time. It will itself call :func:`_trotter_slice` which will call
:func:`_trotter_two` or :func:`_trotter_four` to calculate the :math:`U(
\\tau)` representing one Trotter slice. When that is done, :func:`evolve`
will take it and pass it on to :func:`_time_evolution` which will then go
through the Trotter iterations, thus actually evolving the state in time,
and store the requested results on the way.
"""
from collections import Counter
from itertools import repeat
import numpy as np
from scipy.linalg import expm
import mpnum as mp
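# Hedged illustration, not part of the original module: a minimal numerical check,
# using two non-commuting 2x2 matrices standing in for H_odd and H_even, that the
# second-order splitting described in the module docstring approaches the exact
# propagator as tau shrinks (error per slice ~ tau**3). Matrix values are
# illustrative assumptions; this function is never called by the module.
def _second_order_splitting_error_sketch(tau=1e-2):
    a = np.array([[0., 1.], [1., 0.]])   # stand-in for H_odd
    b = np.array([[1., 0.], [0., -1.]])  # stand-in for H_even
    exact = expm(-1j * tau * (a + b))
    split = expm(-1j * tau / 2 * a).dot(expm(-1j * tau * b)).dot(expm(-1j * tau / 2 * a))
    return np.linalg.norm(exact - split)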
def _get_subsystems_list(subsystems, len_step_numbers):
"""
This function just brings subsystems, which indicates which subsystem
should be returned at the respective step number, in the right form.
Args:
subsystems (list):
Same as that described in :func:`evolve`
len_step_numbers (int):
The length of the array containing the step numbers for which the
evolved state is to be stored.
Returns:
list[list[int]]:
Sites for which the subsystem should be returned at the
respective time,
"""
if type(subsystems[0]) != list:
if len(subsystems) != 2:
raise ValueError("Subsystems must be of the form [a, b] or [[a1, "
"a2, ...], [b1, b2, ...]].")
subsystems = [subsystems] * len_step_numbers
return subsystems
def _times_to_steps(ts, num_trotter_slices):
"""
Based on the requested times `ts`, calculate Trotter step numbers at which
(subsystems of) evolved states need to be saved.
.. doctest::
>>> _times_to_steps([10, 25, 30], 100)
([33, 83, 100], 0.3)
>>> _times_to_steps([8, 26, 19], 80)
([25, 80, 58], 0.325)
Args:
ts (list[float]):
The times for which the evolution should be computed and the
state of the full system or a subsystem returned (i.e. its reduced
density matrix (for now only works with method='mpo'. If the
method is not mpo, omit subsystems)). The algorithm will
calculate the evolution using the given number of Trotter steps
for the largest number in ts. On the way there it will store the
evolved states for smaller times.
NB: Beware of memory overload, since len(ts)
mpnum.MPArrays will be stored
num_trotter_slices (int): Number of Trotter slices to be used for the
largest t in ts.
Returns:
tuple[list[int], float]: step numbers, tau = maximal t /
num_trotter_slices
"""
tau = max(ts) / num_trotter_slices
step_numbers = [int(round(t / tau)) for t in ts]
return step_numbers, tau
def _trotter_slice(hamiltonians, tau, num_sites, trotter_order, compr):
"""
Get a list of ordered operator exponentials for one Trotter slice.
The Trotter-Suzuki formula approximates the time-evolution during a single
Trotter slice
.. math::
U(\\tau) = \\text{e}^{\\mathrm{i}\\sum_{j=1}^m H_j \\tau},
with
.. math::
{U}^\\prime(\\tau) =\\prod_{p=1}^N U_p,
which is a product of :math:`N` operator exponentials
.. math::
U_p := \\text{e}^{\\mathrm{i} H_{j_p}\\tau_p}
of :math:`H_j`. Here :math:`\\{\\tau_p\\}` is a sequence of real numbers
such that :math:`\\sum_p \\tau_p = \\tau`. The :math:`H_{j_p}` for a
certain :math:`p` are all elements of the Hamiltonian acting either on even
or on odd pairs of adjacent sites. This ensures that within one :math:`U_p`
all terms in the exponential commute.
This function returns the list of operators :math:`U_p` as MPOs.
For more information on Trotter-Suzuki, see chapter 7 in Schollwöck's paper
Annals of Physics 326, 96-192 (2011); doi: 10.1016/j.aop.2010.09.012.
Args:
hamiltonians (list):
List of two lists of Hamiltonians, the Hamiltonians in the first
acting on every single site, the Hamiltonians in the second acting
on every pair of two adjacent sites
tau (float):
As defined in :func:`_times_to_steps`
num_sites (int):
Number of sites of the state to be evolved
trotter_order (int):
Order of Trotter-Suzuki decomposition to be used
compr (dict): Parameters for the compression which is executed on every
MPA during the calculations, except for the Trotter calculation
where trotter_compr is used
Returns:
list[mpnum.MPArray]:
The time evolution operator parts, which, applied one after
another, give one Trotter slice
"""
if trotter_order == 2:
return _trotter_two(hamiltonians, tau, num_sites, compr)
if trotter_order == 4:
return _trotter_four(hamiltonians, tau, num_sites, compr)
else:
raise ValueError("Trotter order " + str(trotter_order) +
" is currently not implemented.")
def _trotter_two(hamiltonians, tau, num_sites, compr):
"""
Get a list of ordered operator exponentials for one second-order Trotter
slice.
Based on the description in the documentation of :func:`_trotter_slice`
and on the paper by Schollwöck, :math:`N` = 3, with :math:`\\tau_1
=\\tau_3 = \\tau/2` and :math:`\\tau_2=\\tau`.
Args:
hamiltonians (list):
List of two lists of Hamiltonians, the Hamiltonians in the first
acting on every single site, the Hamiltonians in the second acting
on every pair of two adjacent sites
tau (float):
As defined in :func:`_times_to_steps`
num_sites (int):
Number of sites of the state to be evolved
compr (dict): Parameters for the compression which is executed on every
MPA during the calculations, except for the Trotter calculation
where trotter_compr is used
Returns:
list[mpnum.MPArray]:
The time evolution operator parts, which, applied one after
another, give one Trotter slice
"""
h_single, h_adjacent = _get_h_list(hamiltonians=hamiltonians,
num_sites=num_sites)
dims = [len(h_single[i]) for i in range(len(h_single))]
u_odd_list = _get_u_list_odd(dims, h_single, h_adjacent, tau=tau / 2)
u_even_list = _get_u_list_even(dims, h_single, h_adjacent, tau=tau)
u_odd = _u_list_to_mpo_odd(dims, u_odd_list, compr)
u_even = _u_list_to_mpo_even(dims, u_even_list, compr)
return [u_odd, u_even, u_odd]
def _trotter_four(hamiltonians, tau, num_sites, compr):
"""
Get a list of ordered operator exponentials for one fourth-order Trotter
slice.
Based on the description in the documentation of :func:`_trotter_slice`
and on the paper by Schollwöck, :math:`N` = 11, with
.. math::
\\tau_1 = \\tau_{11} = \\frac{\\tau}{2(4 - 4^{1/3})},
.. math::
\\tau_2 = \\tau_3 = \\tau_4 = \\tau_8 = \\tau_9 = \\tau_{10} =
\\frac{\\tau}{4 - 4^{1 / 3}},
.. math::
\\tau_5 = \\tau_7 = \\frac{\\tau}{2}\\left(1 - \\frac{3}{4 - 4^{1 / 3}}\\right),
and
.. math::
\\tau_6 = \\tau\\left(1 - \\frac{4}{4 - 4^{1 / 3}}\\right).
Args:
hamiltonians (list):
List of two lists of Hamiltonians, the Hamiltonians in the first
acting on every single site, the Hamiltonians in the second acting
on every pair of two adjacent sites
tau (float):
As defined in :func:`_times_to_steps`
num_sites (int):
Number of sites of the state to be evolved
compr (dict):
Parameters for the compression which is executed on every MPA during
the calculations, except for the Trotter calculation where
trotter_compr is used
Returns:
list[mpnum.MPArray]:
The time evolution operator parts, which, applied one after another,
give one Trotter slice
"""
taus_for_odd = [tau * .5 / (4 - 4 ** (1 / 3)),
tau / (4 - 4 ** (1 / 3)),
tau * .5 * (1 - 3 / (4 - 4 ** (1 / 3)))]
taus_for_even = [tau / (4 - 4 ** (1 / 3)),
tau * (1 - 4 / (4 - 4 ** (1 / 3)))]
h_single, h_adjacent = _get_h_list(hamiltonians=hamiltonians,
num_sites=num_sites)
dims = [len(h_single[i]) for i in range(len(h_single))]
u_odd_lists = [_get_u_list_odd(dims, h_single, h_adjacent, t) for t in
taus_for_odd]
u_even_lists = [_get_u_list_even(dims, h_single, h_adjacent, t) for t in
taus_for_even]
multiplication_order = [0, 0, 1, 0, 2, 1, 2, 0, 1, 0, 0]
us = []
for i in range(11):
if i % 2 == 1:
us = us + [_u_list_to_mpo_even(dims,
u_even_lists[
multiplication_order[i]],
compr)]
else:
us = us + [_u_list_to_mpo_odd(
dims, u_odd_lists[multiplication_order[i]], compr)]
return us
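# Hedged illustration, not part of the original module: the eleven tau_p used by
# _trotter_four must sum to tau separately over the odd and over the even
# exponentials, which is what makes the decomposition a valid splitting of U(tau).
# This added, uncalled helper verifies that property numerically.
def _fourth_order_tau_sums_sketch(tau=1.0):
    c = 4 - 4 ** (1 / 3)
    taus_for_odd = [tau * .5 / c, tau / c, tau * .5 * (1 - 3 / c)]
    taus_for_even = [tau / c, tau * (1 - 4 / c)]
    multiplication_order = [0, 0, 1, 0, 2, 1, 2, 0, 1, 0, 0]
    odd_sum = sum(taus_for_odd[multiplication_order[i]] for i in range(11) if i % 2 == 0)
    even_sum = sum(taus_for_even[multiplication_order[i]] for i in range(11) if i % 2 == 1)
    return np.isclose(odd_sum, tau) and np.isclose(even_sum, tau)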
def _get_h_list(hamiltonians, num_sites):
"""
Convert given list of Hamiltonians into form suitable for exponentiation.
If only one Hamiltonian acting on every single site and one acting on every
two adjacent sites is given, transform it into the form returned. If not,
check whether the lengths of the lists match the number of sites.
Args:
hamiltonians (list):
Hamiltonians as in :func:`evolve`
num_sites (int):
Number of sites of the state to be evolved
Returns:
list[list[numpy.ndarray], list[numpy.ndarray]]:
A list with two items: The first is a list of Hamiltonians acting
on the single sites, like [h1, h2, h3, ...] and the second is a list
of Hamiltonians acting on each two adjacent sites, like [h12, h23,
h34, ...]
"""
if type(hamiltonians[0]) is not list:
hamiltonians = [list(repeat(hamiltonians[0], num_sites)),
list(repeat(hamiltonians[1], num_sites - 1))]
elif (len(hamiltonians[0]) != num_sites) or (
len(hamiltonians[1]) != num_sites - 1):
raise ValueError(
"Number of given Hamiltonians does not match number of sites")
return hamiltonians[0], hamiltonians[1]
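# Hedged illustration, not part of the original module: how the [H_i, H_ij] input
# accepted by evolve() is expanded by _get_h_list for a 4-site spin-1/2 chain, and
# how the pair terms then group into the odd bonds (h12, h34) and the even bond
# (h23) used by _get_u_list_odd and _get_u_list_even below. Operator choices are
# illustrative assumptions; the function is never called.
def _example_even_odd_grouping_sketch():
    sx = np.array([[0., 1.], [1., 0.]])
    sz = np.array([[1., 0.], [0., -1.]])
    h_single, h_adjacent = _get_h_list([sx, np.kron(sz, sz)], num_sites=4)
    odd_bonds = h_adjacent[::2]    # acting on sites (1,2) and (3,4)
    even_bonds = h_adjacent[1::2]  # acting on sites (2,3)
    return len(h_single), len(odd_bonds), len(even_bonds)  # (4, 2, 1)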
def _get_u_list_odd(dims, h_single, h_adjacent, tau):
"""
Calculates individual operator exponentials of adjacent odd-even sites,
i.e. transforms :math:`\\{h_{j, j+1} : j \\text{ odd}\\}` into :math:`\\{
\\text{e}^{\\mathrm{i} h_{j,j+1} \\tau} : j \\text{ odd}\\}`
.. doctest::
>>> dims = [2, 2, 2, 2]
>>> sx = np.array([[0, 1], [1, 0]])
>>> sz = np.array([[1, 0], [0, -1]])
>>> tau = 1
>>> actual_result = _get_u_list_odd(dims, [sx] * 4, [np.kron(sz, sz)] * 3, tau)
>>> expected_result = [expm(-1j * tau * (np.kron(sz,sz) + .5 * (np.kron(sx, np.identity(2)) + np.kron(np.identity(2), sx))))] * 2
>>> print(np.array_equal(expected_result, actual_result))
True
Args:
dims (list):
The dimensions of the single sites of the state U will be applied to
h_single (list):
The Hamiltonians acting on every single site
h_adjacent (list):
The Hamiltonians acting on every two adjacent sites
tau (float):
The time step for the time evolution of U
Returns:
list[numpy.ndarray]:
List of operators acting on odd adjacent sites, like [u12, u34, ...]
"""
h_2sites = [
1 / 2 * (np.kron(h_single[i], np.identity(dims[i + 1])) +
np.kron(np.identity(dims[i]), h_single[i + 1]))
for i in range(0, len(h_single) - 1, 2)]
u_odd = list(expm(-1j * tau * (h + h_2sites[i]))
for i, h in enumerate(h_adjacent[::2]))
if len(dims) % 2 == 1:
u_odd = u_odd + [expm(-1j * tau * h_single[-1] / 2)]
return u_odd
def _get_u_list_even(dims, h_single, h_adjacent, tau):
"""
Calculates individual operator exponentials of adjacent even-odd sites,
i.e. transforms :math:`\\{h_{j,j+1} : j \\text{ even}\\}` into :math:`\\{
\\text{e}^{\\mathrm{i} h_{j,j+1} \\tau} : j \\text{ even}\\}`
Args:
dims (list):
The dimensions of the single sites of the state U will be applied to
h_single (list):
The Hamiltonians acting on every single site
h_adjacent (list):
The Hamiltonians acting on every two adjacent sites
tau (float):
The time step for the time evolution of U
Returns:
list[numpy.ndarray]:
List of operators acting on even adjacent sites, like
[u23, u45, ...]
"""
h_2sites = [
1 / 2 * (np.kron(h_single[i], np.identity(dims[i + 1])) +
np.kron(np.identity(dims[i]), h_single[i + 1]))
for i in range(1, len(h_single) - 1, 2)]
u_even = list(expm(-1j * tau * (h + h_2sites[i])) for i, h in
enumerate(h_adjacent[1::2]))
u_even = [expm(-1j * tau * h_single[0] / 2)] + u_even
if len(dims) % 2 == 0:
u_even = u_even + [expm(-1j * tau * h_single[-1] / 2)]
return u_even
def _u_list_to_mpo_odd(dims, u_odd, compr):
"""
Transforms list of matrices on odd-even sites to MPO acting on full
state. So the list of u_odd :math:`\\{u_{j,j+1} : j \\text{ odd}\\}`,
which are ``numpy.ndarrays``, is transformed into :math:`\\bigotimes_j
u_{j,j+1} : j \\text{ odd}` of the type ``mpnum.MPArray``.
Args:
dims (list):
List of dimensions of each site
u_odd (list):
List of time evolution operators acting on odd adjacent sites
compr (dict):
Parameters for the compression which is executed on every MPA during
the calculations, except for the Trotter calculation where
trotter_compr is used
Returns:
mpnum.MPArray:
The MPO for the full state acting on odd-even adjacent sites
"""
if len(dims) % 2 == 1:
last_h = u_odd[-1]
u_odd = u_odd[:-1]
odd = mp.chain(matrix_to_mpo(
u, [[dims[2 * i]] * 2, [dims[2 * i + 1]] * 2], compr)
for i, u in enumerate(u_odd))
if len(dims) % 2 == 1:
odd = mp.chain([odd, matrix_to_mpo(last_h, [[dims[-1]] * 2], compr)])
return odd
def _u_list_to_mpo_even(dims, u_even, compr):
"""
Transforms list of matrices on even-odd sites to MPO acting on full
state. So the list of u_even :math:`\\{u_{j,j+1} : j \\text{ even}\\}`,
which are ``numpy.ndarrays``, is transformed into :math:`\\bigotimes_j
u_{j,j+1} : j \\text{ even}` of the type ``mpnum.MPArray``.
Args:
dims (list):
List of dimensions of each site
u_even (list):
List of time evolution operators acting on even adjacent sites
compr (dict): Parameters for the compression which is executed on every
MPA during the calculations, except for the Trotter calculation
where trotter_compr is used
Returns:
mpnum.MPArray:
The MPO for the full state acting on even-odd adjacent sites
"""
if len(dims) % 2 == 0:
last_h = u_even[-1]
u_even = u_even[:-1]
even = mp.chain(matrix_to_mpo(
u, [[dims[2 * i + 1]] * 2, [dims[2 * i + 2]] * 2], compr)
for i, u in enumerate(u_even[1::]))
even = mp.chain([matrix_to_mpo(u_even[0], [[dims[0]] * 2], compr), even])
if len(dims) % 2 == 0:
even = mp.chain([even, matrix_to_mpo(last_h, [[dims[-1]] * 2], compr)])
return even
def matrix_to_mpo(matrix, shape, compr=None):
"""
Convert matrix to MPO
Converts given :math:`M \\times N` matrix in global form into an MPO with
the given shape. The number of legs per site must be the same for all sites.
.. doctest::
>>> matrix = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
>>> mpo = matrix_to_mpo(matrix, [[3, 3]])
>>> print(mpo.to_array_global())
[[1 0 0]
[0 0 0]
[0 0 0]]
Args:
matrix (numpy.ndarray):
The matrix to be transformed to an MPO
shape (list):
The shape the single sites of the resulting MPO should have, as used
in mpnum. For example three sites with two legs each might look like
``[[3, 3], [2, 2], [2, 2]]``. Format same as ``numpy.ndarray.shape``
compr (dict):
Parameters for the compression which is executed on every MPA during
the calculations, except for the Trotter calculation where
trotter_compr is used
Returns:
mpnum.MPArray:
The MPO with shape ``shape`` representing the matrix
"""
if compr is None:
compr = dict(method='svd', relerr=1e-6)
num_legs = len(shape[0])
if not (np.array([len(shape[i]) for i in
range(len(shape))]) == num_legs).all():
raise ValueError("Not all sites have the same number of physical legs")
newShape = []
for i in range(num_legs):
for j in range(len(shape)):
newShape = newShape + [shape[j][i]]
matrix = matrix.reshape(newShape)
mpo = mp.MPArray.from_array_global(matrix, ndims=num_legs)
mpo.compress(**compr)
return mpo
def normalize(state, method):
"""
Normalize a state.
Args:
state (mpnum.MPArray): The state to be normalized
method (str): Whether it is a MPS, MPO or PMPS state
Returns:
mpnum.MPArray: The normalized state
"""
if method == 'pmps' or method == 'mps':
state = state / mp.norm(state)
if method == 'mpo':
state = state / mp.trace(state)
return state
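# Illustrative sketch (added for clarity; not part of the original module):
# for an MPO, `normalize` divides by the trace, for MPS/PMPS by the 2-norm.
# The 2-site diagonal operator below is an arbitrary example.
def _example_normalize_usage():
    rho = matrix_to_mpo(np.diag([2., 0., 0., 2.]), [[2, 2], [2, 2]])
    return normalize(rho, 'mpo')  # mp.trace of the returned MPO is 1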
def _set_compr_params():
"""
A function to set default compression parameters if none were provided.
They will suffice in many cases, but might well lead to
problems described in the Pitfalls section of the introduction notebook.
If that is the case, try to find and provide your own suitable
compression parameters, which is also recommended to have more control
over the calculations and their precision. For more information on this,
read the introduction notebook and make use of the verbose output option to
monitor bond dimensions during calculations.
Returns:
        tuple[dict, dict]:
            Some default compression parameters and Trotter compression
            parameters, returned in that order
"""
return dict(method='svd', relerr=1e-10), dict(method='svd', relerr=1e-4,
rank=30)
def evolve(state, hamiltonians, num_trotter_slices, method, trotter_order,
ts, trotter_compr=None, compr=None, subsystems=None,
v=0):
"""
Evolve a one dimensional MPS, MPO or PMPS state using tMPS as described in
the module's documentation.
The initial state, Hamiltonians and certain parameters are required. The
output is a list of times and a list of the evolved states at these times.
Those states might be subsystems of the whole evolved system,
which allows for the user to keep memory consumption small by
focusing on the subsystems of interest.
.. todo::
Raise exception if hamiltonians are not of the right dimension
.. todo::
Implement tracking of compression errors.
.. todo::
Get variable compression to work (might involve changing mpnum).
Args:
state (mpnum.MPArray):
The state to be evolved in time. The state has to be an MPS, MPO or
PMPS, depending on which method is chosen
hamiltonians (list):
Either a list containing the Hamiltonian acting on every single site
and the Hamiltonian acting on every two adjacents sites, like
``[H_i, H_ij]``, or a list containing a list of Hamiltonians acting
on the single sites and a list of Hamiltonians acting on each two
adjacent sites, like ``[[h1, h2, h3, ...], [h12, h23, h34, ...]]``
num_trotter_slices (int):
Number of Trotter slices to be used for evolution over time equal to
the largest t in ts.
method (str):
Which method to use. Either 'mps', 'mpo' or 'pmps'.
trotter_order (int):
Order of Trotter-Suzuki decomposition to be used. Currently only 2
and 4 are implemented
ts (list[float]):
The times for which the evolution should be computed and the state
            of the full system or a subsystem returned (i.e. its reduced
            density matrix). The algorithm will calculate the
            evolution using the given number of Trotter steps for the largest
            number in ts. On the way there it will store the evolved states for
            smaller times. NB: Beware of memory overload, since len(ts)
            mpnum.MPArrays will be stored
trotter_compr (dict):
Compression parameters used in the iterations of Trotter (in the
form required by :func:`mpnum.MPArray.compress`. If unsure, look at
https://github.com/dseuss/mpnum/blob/master/examples/mpnum_intro.ipynb .)
If omitted, some default compression will be used that will
probably work but might lead to problems. See
:func:`_set_compr_params` for more information.
compr (dict):
Parameters for the compression which is executed on every MPA during
the calculations, except for the Trotter calculation, where
trotter_compr is used. compr = dict(method='svd', rank=10) would for
example ensure that the ranks of any MPA never exceed 10 during all
of the calculations. An accepted relative error for the
compression can be provided in addition to or instead of ranks,
which would lead to e.g.
compr = dict(method='svd', rank=10, relerr=1e-12).
If omitted, some default compression will be used that will
probably work but might lead to problems. See
:func:`_set_compr_params` for more information.
subsystems (list):
A list defining for which subsystem the reduced density matrix or
whether the full state should be returned for a time in ``ts``.
This can be a list of the length same as that of ``ts`` looking
like ``[[a1, b1], [a2, b2], ...]`` or just a list like ``[a, b]``.
In the first case the respective subsystem for every entry in ts
will be returned, in the second case the same subsystem will be
returned for all entries in ``ts``. ``[a, b]`` will lead to a
return of the reduced density matrix of the sites from ``a`` up to,
but not including, ``b``. For example ``[0, 3]`` if the reduced
density matrix of the first three sites should be returned. A time
can occur twice in ``ts`` and then different subsystems to be
returned can be defined for that same time. If this parameter is
omitted, the full system will be returned for every time in ``ts``.
v (int):
Level of verbose output. 0 means no output, 1 means that some
basic output showing the progress of calculations is produced. 2
will in addition show the bond dimensions of the state after every
couple of iterations, 3 will show bond dimensions after every
Trotter iteration.
Returns:
list[list[float], list[list[int]], list[mpnum.MPArray]]:
            A list with three items: (i) The list of times for which the density
matrices have been computed (ii) The list indicating which
subsystems of the system are returned at the respective time of the
first list (iii) The list of density matrices as MPO or PMPS as
mpnum.MPArray, depending on the input "method". If that was MPS, the
full states will still be MPSs, the reduced ones will be MPOs.
"""
if compr is None: compr, _ = _set_compr_params()
if trotter_compr is None: _, trotter_compr = _set_compr_params()
state.compress(**compr)
state = normalize(state, method)
if len(state) < 3:
raise ValueError("State has too few sites")
if (np.array(ts) == 0).all():
raise ValueError(
"No time evolution requested by the user. Check your input 't'")
    if subsystems is None:
subsystems = [0, len(state)]
step_numbers, tau = _times_to_steps(ts, num_trotter_slices)
subsystems = _get_subsystems_list(subsystems, len(step_numbers))
us = _trotter_slice(hamiltonians=hamiltonians, tau=tau,
num_sites=len(state), trotter_order=trotter_order,
compr=compr)
if v != 0:
print("Time evolution operator for Trotter slice calculated, "
"starting "
"Trotter iterations...")
return _time_evolution(state, us, step_numbers, subsystems, tau, method,
trotter_compr, v)
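# Illustrative usage sketch (added for clarity; not part of the original
# module). It relies on the module-level imports `np` (numpy) and `mp`
# (mpnum) already used above; the chain length, Hamiltonian terms and
# parameter values below are arbitrary assumptions, not recommendations.
def _example_evolve_usage():
    """Evolve |000> of a three-site spin-1/2 chain under a transverse-field
    Ising-type Hamiltonian and return the reduced state of the first site."""
    sx = np.array([[0., 1.], [1., 0.]])
    sz = np.array([[1., 0.], [0., -1.]])
    h_single = sx                  # same single-site term on every site
    h_adjacent = np.kron(sz, sz)   # same nearest-neighbour coupling
    psi = np.zeros(8)
    psi[0] = 1.                    # |000> as a global state vector
    state = mp.MPArray.from_array(psi.reshape([2, 2, 2]), ndims=1)
    times, sites, states = evolve(
        state, [h_single, h_adjacent], num_trotter_slices=100, method='mps',
        trotter_order=2, ts=[0.5, 1.0], subsystems=[0, 1])
    return times, sites, states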
def _time_evolution(state, us, step_numbers, subsystems, tau, method,
trotter_compr, v):
"""
Implements time-evolution via Trotter-Suzuki decomposition
Args:
state (mpnum.MPArray):
The state to be evolved in time
us (list[mpnum.MPArray]):
List of ordered operator exponentials for a single Trotter slice
step_numbers (list[int]):
List of time steps as generated by :func:`_times_to_steps`
subsystems (list[list[int]]):
Sites for which the subsystem states should be returned at the
respective times
tau (float):
Duration of one Trotter slice. As defined in :func:`_times_to_steps`
method (str):
Which method to use. Either 'mps', 'mpo' or 'pmps'.
trotter_compr (dict):
Compression parameters used in the iterations of Trotter-Suzuki
decomposition.
v (int):
Level of verbose output. 0 means no output, 1 means that some
basic output showing the progress of calculations is produced. 2
will in addition show the bond dimensions of the state after every
couple of iterations, 3 will show bond dimensions after every
Trotter iteration.
Returns:
        list[list[float], list[list[int]], list[mpnum.MPArray]]:
            A list with three items: (i) The list of times for which the density
matrices have been computed (ii) The list indicating which
subsystems of the system are returned at the respective time of the
first list (iii) The list of density matrices as MPO or PMPS as
mpnum.MPArray, depending on the input "method". If that was MPS, the
full states will still be MPSs, the reduced ones will be MPOs.
"""
c = Counter(step_numbers)
times = []
states = []
compr_errors = []
trot_errors = []
var_compression = False
if trotter_compr['method'] == 'var':
var_compression = True
accumulated_overlap = 1
accumulated_trotter_error = 0
for i in range(max(step_numbers) + 1):
for j in range(c[i]):
_append(times, states, compr_errors, trot_errors, tau, i, j,
step_numbers, subsystems, state, accumulated_overlap,
accumulated_trotter_error, method)
for u in us:
if var_compression:
trotter_compr['startmpa'] = mp.MPArray.copy(state)
state = mp.dot(u, state)
accumulated_overlap *= state.compress(**trotter_compr)
if method == 'mpo':
for u in us:
if var_compression:
trotter_compr['startmpa'] = mp.MPArray.copy(state)
state = mp.dot(state, u.T.conj())
accumulated_overlap *= state.compress(**trotter_compr)
state = normalize(state, method)
accumulated_trotter_error += tau ** 3
if (v == 1 or v == 2) and np.sqrt(i + 1) % 1 == 0 and i < \
step_numbers[-1]:
print(str(i + 1) + " Trotter iterations finished...")
if v == 2:
print("Ranks: " + str(state.ranks))
if v == 3 and i < step_numbers[-1]:
print(str(i + 1) + " Trotter iterations finished...")
print("Ranks: " + str(state.ranks))
if v != 0:
print("Done with time evolution")
return times, subsystems, states # , compr_errors, trot_errors
def _append(times, states, compr_errors, trot_errors, tau, i, j, step_numbers,
subsystems, state, accumulated_overlap,
accumulated_trotter_error, method):
"""
Function to append time evolved state etc to output of
:func:`_time_evolution`
Args:
times (list[float]):
List containing the times to which the states are evolved
states (list[mpnum.MPArray]):
List containing the evolved states
compr_errors (list[float]):
List containing the respective compression errors
trot_errors (list[float]):
List containing the respective Trotter errors
tau (float):
The time of one Trotter slice
i (int):
Number indicating which is the current Trotter slice
j (int):
Number indicating how many times a state related to the
current i has been appended already
step_numbers (list[int]):
List containing the time steps
subsystems (list[list[int]]):
List of sites for which the subsystem should be returned at the
respective time
state (mpnum.MPArray):
The current state
accumulated_overlap (float):
The accumulated overlap error
accumulated_trotter_error (float):
The accumulated Trotter error
method (str):
Method to use as defined in :func:`evolve`
Returns:
None: Nothing, changes happen in place
"""
times.append(tau * i)
sites = [x for t, x in zip(step_numbers, subsystems) if t == i][j]
if sites == [0, len(state)]:
states.append(state.copy())
elif method == 'mpo':
states.append(next(
mp.reductions_mpo(state, sites[1] - sites[0], [sites[0]])))
elif method == 'pmps':
states.append(next(
mp.reductions_pmps(state, sites[1] - sites[0], [sites[0]])))
elif method == 'mps':
states.append(next(
mp.reductions_mps_as_mpo(state, sites[1] - sites[0], [sites[0]])))
compr_errors.append(accumulated_overlap)
trot_errors.append(accumulated_trotter_error)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
StarcoderdataPython
|
1704421
|
<reponame>line/networking-sr
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import os
import sys
from neutron_lib.agent import topics
from neutron_lib import constants
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from oslo_utils import excutils
import pyroute2
from pyroute2.config.eventlet import eventlet_config
from neutron.agent.common import utils
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import profiler as setup_profiler
from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb
from neutron.privileged.agent.linux import ip_lib as privileged
from networking_sr.agent import rpc as sr_rpc
from networking_sr.common import config # noqa
from networking_sr.ml2.agent import sr_agent_loop
eventlet_config()
LOG = logging.getLogger(__name__)
SR_AGENT_BINARY = 'neutron-sr-agent'
AGENT_TYPE_SR = 'SR agent'
EXTENSION_DRIVER_TYPE = 'sr'
INTERFACE_FS = "/sys/class/net/"
RESOURCE_ID_LENGTH = 11
VRF_TABLE_NUMBER_BASE = 1000
class SysctlCommandError(exceptions.NeutronException):
message = "Sysctl command %(cmd)s failed."
class SrManager(amb.CommonAgentManagerBase):
def __init__(self):
super(SrManager, self).__init__()
self.process_monitor = external_process.ProcessMonitor(
cfg.CONF, resource_type="sr-agent")
self.node_id = cfg.CONF.sr.segment_node_id
if not self.node_id:
LOG.error("Segment Node ID is not set in config.")
sys.exit(1)
self.gw_id = cfg.CONF.sr.segment_gw_id
self._setup_system()
self._setup_ipv6()
# vrf_tables = {"vrf name": vrf_table_id}
self.vrf_tables = {}
# Check existing vrf
# TODO(hichihara): Refactor the following codes
# Exteded privileged ip_lib should be created
with pyroute2.IPDB() as ipdb:
interfaces = ipdb.by_name.keys()
vrfs = []
for i in interfaces:
if i[:3] == "vrf":
vrfs.append(i)
with pyroute2.IPRoute() as ip:
for vrf in vrfs:
try:
vrf_id = ip.link_lookup(ifname=vrf)[0]
except IndexError:
                    raise privileged.NetworkInterfaceNotFound(device=vrf,
                                                              namespace=None)
link = ip.link("get", index=vrf_id)[0]
linkinfo = self._nlattr_get(link['attrs'], 'IFLA_LINKINFO')
if not linkinfo:
LOG.error("Failed to cannot found attr "
"IFLA_LINKINFO from vrf interface")
sys.exit(1)
info_data = self._nlattr_get(linkinfo["attrs"],
"IFLA_INFO_DATA")
if not info_data:
LOG.error("Failed to cannot found attr "
"IFLA_INFO_DATA from vrf interface")
sys.exit(1)
vrf_table = self._nlattr_get(info_data["attrs"],
"IFLA_VRF_TABLE")
if not vrf_table:
LOG.error("Failed to cannot found attr "
"IFLA_VRF_TABLE from vrf interface")
sys.exit(1)
self.vrf_tables[vrf] = vrf_table
LOG.debug("Found existing vrf %(vrf)s with table id "
"%(table_id)d", {"vrf": vrf, "table_id": vrf_table})
# TODO(hichihara): Replace this to a way which actually gets
# current rules
self.encap_info = []
def _nlattr_get(self, attrs, key):
# Search by key from attrs, if not found, return None
for attr in attrs:
if attr[0] == key:
return attr[1]
return None
def _setup_system(self):
# Make sure to allow ip forward
cmd = ['net.ipv4.ip_forward=1']
result = ip_lib.sysctl(cmd)
if result == 1:
LOG.error("Failed to enable net.ipv4.ip_forward=1.")
sys.exit(1)
        # Make sure to allow TCP packets to pass through the default vrf
cmd = ['net.ipv4.tcp_l3mdev_accept=1']
result = ip_lib.sysctl(cmd)
if result == 1:
LOG.error("Failed to enable net.ipv4.tcp_l3mdev_accept=1.")
sys.exit(1)
        # Make sure to allow UDP packets to pass through the default vrf
cmd = ['net.ipv4.udp_l3mdev_accept=1']
result = ip_lib.sysctl(cmd)
if result == 1:
LOG.error("Failed to enable net.ipv4.udp_l3mdev_accept=1.")
sys.exit(1)
cmd = ['net.ipv6.conf.all.seg6_enabled=1']
result = ip_lib.sysctl(cmd)
if result == 1:
LOG.error("Failed to enable net.ipv6.conf.all.seg6_enabled=1.")
sys.exit(1)
cmd = ['net.ipv6.conf.all.forwarding=1']
result = ip_lib.sysctl(cmd)
if result == 1:
LOG.error("Failed to enable net.ipv6.conf.all.forwarding=1.")
sys.exit(1)
cmd = ['net.ipv4.conf.all.rp_filter=0']
result = ip_lib.sysctl(cmd)
if result == 1:
LOG.error("Failed to enable net.ipv4.conf.all.rp_filter=0.")
sys.exit(1)
for interface in cfg.CONF.sr.srv6_interfaces:
cmd = ['net.ipv4.conf.%s.rp_filter=0' % interface]
result = ip_lib.sysctl(cmd)
if result == 1:
LOG.error("Failed to enable net.ipv4.conf.%s.rp_filter=0.",
interface)
sys.exit(1)
# Make sure to allow bridge to call iptables
cmd = ['net.bridge.bridge-nf-call-iptables=1']
result = ip_lib.sysctl(cmd)
if result == 1:
LOG.error("Failed to enable net.bridge.bridge-nf-call-iptables=1.")
sys.exit(1)
def _setup_ipv6(self):
# Setup SRv6 configuration
# TODO(hichihara): Refactor to use ip_lib instead of command execute
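        # Note (added): the two commands below re-add the "local" table
        # lookup at pref 32765 and remove the default pref-0 rule; the intent
        # appears to be letting SRv6-related routes match before plain local
        # delivery.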
cmd = ["ip", "-6", "rule", "add", "pref", "32765", "table", "local"]
utils.execute(cmd, run_as_root=True,
check_exit_code=False)
cmd = ["ip", "-6", "rule", "del", "pref", "0"]
utils.execute(cmd, run_as_root=True,
check_exit_code=False)
def _setup_interface_ip(self, ip, interface='lo'):
"""Sets up an IP address on the target interface
Args:
ip(String): ip address with cidr
interface(String): network interface, 'lo' by default
Return:
None
"""
dev = ip_lib.IPDevice(interface)
dev.addr = ip_lib.IpAddrCommand(dev)
        existing_addresses = ip_lib.get_devices_with_ip(None, name=dev.name)
        existing_ips = [addr['cidr'] for addr in existing_addresses]
if ip not in existing_ips:
LOG.info("Adding %s to %s interface" % (ip, dev.name))
dev.addr.add(cidr=ip)
else:
LOG.debug("%s interface already have %s ip" % (dev.name, ip))
def get_agent_configurations(self):
configurations = {}
configurations['segment_node_id'] = self.node_id
return configurations
def get_agent_id(self):
devices = ip_lib.IPWrapper().get_devices(True)
if devices:
mac = ip_lib.get_device_mac(devices[0].name)
return 'sr%s' % mac.replace(":", "")
else:
LOG.error("Unable to obtain MAC address for unique ID. "
"Agent terminated!")
sys.exit(1)
def get_all_devices(self, with_ifindex=False):
"""Return all existing tap devices
They are technically devices having name starting with
constants.TAP_DEVICE_PREFIX
Args:
with_ifindex(bool): if True, return dict include device index,
if False, return set include just device name
Return:
if with_ifindex is True:
devices_with_ifindex(dict): {"<device_name>": "<device index>"}
if with_ifindex is False:
devices(set<String>): set contains device name
"""
devices = {} if with_ifindex else set()
for device in os.listdir(INTERFACE_FS):
if not device.startswith(constants.TAP_DEVICE_PREFIX):
continue
# Try to lookup interface index as well
if with_ifindex:
try:
with open(os.path.join(
INTERFACE_FS, device, 'ifindex'), 'r') as f:
devices[device] = int(f.read().strip())
except (IOError, ValueError):
                    # if the lookup failed, the device was deleted after the
                    # listdir call, so we should not treat it as a current
                    # device
continue
else:
devices.add(device)
return devices
def get_all_encap_rules(self):
return self.encap_info
def get_devices_modified_timestamps(self, devices):
return {}
def get_extension_driver_type(self):
return EXTENSION_DRIVER_TYPE
def get_rpc_callbacks(self, context, agent, sg_agent):
return SrRpcCallbacks(context, agent, sg_agent)
def get_agent_api(self, **kwargs):
pass
def get_rpc_consumers(self):
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.UPDATE],
[topics.SECURITY_GROUP, topics.UPDATE],
[sr_rpc.TOPICS_ENCAP, topics.DELETE],
[sr_rpc.TOPICS_ENCAP_RULE, topics.UPDATE],
[sr_rpc.TOPICS_ENCAP, topics.UPDATE],
[sr_rpc.TOPICS_VRF, topics.DELETE]]
return consumers
def plug_interface(self, vrf, device, device_details, ports, vrf_ip,
vrf_cidr):
tap_device_name = device
try:
if not ip_lib.device_exists(tap_device_name):
LOG.debug("Tap device: %s does not exist on "
"this host, skipped", tap_device_name)
return False
self.configure_tap(tap_device_name, device_details['mac_address'],
device_details['related_ips'], ports,
vrf, vrf_ip, vrf_cidr)
LOG.debug("Finished to configure tap %s device", tap_device_name)
return True
except Exception:
with excutils.save_and_reraise_exception() as ctx:
if not ip_lib.device_exists(tap_device_name):
# the exception was likely a side effect of the tap device
# being removed during handling so we just return false
# like we would if it didn't exist to begin with.
ctx.reraise = False
return False
def configure_tap(self, tap_device_name, vm_mac, related_ips,
ports, vrf, vrf_ip, vrf_cidr):
"""Configure tap device
        Traffic for the VM's IP goes to the tap device the VM is connected to.
        NB: a single port can have multiple IP addresses; that's why
        related_ips is a list of IP information dicts.
Args:
tap_device_name(String): tap device name
vm_mac(String): mac address VM use
related_ips(list<dict>): [{'gw_ip': <gateway_ip>,
'cidr': <cidr of subnet>,
'vm_ip': <vm ip address>}]
Return:
None
"""
tap_dev = ip_lib.IPDevice(tap_device_name)
tap_dev.addr = IpAddrCommandAcceptArgs(tap_dev)
for related_ip in related_ips:
# Ensure veth
qvb, qvr = self._get_veth_pair_names(tap_device_name[3:])
qvr_dev = self._add_veth(qvb, qvr)
            # Create bridge
br_name = "qbr%s" % tap_device_name[3:]
self._ensure_bridge(br_name, [qvb, tap_dev.name])
cidr = '/' + related_ip['cidr']
# assign virtual gateway ip to qvr
qvr_address = related_ip['gw_ip'] + cidr
LOG.debug("Ensure %s having %s" % (qvr_dev.name, qvr_address))
self._ensure_dev_having_ip(qvr_dev, qvr_address)
# Ensure vrf exist
vrf_table = self._ensure_vrf(vrf, vrf_ip, vrf_cidr)
# assign qvr to vrf
            self._add_qvr_to_vrf(vrf, qvr)
# Configure SRv6
self._set_srv6_rules(vrf, vrf_ip, ports)
# add static route /32 to tap
vm_ip_for_route = related_ip['vm_ip'] + '/' + '32'
LOG.debug("Ensure root namespace having route %s via %s" % (
vm_ip_for_route, qvr_dev.name))
self._ensure_vm_route(qvr_dev, vm_ip_for_route, vrf_table)
for kernel_opts in ("net.ipv4.conf.%s.proxy_arp=1",
"net.ipv4.neigh.%s.proxy_delay=0"):
cmd = [kernel_opts % qvr]
result = ip_lib.sysctl(cmd)
if result == 1:
raise SysctlCommandError(cmd=cmd)
def _get_veth_pair_names(self, iface_id):
return (("qvb%s" % iface_id), ("qvr%s" % iface_id))
def _add_veth(self, qvb, qvr):
ip = ip_lib.IPWrapper()
try:
qvb_dev, qvr_dev = ip.add_veth(qvb, qvr)
qvb_dev.link.set_up()
qvr_dev.link.set_up()
except RuntimeError:
qvr_dev = ip_lib.IPDevice(qvr)
qvr_dev.addr = IpAddrCommandAcceptArgs(qvr_dev)
return qvr_dev
def _bridge_exists_and_ensure_up(self, bridge_name):
"""Check if the bridge exists and make sure it is up."""
br = ip_lib.IPDevice(bridge_name)
br.set_log_fail_as_error(False)
try:
# If the device doesn't exist this will throw a RuntimeError
br.link.set_up()
except RuntimeError:
return False
return True
def _ensure_bridge(self, bridge_name, interfaces):
"""Create a bridge unless it already exists."""
# _bridge_exists_and_ensure_up instead of device_exists is used here
# because there are cases where the bridge exists but it's not UP,
# for example:
# 1) A greenthread was executing this function and had not yet executed
# "ip link set bridge_name up" before eventlet switched to this
# thread running the same function
# 2) The Nova VIF driver was running concurrently and had just created
# the bridge, but had not yet put it UP
if not self._bridge_exists_and_ensure_up(bridge_name):
LOG.debug("Starting bridge %(bridge_name)s for subinterface "
"%(interfaces)s",
{'bridge_name': bridge_name, 'interfaces': interfaces})
bridge_device = bridge_lib.BridgeDevice.addbr(bridge_name)
if bridge_device.setfd(0):
return
if bridge_device.disable_stp():
return
if bridge_device.disable_ipv6():
return
if bridge_device.link.set_up():
return
LOG.debug("Done starting bridge %(bridge_name)s for "
"subinterface %(interfaces)s",
{'bridge_name': bridge_name, 'interfaces': interfaces})
else:
bridge_device = bridge_lib.BridgeDevice(bridge_name)
# Check if the interface is part of the bridge
for interface in interfaces:
if not bridge_device.owns_interface(interface):
try:
bridge_device.addif(interface)
except Exception as e:
LOG.error(("Unable to add %(interface)s to %(bridge_name)s"
"! Exception: %(e)s"),
{'interface': interface,
'bridge_name': bridge_name,
'e': e})
# Try ip link set
cmd = ["ip", "link", "set", "dev", interface, "master",
bridge_name]
utils.execute(cmd, run_as_root=True,
check_exit_code=False)
return
return bridge_name
def _ensure_dev_having_ip(self, target_dev, ip):
"""Ensure target device have ip
Args:
target_dev(ip_lib.IPDevice):
ip(String): ip address with cidr
Return:
None
"""
        existing_addresses = ip_lib.get_devices_with_ip(None,
                                                        name=target_dev.name)
        existing_ips = [addr['cidr'] for addr in existing_addresses]
        LOG.debug("The existing addresses of dev %s are %s" % (target_dev.name,
                                                               existing_ips))
if ip not in existing_ips:
target_dev.addr.add(
cidr=ip, additional_args=['noprefixroute', ])
else:
LOG.debug("%s already have ip %s" % (target_dev.name, ip))
def _ensure_vrf(self, vrf, vrf_ip, cidr):
"""Ensure vrf interface
return: vrf_table
"""
if self.vrf_tables:
vrf_table = max(list(self.vrf_tables.values())) + 1
else:
            vrf_table = VRF_TABLE_NUMBER_BASE
if vrf not in list(self.vrf_tables):
privileged.create_interface(vrf, None, "vrf", vrf_table=vrf_table)
privileged.set_link_attribute(vrf, None, state="up")
LOG.debug("VRF %s is created" % vrf)
self.vrf_tables[vrf] = vrf_table
# TODO(hichihara): Refactor to use ip_lib instead of command
ip = vrf_ip + '/' + cidr
self._setup_interface_ip(ip, vrf)
cmd = ["ip", "route", "replace", vrf_ip, "dev", vrf]
utils.execute(cmd, run_as_root=True,
check_exit_code=False)
vrf_sid = ("%(node_id)s:%(vrf_ip)s/128" % {"node_id": self.node_id,
"vrf_ip": vrf_ip})
self._setup_interface_ip(vrf_sid, vrf)
self._setup_interface_ip("169.254.169.254/32", vrf)
# Create encap rules
for encap_info in self.encap_info:
if vrf == encap_info['vrf']:
self.add_encap_rules([encap_info], add_flag=False)
break
else:
vrf_table = self.vrf_tables[vrf]
return vrf_table
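    # Illustrative note (added): with hypothetical inputs vrf="vrf-1",
    # vrf_ip="10.1.0.1", cidr="24" and segment_node_id="fc00:0:0:0:0:1",
    # the method above creates the "vrf-1" device with a fresh routing table,
    # assigns 10.1.0.1/24, the SID fc00:0:0:0:0:1:10.1.0.1/128 and
    # 169.254.169.254/32 to it, and replays any stored encap rules for it.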
    def _add_qvr_to_vrf(self, vrf, qvr):
vrf_idx = privileged.get_link_id(vrf, None)
privileged.set_link_attribute(qvr, None, master=vrf_idx)
def _set_srv6_rules(self, vrf, vrf_ip, ports):
# Encap rules
for port in ports:
# TODO(hichihara): Configure multiple fixed_ips
target_ip = port["ip"] + "/32"
target_node_id = port["segment_node_id"]
if target_node_id is None:
continue
# Ensure connection between VMs have same network(vrf)
target_vrf = port["vrf"]
if target_vrf != vrf:
continue
if target_node_id != self.node_id:
# Create target_sid
target_sid = ("%(node_id)s:%(vrf_ip)s" % {
"node_id": target_node_id,
"vrf_ip": vrf_ip})
cmd = ["ip", "route", "replace", target_ip, "encap", "seg6",
"mode", "encap", "segs", target_sid, "dev", vrf,
"vrf", vrf]
utils.execute(cmd, run_as_root=True,
check_exit_code=False)
# Default route to network nodes
if self.gw_id:
target_sid = ("%(node_id)s:%(vrf_ip)s" % {
"node_id": self.gw_id,
"vrf_ip": vrf_ip})
cmd = ["ip", "route", "replace", "0.0.0.0/0", "encap", "seg6",
"mode", "encap", "segs", target_sid, "dev", vrf, "vrf", vrf]
utils.execute(cmd, run_as_root=True,
check_exit_code=False)
# Decap rules
# TODO(hichihara): Refactor to use ip_lib instead of command execute
decap_sid = ("%(node_id)s:%(vrf_ip)s" % {"node_id": self.node_id,
"vrf_ip": vrf_ip})
cmd = ["ip", "-6", "route", "replace", "local", decap_sid, "encap",
"seg6local", "action", "End.DX4", "nh4", vrf_ip, "dev", vrf]
utils.execute(cmd, run_as_root=True,
check_exit_code=False)
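    # Illustrative example (added): assuming hypothetical values
    # segment_node_id="fc00:0:0:0:0:1", vrf="vrf-1" and vrf_ip="10.1.0.1",
    # the commands issued above are equivalent to:
    #   ip route replace <vm_ip>/32 encap seg6 mode encap \
    #       segs <target_node_id>:10.1.0.1 dev vrf-1 vrf vrf-1
    #   ip route replace 0.0.0.0/0 encap seg6 mode encap \
    #       segs <gw_id>:10.1.0.1 dev vrf-1 vrf vrf-1
    #   ip -6 route replace local fc00:0:0:0:0:1:10.1.0.1 encap seg6local \
    #       action End.DX4 nh4 10.1.0.1 dev vrf-1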
def _ensure_vm_route(self, target_dev, vm_route, vrf_table):
"""Ensure root namespace on host have vm_route
Args:
target_dev(ip_lib.IPDevice):
vm_route(String): ip address for this vm with /32
e.g. If vm's ip is 192.168.0.2/16,
vm_route should be 192.168.0.2/32
Return:
None
"""
target_dev.route.add_route(cidr=vm_route, table=vrf_table)
def _get_ip_version(self, cidr):
"""Check if cidr is ip version 4 or not by existence of :
Args:
cidr(String): ip address with cidr
Return:
version(Int): 4 or 6 depending on cidr
"""
if ":" in cidr:
return 6
else:
return 4
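    # e.g. (illustrative): _get_ip_version("2001:db8::/64") -> 6,
    #                      _get_ip_version("192.0.2.0/24") -> 4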
def add_encap_rules(self, encap_rules, add_flag=True):
for target in encap_rules:
# Set srv6 rule on the vrf
vrf = target['vrf']
encap_info = None
for encap in self.encap_info:
if encap['id'] == target['id']:
encap_info = encap
break
for rule in target['rules']:
ip = rule['destination']
target_sid = rule['nexthop']
cmd = ["ip", "route", "replace", ip, "encap", "seg6", "mode",
"encap", "segs", target_sid, "dev", vrf, "vrf", vrf]
utils.execute(cmd, run_as_root=True,
check_exit_code=False)
if add_flag:
if encap_info is not None:
encap_info['rules'] += target['rules']
else:
self.encap_info.append(target)
def remove_encap_rules(self, encap_rules):
for target in encap_rules:
# Remove srv6 rule on the vrf
vrf = target['vrf']
encap_info = None
for encap in self.encap_info:
if encap['id'] == target['id']:
encap_info = encap
break
else:
break
for rule in target['rules']:
ip = rule['destination']
target_sid = rule['nexthop']
cmd = ["ip", "route", "del", ip, "encap", "seg6", "mode",
"encap", "segs", target_sid, "dev", vrf, "vrf", vrf]
utils.execute(cmd, run_as_root=True,
check_exit_code=False)
encap_info['rules'].remove(rule)
def setup_target_sr(self, updated_targets):
for target in updated_targets:
# if target node is same as local node_id,
# we should not configure encap rule
if target["segment_node_id"] == self.node_id:
continue
# Set srv6 rule on the vrf
vrf = target["vrf"]
vrf_ip = target["vrf_ip"]
# Ensure vrf exist
self._ensure_vrf(vrf, vrf_ip, target["cidr"])
ip = target["ip"] + "/32"
node_id = target["segment_node_id"]
target_sid = ("%(node_id)s:%(vrf_ip)s" % {
"node_id": node_id,
"vrf_ip": vrf_ip})
cmd = ["ip", "route", "replace", ip, "encap", "seg6", "mode",
"encap", "segs", target_sid, "dev", vrf, "vrf", vrf]
utils.execute(cmd, run_as_root=True,
check_exit_code=False)
def clear_target_sr(self, removed_targets):
for target in removed_targets:
# Remove srv6 rule on the vrf
vrf = target["vrf"]
vrf_ip = target["vrf_ip"]
ip = target["ip"] + "/32"
node_id = target["segment_node_id"]
target_sid = ("%(node_id)s:%(vrf_ip)s" % {
"node_id": node_id,
"vrf_ip": vrf_ip})
cmd = ["ip", "route", "del", ip, "encap", "seg6", "mode",
"encap", "segs", target_sid, "dev", vrf, "vrf", vrf]
utils.execute(cmd, run_as_root=True,
check_exit_code=False)
def remove_vrf(self, vrf):
if self.vrf_tables.get(vrf):
privileged.set_link_attribute(vrf, None, state="down")
privileged.delete_interface(vrf, None)
self.vrf_tables.pop(vrf)
LOG.debug("Removed vrf %s", vrf)
def get_tap_device_name(self, interface_id):
"""Get tap device name by interface_id.
Normally tap device name is the "tap" + first RESOURCE_ID_LENGTH
characters of port id
Args:
interface_id(String): port uuid
Return:
tap_device_name(String): tap device name on the based of port id
"""
if not interface_id:
LOG.warning("Invalid Interface ID, will lead to incorrect "
"tap device name")
tap_device_name = constants.TAP_DEVICE_PREFIX + \
interface_id[:RESOURCE_ID_LENGTH]
return tap_device_name
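    # e.g. (illustrative): a port id starting with "3fb6b20a-2d32-..." maps
    # to the tap device name "tap3fb6b20a-2d" (prefix + first 11 characters).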
def ensure_port_admin_state(self, tap_name, admin_state_up):
"""Ensure the tap device is same status as admin_state_up
Args:
tap_name(String): tap device name
admin_state_up(Bool): port admin status neutron maintain
Return:
None
"""
LOG.debug("Setting admin_state_up to %s for device %s",
admin_state_up, tap_name)
if admin_state_up:
ip_lib.IPDevice(tap_name).link.set_up()
else:
ip_lib.IPDevice(tap_name).link.set_down()
def _delete_bridge(self, bridge_name):
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if bridge_device.exists():
try:
LOG.debug("Deleting bridge %s", bridge_name)
if bridge_device.link.set_down():
return
if bridge_device.delbr():
return
LOG.debug("Done deleting bridge %s", bridge_name)
return
except RuntimeError:
pass
LOG.debug("Cannot delete bridge %s; it does not exist",
bridge_name)
def delete_port(self, device):
# Delete veth
qvb, qvr = self._get_veth_pair_names(device[3:])
ip = ip_lib.IPWrapper()
try:
ip.del_veth(qvb)
LOG.debug("Delete veth pair %s %s", qvb, qvr)
except RuntimeError:
pass
# Delete bridge
br_name = "qbr%s" % device[3:]
self._delete_bridge(br_name)
def setup_arp_spoofing_protection(self, device, device_details):
pass
def delete_arp_spoofing_protection(self, devices):
pass
def delete_unreferenced_arp_protection(self, current_devices):
pass
class SrRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
amb.CommonAgentManagerRpcCallBackBase):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def __init__(self, context, agent, sg_agent):
super(SrRpcCallbacks, self).__init__(context, agent, sg_agent)
self.removed_devices_encap = set()
self.removed_ports = {}
self.encap_info = []
self.updated_devices_encap = set()
self.updated_ports = {}
self.removed_vrfs = set()
def port_update(self, context, **kwargs):
"""RPC for port_update event
        This method is called when a port is updated in the neutron server.
        It just adds the device name associated with the updated port to the
        updated_devices list; that device is then treated as updated in the
        next iteration and, if its tap is on this host, plug_interface is
        executed for that tap.
"""
port_id = kwargs['port']['id']
device_name = self.agent.mgr.get_tap_device_name(port_id)
# Put the device name in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
self.updated_devices.add(device_name)
LOG.debug("port_update RPC received for port: %s", port_id)
def encap_rule_update(self, context, **kwargs):
encap_info = kwargs['encap_info']
for encap in self.encap_info:
if encap['id'] == encap_info['id']:
self.encap_info.remove(encap)
break
self.encap_info.append(encap_info)
LOG.debug("encap_update RPC received for encap rules: %s",
encap_info)
def get_and_clear_updated_encaps(self):
encap_info = self.encap_info
self.encap_info = []
return encap_info
def encap_delete(self, context, **kwargs):
port = kwargs['port']
port_id = port['id']
device_name = self.agent.mgr.get_tap_device_name(port_id)
self.removed_devices_encap.add(device_name)
self.removed_ports[device_name] = port
LOG.debug("encap_delete RPC received for port: %s", port_id)
def encap_update(self, context, **kwargs):
port = kwargs['port']
port_id = port['id']
device_name = self.agent.mgr.get_tap_device_name(port_id)
self.updated_devices_encap.add(device_name)
self.updated_ports[device_name] = port
LOG.debug("encap_update RPC received for port: %s", port_id)
def network_update(self, context, **kwargs):
"""RPC for network_update event
        This method is called when a network is updated in the neutron server.
        It adds all ports under that network to the updated_devices list.
"""
network_id = kwargs['network']['id']
LOG.debug("network_update message processed for network "
"%(network_id)s, with ports: %(ports)s",
{'network_id': network_id,
'ports': self.agent.network_ports[network_id]})
for port_data in self.agent.network_ports[network_id]:
self.updated_devices.add(port_data['device'])
def get_and_clear_removed_devices_encap(self):
"""Get and clear the list of devices for which a removed was received.
:return: set - A set with removed devices. Format is ['tap1', 'tap2']
"""
        # Save and reinitialize the set variable that the encap_delete RPC uses.
# This should be thread-safe as the greenthread should not yield
# between these two statements.
removed_devices_encap = self.removed_devices_encap
self.removed_devices_encap = set()
return removed_devices_encap
def get_removed_ports(self, devices):
for device in devices:
try:
yield self.removed_ports[device]
except KeyError:
# Already removed
pass
def clear_removed_ports(self, devices):
for device in devices:
self.removed_ports.pop(device, None)
def network_delete(self, context, **kwargs):
pass
def get_and_clear_updated_devices_encap(self):
"""Get and clear the list of devices for which a updated was received.
:return: set - A set with updated devices. Format is ['tap1', 'tap2']
"""
        # Save and reinitialize the set variable that the encap_update RPC uses.
# This should be thread-safe as the greenthread should not yield
# between these two statements.
updated_devices_encap = self.updated_devices_encap
self.updated_devices_encap = set()
return updated_devices_encap
def get_updated_ports(self, devices):
for device in devices:
try:
yield self.updated_ports[device]
except KeyError:
# Already removed
pass
def clear_updated_ports(self, devices):
for device in devices:
self.updated_ports.pop(device, None)
def vrf_delete(self, context, **kwargs):
vrf = kwargs['vrf']
LOG.debug("vrf_delete message processed for vrf "
"%(vrf)s", {'vrf': vrf})
self.removed_vrfs.add(vrf)
def get_and_clear_removed_vrfs(self):
"""Get and clear the list of vrfs for which a removed was received.
:return: set - A set with removed vrfs.
"""
removed_vrfs = self.removed_vrfs
self.removed_vrfs = set()
return removed_vrfs
class IpAddrCommandAcceptArgs(ip_lib.IpAddrCommand):
def add(self, cidr, scope='global', add_broadcast=True,
additional_args=None):
"""This is method for executing "ip addr add" as root
The reason why it override is we want to specify option.
but super class doesn't allow us to pass additional option
Args:
cidr(String): ip address with subnet
scope(String): scope of this address
add_broadcast(Bool): if True, it add "brd" option
additional_args(list<String>): additional arguments
Return:
None
"""
net = netaddr.IPNetwork(cidr)
args = ['add', cidr,
'scope', scope,
'dev', self.name]
if add_broadcast and net.version == 4:
args += ['brd', str(net[-1])]
if additional_args:
args += additional_args
self._as_root([net.version], tuple(args))
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
manager = SrManager()
polling_interval = cfg.CONF.AGENT.polling_interval
quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
agent = sr_agent_loop.SrAgentLoop(manager, polling_interval,
quitting_rpc_timeout,
AGENT_TYPE_SR,
SR_AGENT_BINARY)
setup_profiler.setup(SR_AGENT_BINARY, cfg.CONF.host)
LOG.info("Agent initialized successfully, now running... ")
launcher = service.launch(cfg.CONF, agent, restart_method='mutate')
launcher.wait()
|
StarcoderdataPython
|
3255589
|
<reponame>santoshmano/pybricks<gh_stars>0
def print_board(board):
print("N-queens Board")
[print(_) for _ in board]
def create_board(size):
return [[0 for _ in range(size)] for _ in range(size)]
def is_safe(board, row, col):
for r in range(len(board)):
if board[r][col] == 1:
return False
for c in range(len(board)):
if board[row][c] == 1:
return False
r = row+1
c = col+1
while r < len(board) and c < len(board):
if board[r][c] == 1:
return False
r += 1
c += 1
r = row-1
c = col-1
while r >=0 and c >= 0:
if board[r][c] == 1:
return False
r -= 1
c -= 1
r = row-1
c = col+1
while r >=0 and c < len(board):
if board[r][c] == 1:
return False
r -= 1
c += 1
r = row+1
c = col-1
while c >=0 and r < len(board):
if board[r][c] == 1:
return False
c -= 1
r += 1
return True
def nqueens(board, row, col, queens):
    # Place one queen per row: try every safe column in the current row,
    # then recurse into the next row starting again from column 0.
    if queens == 0:
        print_board(board)
        return
    if row == len(board):
        return
    for j in range(col, len(board)):
        if is_safe(board, row, j):
            board[row][j] = 1
            nqueens(board, row + 1, 0, queens - 1)
            board[row][j] = 0
    return
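# Illustrative addition (not in the original script): the same row-by-row
# backtracking can count solutions instead of printing them; for a 5x5 board
# it returns 10.
def count_nqueens(board, row=0):
    if row == len(board):
        return 1
    total = 0
    for col in range(len(board)):
        if is_safe(board, row, col):
            board[row][col] = 1
            total += count_nqueens(board, row + 1)
            board[row][col] = 0
    return total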
if __name__ == "__main__":
size = 5
board = create_board(size)
print_board(board)
nqueens(board, 0, 0, size)
|
StarcoderdataPython
|
170228
|
#!/usr/bin/env python3
import os
import sys
import json
from pathlib import Path
from shutil import copyfile
appName = "Warspite Map Exporter"
appVer = "1.0.1.0"
appDesc = """Corrects paths and moves maps and their dependencies."""
sImgFormats = [
".png",
".jpg",
".webp",
".tiff"
]
def DoesParameterExist(parmName):
for i in sys.argv:
if i == parmName:
return True
return False
def GetParameterValue(parmName, required=False):
for i, j in enumerate(sys.argv):
if j == parmName and (i+1 < len(sys.argv)):
if not sys.argv[i+1].startswith("-"):
return sys.argv[i+1]
if required:
raise RuntimeError("Cannot find specified required parameter \"{0}\"!".format(parmName))
return 0
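# Illustrative invocation (added; the script and map file names are made up):
#   python warspite_map_exporter.py -map maps/level01.json \
#       -workDir /path/to/project -baseFolder assets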
print("{0}".format(appName))
print("Version: {0}".format(appVer))
print("Description:\n{0}\n".format(appDesc))
try:
workingDir = Path(GetParameterValue("-workDir", True))
except RuntimeError:
workingDir = Path(os.getcwd())
print("Using working directory: \"{0}\"".format(workingDir))
mapFile = Path(GetParameterValue("-map", True)) # expecting the first parameter after the script to be the map file.
baseFolder = GetParameterValue("-baseFolder") if DoesParameterExist("-baseFolder") else "assets" # the base folder for storing assets - can be game folder or generic engine assets
print("Processing map %s..." % mapFile.name)
# A few checks
if not (os.path.isfile(mapFile)):
print("Cannot find map file \"%s\"!" % mapFile)
if not (os.path.exists(workingDir)):
print("Cannot find path \"%s\"!" % workingDir)
if not (os.path.exists(workingDir.joinpath(baseFolder))):
print("Cannot find base folder \"%s\"!" % workingDir.joinpath(baseFolder))
mapData = dict()
try:
mf = open(mapFile, "r")
except OSError as e:
print("Failed to read mapFile!")
print("Error:\n%s" % str(e))
sys.exit(-1)
else:
with mf:
mapData = json.load(mf)
print("Successfully read mapFile \"%s\"!" % mapFile)
for j, i in enumerate(mapData["tilesets"]):
cTileset = dict()
pTileset = mapFile
npTileset = ""
# Check if we are dealing with an external tileset file
if "source" in i:
try:
# Set the path of the tileset
pTileset = Path(os.path.join(mapFile.parent, i["source"]))
# Usually relative to the mapFile
fs = open(pTileset, "r")
except OSError as e:
print("Failed to read tileset!")
print("Error:\n%s" % str(e))
sys.exit(-1)
else:
with fs:
cTileset = json.load(fs)
else:
# It's an embedded tileset
cTileset = i
npTileset = Path(os.path.join(workingDir, baseFolder, "tilesets", pTileset.name))
# Usually relative to the tileset
iTileSet = Path(os.path.join(pTileset.parent, cTileset["image"]))
dTileSet = Path(os.path.join(workingDir, baseFolder, "textures", iTileSet.name))
if not (os.path.exists(dTileSet.parent)):
os.mkdir(dTileSet.parent)
copyfile(iTileSet, dTileSet)
cTileset["image"] = str(dTileSet.relative_to(workingDir))
# If we are dealing with an external file...
if pTileset != mapFile:
try:
ws = open(npTileset, "w")
except OSError as e:
print("Cannot write tileset!")
print("Error:\n%s" % str(e))
sys.exit(-1)
else:
with ws:
json.dump(cTileset, ws)
mapData["tilesets"][j]["source"] = str(npTileset.relative_to(workingDir))
else:
mapData["tilesets"][j] = cTileset
print("Copied tilesets!")
# Does the map have custom properties?
if "properties" in mapData:
for i, j in enumerate(mapData["properties"]):
if j["type"] == "file":
propFile=Path(mapFile.parent, j["value"])
if (propFile.suffix in sImgFormats) and (propFile.parent.name == "dev"):
nPropFile = Path(workingDir, baseFolder, "textures", "dev", propFile.name)
elif propFile.suffix in sImgFormats:
nPropFile=Path(workingDir, baseFolder, "textures", propFile.name)
            elif propFile.suffix == ".py":
nPropFile = Path(workingDir, baseFolder, "scripts", propFile.name)
else:
nPropFile=Path(workingDir, baseFolder, propFile.name)
if propFile.resolve() != nPropFile.resolve():
copyfile(propFile, nPropFile)
print("Moving file from {0} to {1}...".format(propFile, nPropFile))
mapData["properties"][i]["value"] = str(nPropFile.relative_to(workingDir))
print("Checked property values")
nMapFile = Path(os.path.join(workingDir, baseFolder, "maps", mapFile.name))
try:
jw = open(nMapFile, "w")
except OSError as e:
print("An error occurred while writing \"%s\"!" % str(nMapFile.absolute()))
print("Error:\n%s" % str(e))
else:
with jw:
json.dump(mapData, jw)
print("Rewrote map files!")
print("Moved \"%s\" to \"%s\"" % (str(nMapFile.name), str(nMapFile.parent)))
|
StarcoderdataPython
|
3205735
|
# -*- coding: utf-8 -*-
from AccessControl.unauthorized import Unauthorized
from plone import api
from plone.app.testing import setRoles
from plone.app.testing import SITE_OWNER_NAME
from plone.app.testing import SITE_OWNER_PASSWORD
from plone.app.testing import TEST_USER_ID
from plone.dexterity.interfaces import IDexterityFTI
from plone.testing.z2 import Browser
from plonetraining.testing.testing import PLONETRAINING_TESTING_FUNCTIONAL_TESTING
from plonetraining.testing.testing import PLONETRAINING_TESTING_INTEGRATION_TESTING
from zope.component import createObject
from zope.component import queryUtility
import unittest
try:
from plone.dexterity.schema import portalTypeToSchemaName
except ImportError:
# Plone < 5
from plone.dexterity.utils import portalTypeToSchemaName
class TestingItemIntegrationTest(unittest.TestCase):
layer = PLONETRAINING_TESTING_INTEGRATION_TESTING
def setUp(self):
"""Custom shared utility setup for tests."""
self.portal = self.layer['portal']
setRoles(self.portal, TEST_USER_ID, ['Manager'])
self.parent = self.portal
def test_ct_testing_item_schema(self):
fti = queryUtility(IDexterityFTI, name='TestingItem')
schema = fti.lookupSchema()
schema_name = portalTypeToSchemaName('TestingItem')
self.assertEqual(schema_name, schema.getName())
def test_ct_testing_item_fti(self):
fti = queryUtility(IDexterityFTI, name='TestingItem')
self.assertTrue(fti)
def test_ct_testing_item_factory(self):
fti = queryUtility(IDexterityFTI, name='TestingItem')
factory = fti.factory
obj = createObject(factory)
def test_ct_testing_item_adding(self):
setRoles(self.portal, TEST_USER_ID, ['Contributor'])
obj = api.content.create(
container=self.portal, type='TestingItem', id='testing_item',
)
parent = obj.__parent__
self.assertIn('testing_item', parent.objectIds())
# check that deleting the object works too
api.content.delete(obj=obj)
self.assertNotIn('testing_item', parent.objectIds())
def test_ct_testing_item_globally_addable(self):
setRoles(self.portal, TEST_USER_ID, ['Contributor'])
fti = queryUtility(IDexterityFTI, name='TestingItem')
self.assertTrue(
fti.global_allow, u'{0} is not globally addable!'.format(fti.id),
)
def test_ct_testing_item_contributor_cant_add(self):
setRoles(self.portal, TEST_USER_ID, ['Contributor'])
with self.assertRaises(Unauthorized):
api.content.create(
container=self.portal, type='TestingItem', id='testing_item',
)
class TestingItemFunctionalTest(unittest.TestCase):
layer = PLONETRAINING_TESTING_FUNCTIONAL_TESTING
def setUp(self):
app = self.layer['app']
self.portal = self.layer['portal']
self.request = self.layer['request']
self.portal_url = self.portal.absolute_url()
# Set up browser
self.browser = Browser(app)
self.browser.handleErrors = False
self.browser.addHeader(
'Authorization',
'Basic {username}:{password}'.format(
username=SITE_OWNER_NAME,
password=SITE_OWNER_PASSWORD),
)
def test_add_testing_item(self):
self.browser.open(self.portal_url + '/++add++TestingItem')
self.browser.getControl(name='form.widgets.IBasic.title').value = 'Foo'
self.browser.getControl('Save').click()
self.assertTrue(
'<h1 class="documentFirstHeading">Foo</h1>'
in self.browser.contents
)
self.assertEqual('Foo', self.portal['foo'].title)
def test_view_testing_item(self):
setRoles(self.portal, TEST_USER_ID, ['Manager'])
api.content.create(
type='TestingItem',
title='Bar',
description='This is a description',
container=self.portal,
)
import transaction
transaction.commit()
self.browser.open(self.portal_url + '/bar')
self.assertTrue('Bar' in self.browser.contents)
self.assertIn('This is a description', self.browser.contents)
def test_rich_text_field(self):
self.browser.open(self.portal_url + '/++add++TestingItem')
self.assertIn(
'form.widgets.IRichTextBehavior.text', self.browser.contents,
)
self.browser.getControl(
name='form.widgets.IBasic.title'
).value = 'A content with text'
self.browser.getControl(
name='form.widgets.IRichTextBehavior.text'
).value = 'Some text'
self.browser.getControl('Save').click()
self.assertIn('Some text', self.browser.contents)
|
StarcoderdataPython
|
43895
|
# -*- coding: utf-8 -*-
# Import libraries from api
from visual_api import *
class MplCanvas(FigureCanvas):
"""Base MPL widget for plotting
Parameters
----------
FigureCanvas : FigureCanvasQTAgg
Canvas for plotting
Returns
-------
None
"""
def __init__(self, parent=None, dpi=100):
self.fig = plt.figure(dpi=dpi)
self.axes = self.fig.add_subplot(111)
self._reg_predictions_added = 0
self._clf_predictions_added = 0
self._clust_predictions_added = 0
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
class DynamicMplCanvas(MplCanvas):
"""A canvas that updates itself on call with a new plot"""
def __init__(self, *args, **kwargs):
MplCanvas.__init__(self, *args, **kwargs)
self.compute_initial_figure()
def compute_initial_figure(self):
"""Initial demo plot for matplotlib canvas"""
self.axes.plot([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[60, 70, 85, 100, 125, 160, 195, 225, 0, 0], '-o',
color='green')
self.axes.set_xlabel("Number of Espresso Shots")
self.axes.set_ylabel("Heart Rate")
self.axes.set_ylim([-2, 230])
self.axes.set_title("Demo Plot: Number of Espresso Shots x Heart Rate")
plt.tight_layout()
def _reset_plots(self):
"""Resets plots for update_plot function"""
        try:
            self.fig.delaxes(self.axes)
            self.axes = self.fig.add_subplot(111)
        except Exception:
            # The single-axes layout was not present
            pass
        try:
            self.fig.delaxes(self.axes_x)
            self.fig.delaxes(self.axes_y)
            self.axes = self.fig.add_subplot(111)
        except Exception:
            # The side-by-side boxplot axes were not present
            pass
def update_plot(self, x, y, xlabel, ylabel, plot_type, plot_generated, checkbox):
"""Updates plot based on user input and plot type
# TODO: Fix axis x-axis tick marks and labels for bar charts, especially when two
# variables plotted together
Parameters
----------
x : pandas Series
x variable for plotting
y : pandas Series
y variable for plotting
xlabel : str
Name of x variable
ylabel : str
Name of y variable
plot_type : str
Name of plot to generate
plot_generated : dict
Holds status information about generated plot
checkbox : PySide CheckBox widget
"Add Predictions to Plot" check box widget
Returns
-------
status : str
Status of method
"""
# Clear plotting canvas and define variables used for plotting
self._reset_plots()
self.x = x
self.y = y
try:
# Scatter plot
if plot_type == 'Scatter':
title_str = "Scatter: {} x {}".format(xlabel, ylabel)
self.axes.scatter(x, y, alpha=.6)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
self.axes.set_title(title_str)
# Line plot
elif plot_type == 'Line':
title_str = "Line: {} x {}".format(xlabel, ylabel)
self.axes.plot(x, y, alpha=.6)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
self.axes.set_title(title_str)
# Scatter + Line plot
elif plot_type == 'Scatter + Line':
title_str = "Scatter + Line: {} x {}".format(xlabel, ylabel)
self.axes.plot(x, y, '-o', alpha=.6)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
self.axes.set_title(title_str)
# Histogram
elif plot_type == 'Histogram':
if x is not None: self.axes.hist(x, alpha=.6, label=xlabel, color='blue')
if y is not None: self.axes.hist(y, alpha=.6, label=ylabel, color='green')
# Add labels and title
if x is not None and y is not None:
title_str = "Histogram: {} and {}".format(xlabel, ylabel)
self.axes.set_xlabel(xlabel + ' and ' + ylabel)
elif x is not None and y is None:
title_str = "Histogram: {}".format(xlabel)
self.axes.set_xlabel(xlabel)
else:
title_str = "Histogram: {}".format(ylabel)
self.axes.set_xlabel(ylabel)
# Set title for any histogram
self.axes.set_title(title_str)
self.axes.set_ylabel('Count')
plt.legend(loc='best')
# Bar Chart
elif plot_type == 'Bar Chart':
if x is not None:
self.axes.bar(np.unique(x), pd.value_counts(x), alpha=.6, label=xlabel, color='blue')
if y is not None:
self.axes.bar(np.unique(y), pd.value_counts(y), alpha=.6, label=ylabel, color='green')
# Add labels and title
if x is not None and y is not None:
title_str = "Bar Chart: {} and {}".format(xlabel, ylabel)
self.axes.set_xlabel(xlabel + ' and ' + ylabel)
elif x is not None and y is None:
title_str = "Bar Chart: {}".format(xlabel)
self.axes.set_xlabel(xlabel)
else:
title_str = "Bar Chart: {}".format(ylabel)
self.axes.set_xlabel(ylabel)
# Set title for any bar chart
self.axes.set_title(title_str)
self.axes.set_ylabel('Count')
plt.legend(loc='best')
# Boxplot
else:
if x is not None and y is None:
title_str = "Boxplot: {}".format(xlabel)
self.axes.boxplot(x)
self.axes.set_ylabel('Value')
self.axes.set_title(title_str)
elif x is None and y is not None:
title_str = "Boxplot: {}".format(ylabel)
self.axes.boxplot(y)
self.axes.set_ylabel('Value')
self.axes.set_title(title_str)
else:
self.fig.delaxes(self.axes)
# X variable
self.axes_x = self.fig.add_subplot(121)
self.axes_x.boxplot(x)
self.axes_x.set_ylabel("Value")
self.axes_x.set_title("Boxplot: {}".format(xlabel))
# Y variable
self.axes_y = self.fig.add_subplot(122)
self.axes_y.boxplot(y)
self.axes_y.set_title("Boxplot: {}".format(ylabel))
# Create better layout and draw
plt.tight_layout()
self.draw()
# Update plot status
plot_generated['status'] = True # This lets main UI know the plot generated
plot_generated['xlabel'] = xlabel
plot_generated['ylabel'] = ylabel
# Enable/disable checkbox based on plot types
if plot_type in utils.PLOTS_FOR_PRED:
checkbox.setEnabled(True)
checkbox.setChecked(True)
else:
checkbox.setEnabled(False)
checkbox.setChecked(False)
return 'Success'
except Exception as e:
plot_generated['status'] = False # Sorry about your luck :(
plot_generated['xlabel'] = 'None'
plot_generated['ylabel'] = 'None'
return str(e)
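    # Illustrative call (added; the data and widget names are assumptions):
    #   status = canvas.update_plot(df['shots'], df['rate'], 'shots', 'rate',
    #                               'Scatter', plot_generated={'status': False},
    #                               checkbox=ui.pred_checkbox)
    #   # On success, `status` is 'Success' and plot_generated['status'] is True.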
def add_predictions_to_plot(self, y_pred, model_type, model_name):
"""Adds machine learning predictions to currently generated plot
Parameters
----------
y_pred : 1d array-like
Machine learning model predictions
model_type : str
Type of machine learning model
model_name : str
Name of machine learning model
Returns
-------
status : str
Status of method
"""
try:
if model_type == 'Regression':
if self._reg_predictions_added > (len(REG_COLORS)-1): self._reg_predictions_added = 0
self.axes.scatter(self.x, y_pred, label='Predicted: {}'.format(model_name),
color=REG_COLORS[self._reg_predictions_added], alpha=.6)
self._reg_predictions_added += 1
elif model_type == 'Classification':
if self._clf_predictions_added > (len(CLF_COLORS)-1): self._clf_predictions_added = 0
# Plot hits and misses
hits, misses = np.where(y_pred == self.y)[0], np.where(y_pred != self.y)[0]
self.axes.scatter(self.x.iloc[hits], y_pred[hits],
label='Correct: {}'.format(model_name), alpha=.6, facecolors='none',
edgecolors=CLF_COLORS[self._clf_predictions_added][0])
self.axes.scatter(self.x.iloc[misses], y_pred[misses],
label='Incorrect: {}'.format(model_name), alpha=.6, marker='x',
color=CLF_COLORS[self._clf_predictions_added][1])
self._clf_predictions_added += 1
else:
cluster_ids = np.unique(y_pred)
CLUST_COLORS = utils.get_spaced_colors(len(cluster_ids)+1, offset=self._clust_predictions_added)
for i, label in enumerate(cluster_ids):
idx = np.where(y_pred == label)[0]
self.axes.scatter(self.x.iloc[idx], self.y.iloc[idx],
label='Cluster {}: {}'.format(i, model_name),
color=next(CLUST_COLORS),
alpha=.6)
self._clust_predictions_added += 1
# Add legend and draw plot
plt.legend(loc='upper left')
self.draw()
return 'Success'
except Exception as e:
return str(e)
|
StarcoderdataPython
|
1638569
|
<filename>api/__init__.py
import geojson, datetime, pytz, json, os, importlib, datetime, math
from housepy import server, config, log, util, strings
from mongo import ASCENDING, DESCENDING, ObjectId
"""
Basically, it's like this: /api/<view>/<output>?<query>
The view is what kind of thing you want back (eg, a FeatureCollection (features), or a list of expeditions)
The output is json if it's missing, otherwise, how about a map? or HTML? a chart?
See templates/api/map.html for an example of how to subsequently load the JSON data asyncronously
The query defines the filter. This might be any property at all, but keyed ones are:
- Expedition (eg okavango_14)
- Member (eg Jer)
- startDate and endDate (endDate is one day later if omitted and startDate is present)
- geoBounds (upper left (NW), lower right (SE): lon_1,lat_1,lon_2,lat_2. So Okavango is something like 20,-17,26,-22
- region (arbitrary polygon)
Can also do expeditionDay=N for the 24 hour period N days after the expedition start date specified in the config
By default, returns the first 100 results. limit=N for more.
Sorted in ascending order by t_utc. To reverse, use order=descending.
Returns at most one feature per interval of resolution seconds.
"""
class Api(server.Handler):
def post(self, nop=None):
return self.not_found()
def get(self, view_name=None, output=None):
# add a header for unrestricted access
self.set_header("Access-Control-Allow-Origin", "*")
csv = False
geo = False
# do the routing and load view module
if not len(view_name):
log.info("Listing views...")
views = ["/api/%s" % filename.split('.')[0] for filename in os.listdir(os.path.abspath(os.path.dirname(__file__))) if filename[0] != "_" and filename[-3:] == ".py"]
response = {'description': "API view endpoints", "views": views}
return self.json(response)
module_name = "api.%s" % view_name
try:
view = importlib.import_module(module_name)
log.info("Loaded %s module" % module_name)
except ImportError as e:
log.error(log.exc(e))
return self.error("View \"%s\" not recognized" % view_name)
if len(output):
if output == "csv":
csv = True
elif output == "geo":
geo = True
else:
feature_type = self.get_argument('FeatureType', None)
try:
return self.render("api/%s.html" % output, query=(self.request.uri).replace("/%s" % output, ""), feature_type=feature_type)
except Exception as e:
return self.error("Could not render %s" % output)
# time to build our search filter
search = {}
# special parsing for FeatureType
feature_type = self.get_argument('FeatureType', None)
if feature_type is not None:
if ',' in feature_type:
search['$or'] = [{'properties.FeatureType': ft} for ft in feature_type.split(',')]
else:
search['FeatureType'] = feature_type
del self.request.arguments['FeatureType']
# special parsing for id
feature_id = self.get_argument('id', None)
if feature_id is not None:
try:
search['_id'] = ObjectId(feature_id)
except Exception as e:
log.error(log.exc(e))
return self.error("Bad ID")
del self.request.arguments['id']
# special parsing for startDate and endDate
start_string = self.get_argument('startDate', None)
if start_string is not None:
try:
start_dt = util.parse_date(start_string, tz=config['local_tz'])
start_t = util.timestamp(start_dt)
end_string = self.get_argument('endDate', (start_dt + datetime.timedelta(days=1)).strftime("%Y-%m-%d"))
end_dt = util.parse_date(end_string, tz=config['local_tz'])
end_t = util.timestamp(end_dt)
log.debug("startDate %s" % start_dt)
log.debug("endDate %s" % end_dt)
search['t_utc'] = {'$gt': start_t, '$lt': end_t}
except Exception as e:
log.error(log.exc(e))
return self.error("Bad dates")
del self.request.arguments['startDate']
if 'endDate' in self.request.arguments:
del self.request.arguments['endDate']
# special parsing for rectangular location
# expecting bounds (upper left (NW), lower right (SE)): lon_1,lat_1,lon_2,lat_2
# oka: 20,-17,26,-22 nyc: -75,41,-71,40
geo_bounds = self.get_argument('geoBounds', None)
if geo_bounds is not None:
try:
lon_1, lat_1, lon_2, lat_2 = [float(coord) for coord in geo_bounds.split(',')]
log.debug("geo_bounds %f,%f %f,%f" % (lon_1, lat_1, lon_2, lat_2))
search['geometry'] = {'$geoWithin': {'$geometry': {'type': "Polygon", 'coordinates': [[ [lon_1, lat_1], [lon_2, lat_1], [lon_2, lat_2], [lon_1, lat_2], [lon_1, lat_1] ]]}}}
except Exception as e:
log.error(log.exc(e))
return self.error("Bad geometry")
del self.request.arguments['geoBounds']
# special parsing for polygonal region
# expecting an arbitrary polygon
# (rough) mombo: 22.731580,-19.186571,22.716444,-19.227478,22.766600,-19.295694,22.827821,-19.319120,22.874635,-19.336678,22.948241,-19.282060,22.747431,-19.132026
region = self.get_argument('region', None)
if region is not None:
try:
cs = [float(coord) for coord in region.split(',') if len(coord)]
coords = list(zip(cs[::2], cs[1::2]))
if coords[0] != coords[-1]:
coords.append(coords[0])
log.debug("region %s" % (coords))
search['geometry'] = {'$geoWithin': {'$geometry': {'type': "Polygon", 'coordinates': [coords]}}}
except Exception as e:
log.error(log.exc(e))
return self.error("Bad geometry")
del self.request.arguments['region']
# filter results without geo
if geo:
search['geometry'] = {'$exists': True, '$ne': None}
# special parsing for expeditionDay (overrides startDate / endDate)
expedition_day = self.get_argument('expeditionDay', None)
if expedition_day is not None:
try:
expedition = self.get_argument('expedition', config['expedition'])
expedition = self.get_argument('Expedition', expedition)
start_dt = util.parse_date(str(config['start_date'][expedition]), tz=config['local_tz'])
expedition_day = int(expedition_day) - 1
log.debug("%s days after %s" % (expedition_day, start_dt))
gt_t = util.timestamp(start_dt + datetime.timedelta(days=expedition_day))
lt_t = util.timestamp(start_dt + datetime.timedelta(days=expedition_day + 1))
search['t_utc'] = {'$gt': gt_t, '$lt': lt_t}
except Exception as e:
log.error(log.exc(e))
return self.error("Bad day")
del self.request.arguments['expeditionDay']
# special parsing for resolution
resolution = strings.as_numeric(self.request.arguments['resolution'][0]) if 'resolution' in self.request.arguments else 0
# special parsing for SpeciesSearch
species_search = self.get_argument('speciesSearch', None)
if species_search is not None:
search['$text'] = {'$search': species_search}
del self.request.arguments['speciesSearch']
# get limit and order
# limit = self.get_argument('limit', 100) # this fails on int arguments, which I think is a tornado bug
limit = strings.as_numeric(self.request.arguments['limit'][0]) if 'limit' in self.request.arguments else 100
        order = self.request.arguments['order'][0].decode('utf-8').lower() if 'order' in self.request.arguments else 'ascending'
order = ASCENDING if order == "ascending" else DESCENDING
# get all the rest of the arguments and format as properties
try:
for param, value in self.request.arguments.items():
for i, item in enumerate(value):
item = item.decode('utf-8')
item = strings.as_numeric(item)
item = True if type(item) == str and item.lower() == "true" else item
item = False if type(item) == str and item.lower() == "false" else item
item = {'$exists': True} if item == '*' else item
value[i] = item
search[param] = value[0] if len(value) == 1 else value
search = { ('properties.%s' % (strings.camelcase(param) if param != 't_utc' and param != '_id' else param) if param != 'geometry' and param != '$text' and param != '$or' and param != '_id' else param): value for (param, value) in search.items() if param not in ['limit', 'order', 'resolution']}
except Exception as e:
log.error(log.exc(e))
return self.error("bad parameters")
# http://localhost:7777/api?geoBounds=20,-17,26,-22&startDate=2014-08-01&endDate=2014-09-01&Member=Jer
log.info("FILTER %s" % search)
# pass our search to the view module for execution and formatting
try:
result = view.assemble(self, search, limit, order, resolution)
if result is None:
return
if csv:
return self.csv(format_csv(result), "data.csv")
results, total, returned = result
if 'features' in results:
for feature in results['features']:
try:
if feature['geometry']['coordinates'][2] is None:
del feature['geometry']['coordinates'][2]
except Exception:
pass
for field in feature['properties']:
if type(feature['properties'][field]) is float and math.isnan(feature['properties'][field]):
feature['properties'][field] = None
if geo:
## cola
for feature in results['features']:
keys = list(feature['properties'].keys())
for key in keys:
if type(feature['properties'][key]) == dict:
ks = list(feature['properties'][key].keys())
for k in ks:
feature['properties'][key + "_" + k] = feature['properties'][key][k]
del feature['properties'][key]
return self.json(results)
search = {key.replace('properties.', ''): value for (key, value) in search.items()}
return self.json({'order': order, 'limit': limit, 'total': total, 'returned': len(results) if returned is None else returned, 'filter': search, 'results': results, 'resolution': resolution if resolution != 0 else "full"})
except Exception as e:
return self.error(log.exc(e))
def format_csv(data):
import csv
features = data[0]['features']
# build header
header = []
for feature in features:
feature.update(feature['properties'])
if 'Taxonomy' in feature and feature['Taxonomy'] is not None:
feature.update(feature['Taxonomy'])
del feature['Taxonomy']
if feature['geometry'] is not None:
feature.update({"Longitude": feature['geometry']['coordinates'][0], "Latitude": feature['geometry']['coordinates'][1]})
del feature['properties']
del feature['geometry']
for key in feature:
if key not in header:
header.append(key)
header.sort()
log.debug(header)
# populate rows
csv = []
csv.append(','.join(header))
with open('data.csv', 'w', newline='') as csvfile:
for feature in features:
row = []
for column in header:
if column in feature:
value = feature[column]
if type(value) == str:
value = strings.singlespace(value)
                        value = value.replace('"', "'")
value = "%s" % value
row.append(str(value).replace(",", ""))
else:
row.append("None")
csv.append(','.join(row))
return '\n'.join(csv)
# print(json.dumps(features, indent=4, default=lambda x: str(x)))
|
StarcoderdataPython
|
1705106
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 24 09:08:21 2018
@author: Xian_Work
"""
import scipy
from scipy.optimize import fmin, brute
from model_plotting import norm , compute_dist,gen_evolve_share_series, mk_mix_agent
#Import parameters and other UI modules
param_path="../Parameters/params_ui.json"
execfile("prelim.py")
####################################
#Model Target and Base Plots
####################################
#Data Series to plot against
data_tminus5 = norm(param.JPMC_cons_moments, param.plt_norm_index)
data_tminus5_search = param.JPMC_search_moments
###Use vcv weights###
cons_se_vcv_tminus5 =param.JPMC_cons_SE
search_se_vcv = param.JPMC_search_SE
#Targets
opt_target_cons = data_tminus5
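# Inverse-variance weighting: each weight matrix below is the inverse of a diagonal
# covariance matrix built from the squared standard errors of the corresponding moments.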
cons_wmat = np.linalg.inv(np.asmatrix(np.diag(np.square(cons_se_vcv_tminus5))))
opt_target_search = data_tminus5_search[param.moments_len_diff: param.moments_len_diff + param.s_moments_len]
search_wmat = np.linalg.inv(np.asmatrix(np.diag(np.square(search_se_vcv))))
#########################################
# Baseline Parameters
#########################################
pd_base = {"a0": param.a0_data, "T_series":T_series, "T_solve":param.TT,
"e":param.e_extend,
"beta_var":param.beta, "beta_hyp": param.beta_hyp, "a_size": param.a_size,
"rho":param.rho, "verbose":False, "L_":param.L,
"constrained":param.constrained, "Pi_":np.zeros((param.TT+1,param.a_size+1,9,9)),
"z_vals" : param.z_vals, "R" : param.R, "Rbor" : param.R,
"phi": param.phi, "k":param.k, "spline_k":param.spline_k, "solve_V": True,
"solve_search": True}
for t in range(param.TT+1):
for a_index in range(param.a_size+1):
pd_base['Pi_'][t][a_index] = param.Pi
pd_base['T_series']=len(pd_base['e'])-1
### Estimated standard model
f = open("../Parameters/model_params_main.json")
models_params = json.load(f)
f.close()
est_params_1b1k = models_params['est_params_1b2k']
est_params_1b2k = models_params['est_params_1b2k']
pd_1b1k = copy.deepcopy(pd_base)
for k, v in est_params_1b1k.iteritems():
if k in pd_base.keys():
pd_1b1k.update({k:v})
pd_1b2k = copy.deepcopy(pd_base)
for k, v in est_params_1b2k.iteritems():
if k in pd_base.keys():
pd_1b2k.update({k:v})
weights_1b2k = (est_params_1b2k['w_lo_k'], 1- est_params_1b2k['w_lo_k'])
params_1b2k = ('k', )
vals_1b2k = ((est_params_1b2k['k0'], ),
(est_params_1b2k['k1'], ),)
################################################
# Functions to estimate delta for a given gamma
###############################################
def gen_agent_bhvr(agent):
c_start = param.c_plt_start_index
s_start = param.s_plt_start_index
series_dict = gen_evolve_share_series(pd_base['e'],
c_start, s_start,
len(opt_target_cons),
param.plt_norm_index,
*agent,
verbose = True,
normalize = True)
cons_out = series_dict['w_cons_out']
search_out = series_dict['w_search_out'][s_start-c_start : s_start-c_start+len(opt_target_search)]
return {'cons_out':cons_out, 'search_out':search_out}
def find_opt_delta_1b2k(gamma, opt_type= None):
"""For a given gamma, finds the discount factor delta(beta_var)
that generates behavior to best fit the data moments"""
def obj_func(delta_in):
delta = delta_in[0]
#Generate agent
if opt_type == "1b1k":
pd_temp = copy.deepcopy(pd_1b1k)
pd_temp.update({'rho':gamma})
pd_temp.update({'beta_var':delta})
agent = [(1,pd_temp)]
elif opt_type == "1b2k":
pd_temp = copy.deepcopy(pd_1b2k)
pd_temp.update({'rho':gamma})
pd_temp.update({'beta_var':delta})
agent = mk_mix_agent(pd_temp, params_1b2k, vals_1b2k, weights_1b2k)
#Compute predicted behavior
series_out = gen_agent_bhvr(agent)
cons_out = series_out['cons_out']
search_out = series_out['search_out']
#Calculate distance from targets
cons_dist = compute_dist(cons_out, opt_target_cons, cons_wmat)
search_dist = compute_dist(search_out, opt_target_search, search_wmat)
return cons_dist + search_dist
opt_out = scipy.optimize.minimize(obj_func, [pd_base['beta_var'],],
bounds=[(0.9,1.0),],
options = {'maxiter':15})
return(opt_out)
###############################################
# Estimate best delta for a range of gamma vals
###############################################
gammas_out = {}
for gamma in [0.9999, 4.0, 10.0]:
opt_out_1b1k = find_opt_delta_1b2k(gamma, opt_type ="1b1k")
opt_out_1b2k = find_opt_delta_1b2k(gamma, opt_type ="1b2k")
opt_delta_1b1k = opt_out_1b1k['x'][0]
opt_delta_1b2k = opt_out_1b2k['x'][0]
key_1b1k = "est_params_1b1k_fix_gamma_" + str(int(np.round(gamma)))
key_1b2k = "est_params_1b2k_fix_gamma_" + str(int(np.round(gamma)))
gammas_out.update({key_1b1k: {'beta_var':opt_delta_1b1k, 'rho':gamma}})
gammas_out.update({key_1b2k: {'beta_var':opt_delta_1b2k, 'rho':gamma}})
with open('../Parameters/model_params_robust_gamma.json', 'w') as f:
json.dump(gammas_out, f, indent=0)
|
StarcoderdataPython
|
68183
|
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model_analyzer.config.input.objects.config_model_profile_spec \
import ConfigModelProfileSpec
from model_analyzer.constants import LOGGER_NAME, THROUGHPUT_GAIN
import copy
import logging
logger = logging.getLogger(LOGGER_NAME)
class RunSearch:
"""
A class responsible for searching the config space.
"""
def __init__(self, config):
self._max_concurrency = config.run_config_search_max_concurrency
self._max_instance_count = config.run_config_search_max_instance_count
self._max_preferred_batch_size = config.run_config_search_max_preferred_batch_size
self._sweep_preferred_batch_size_disable = config.run_config_search_preferred_batch_size_disable
self._model_config_parameters = {'instance_count': 1}
self._measurements = []
self._last_batch_length = None
# Run search operating mode
self._sweep_mode_function = None
def _create_model_config(self, cpu_only=False):
"""
Generate the model config sweep to be used.
"""
model_config = self._model_config_parameters
new_config = {}
if 'dynamic_batching' in model_config:
if model_config['dynamic_batching'] is None:
new_config['dynamic_batching'] = {}
else:
new_config['dynamic_batching'] = {
'preferred_batch_size': [model_config['dynamic_batching']]
}
if 'instance_count' in model_config:
if not cpu_only:
new_config['instance_group'] = [{
'count': model_config['instance_count'],
'kind': 'KIND_GPU'
}]
else:
new_config['instance_group'] = [{
'count': model_config['instance_count'],
'kind': 'KIND_CPU'
}]
return new_config
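    # For illustration (values are made up): with self._model_config_parameters equal to
    # {'instance_count': 2, 'dynamic_batching': 4}, the method above yields
    #     {'dynamic_batching': {'preferred_batch_size': [4]},
    #      'instance_group': [{'count': 2, 'kind': 'KIND_GPU'}]}
    # and 'KIND_CPU' replaces 'KIND_GPU' when cpu_only=True.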
def add_measurements(self, measurements):
"""
        Add the measurements that are the result of running
the sweeps.
Parameters
----------
measurements : list
list of measurements
"""
self._last_batch_length = len(measurements)
# The list will contain one parameter, because we are experimenting
# with one value at a time.
self._measurements += measurements
def _step_instance_count(self):
"""
Advances instance count by one step.
"""
self._model_config_parameters['instance_count'] += 1
def _step_dynamic_batching(self):
"""
Advances the dynamic batching by one step.
"""
if 'dynamic_batching' not in self._model_config_parameters:
# Enable dynamic batching
self._model_config_parameters['dynamic_batching'] = None
else:
if self._model_config_parameters['dynamic_batching'] is None:
self._model_config_parameters['dynamic_batching'] = 1
else:
self._model_config_parameters['dynamic_batching'] *= 2
def _get_throughput(self, measurement):
return measurement.get_metric_value('perf_throughput')
def _calculate_throughput_gain(self, index):
throughput_before = self._get_throughput(
self._measurements[-(index + 1)])
throughput_after = self._get_throughput(self._measurements[-index])
gain = (throughput_after - throughput_before) / throughput_before
return gain
def _valid_throughput_gain(self):
"""
Returns true if the amount of throughput gained
is reasonable for continuing the search process
"""
# If number of measurements is smaller than 4,
# the search can continue.
if len(self._measurements) < 4:
return True
return self._calculate_throughput_gain(1) > THROUGHPUT_GAIN or \
self._calculate_throughput_gain(2) > THROUGHPUT_GAIN or \
self._calculate_throughput_gain(3) > THROUGHPUT_GAIN
def init_model_sweep(self, concurrency, search_model_config_parameters):
"""
        Initializes the sweep mode, and model config parameters in some cases.
"""
# Reset the measurements after each init
self._measurements = []
if len(concurrency) != 0 and search_model_config_parameters:
self._model_config_parameters = {'instance_count': 0}
self._sweep_mode_function = self._sweep_model_config_only
elif len(concurrency) == 0 and search_model_config_parameters:
self._model_config_parameters = {'instance_count': 1}
logger.info(
'Will sweep both the concurrency and model config parameters...'
)
self._sweep_mode_function = self._sweep_concurrency_and_model_config
else:
logger.info('Will sweep only through the concurrency values...')
self._sweep_mode_function = self._sweep_concurrency_only
def get_model_sweep(self, config_model):
"""
Get the next iteration of the sweeps.
Parameters
----------
config_model : ConfigModelProfileSpec
The config model object of the model to sweep through
Returns
-------
config_model, list
The list may be empty, contain a model config dict or None
"""
new_model = ConfigModelProfileSpec(
copy.deepcopy(config_model.model_name()),
copy.deepcopy(config_model.cpu_only()),
copy.deepcopy(config_model.objectives()),
copy.deepcopy(config_model.constraints()),
copy.deepcopy(config_model.parameters()),
copy.deepcopy(config_model.model_config_parameters()),
copy.deepcopy(config_model.perf_analyzer_flags()))
if self._sweep_mode_function:
new_model, model_sweep = self._sweep_mode_function(new_model)
            # Only log a message if there are new runs.
if model_sweep:
self._log_message(new_model)
return new_model, model_sweep
return new_model, []
def _sweep_concurrency_and_model_config(self, model):
"""
Gets next iteration of both the concurrency and model config
parameters
Parameters
----------
model : ConfigModelProfileSpec
The model whose parameters are being swept over
"""
return self._sweep_parameters(model, sweep_model_configs=True)
def _sweep_concurrency_only(self, model):
"""
Gets next iteration of the concurrency sweep
"""
return self._sweep_parameters(model, sweep_model_configs=False)
def _sweep_parameters(self, model, sweep_model_configs):
"""
A helper function that sweeps over concurrency
and if required, over model configs as well
"""
concurrency = model.parameters()['concurrency']
if len(concurrency) == 0:
model.parameters()['concurrency'] = [1]
else:
# Exponentially increase concurrency
new_concurrency = concurrency[0] * 2
# If the concurrency limit has been reached, the last batch lead to
# an error, or the throughput gain is not significant, step
# the concurrency value. TODO: add exponential backoff so
# that the algorithm can step back and exactly find the points.
concurrency_limit_reached = new_concurrency > self._max_concurrency
last_batch_erroneous = self._last_batch_length == 0
throughput_peaked = not self._valid_throughput_gain()
if concurrency_limit_reached or last_batch_erroneous or throughput_peaked:
# Reset concurrency
if sweep_model_configs:
self._measurements = []
model.parameters()['concurrency'] = [1]
return self._sweep_model_config_only(model)
else:
return model, []
model.parameters()['concurrency'] = [new_concurrency]
return model, [
self._create_model_config(
cpu_only=model.cpu_only()) if sweep_model_configs else None
]
def _sweep_model_config_only(self, model):
"""
Gets next iteration model config
parameters sweep
"""
self._step_instance_count()
instance_limit_reached = self._model_config_parameters[
'instance_count'] > self._max_instance_count
if instance_limit_reached:
if self._sweep_preferred_batch_size_disable:
return model, []
# Reset instance_count
self._model_config_parameters['instance_count'] = 1
self._step_dynamic_batching()
dynamic_batching_enabled = self._model_config_parameters[
'dynamic_batching'] is not None
if dynamic_batching_enabled:
batch_size_limit_reached = self._model_config_parameters[
'dynamic_batching'] > self._max_preferred_batch_size
if batch_size_limit_reached:
return model, []
return model, [self._create_model_config(cpu_only=model.cpu_only())]
def _log_message(self, model):
"""
Writes the current state of the search to the console
"""
concurrency = model.parameters()['concurrency'][0]
message = 'dynamic batching is disabled.'
if 'dynamic_batching' in self._model_config_parameters:
if self._model_config_parameters['dynamic_batching'] is None:
message = 'dynamic batching is enabled.'
else:
message = (
"preferred batch size is set to "
f"{self._model_config_parameters['dynamic_batching']}.")
if self._sweep_mode_function == self._sweep_concurrency_only:
logger.info(f"[Search Step] Concurrency set to {concurrency}. ")
elif self._sweep_mode_function == self._sweep_concurrency_and_model_config:
logger.info(
f"[Search Step] Concurrency set to {concurrency}. "
f"Instance count set to "
f"{self._model_config_parameters['instance_count']}, and {message}"
)
elif self._sweep_mode_function == self._sweep_model_config_only:
logger.info(
f"[Search Step] Instance count set to "
f"{self._model_config_parameters['instance_count']}, and {message}"
)
|
StarcoderdataPython
|
3269959
|
<filename>Easy/ValidQuadrilateral.py
"""
Problem Statement
John loves gardening. He believes that for good growth of the plant, the land should be quadrilateral. Given angles of 4 sides of the land. Find whether the land is a valid quadrilateral or not. A quadrilateral is valid if the sum of all four angles is equal to 360 degrees.
Input
The first line contains an integer T, total number of test cases. The next T lines, each, contain four angles A, B, C and D of a quadrilateral separated by space.
Output
Print "YES" if the quadrilateral is valid, else print "NO", without the double quotes.
Constraints
1 ≤ T ≤ 100 1 ≤ A,B,C and D ≤ 360
Sample Input
2
90 90 90 90
180 180 45 55
Sample Output
YES
NO
"""
n = int(input())
for i in range(n):
a = input().split()
ar = [int(x) for x in a]
if sum(ar) == 360:
print("YES")
else:
print("NO")
|
StarcoderdataPython
|
3286518
|
import numpy as np
class Genome:
def __init__(self, x_dim, y_dim, random_weights=True):
self.genome_type = "survived"
self.x_dim = x_dim
self.y_dim = y_dim
self.h_dim = 1
self.score = 0
self.fitness = 0
# fixed bias for simplicity
# for optimal search, inputs should be normalized in advance
self.x_bias = 1
# self.h_bias = 1
if random_weights:
self.w1 = np.random.random((
self.h_dim, self.x_dim + 1
)) * 2 - 1
self.w2 = np.random.random((
self.y_dim, self.h_dim
)) * 2 - 1
else:
self.w1 = np.zeros((self.h_dim, self.x_dim + 1))
self.w2 = np.zeros((self.y_dim, self.h_dim))
return
# x, h, and y are input vector, hidden vector, and output vector respectively
def predict(self, x):
# append bias to inputs
x = np.append(self.x_bias, x)
# multiply by weight and push to hidden layer
h = np.dot(self.w1, x.reshape(-1, 1))
# h = self.layer_output(x, self.w1)
# apply relu activation to h
# sigmoid activation commented out below
# h = 1 / (1 + np.exp(-1 * h))
h = h * (h > 0)
# h = np.append(self.h_bias, h)
# multiply by weight and push to output
y = np.dot(self.w2, h.reshape(-1, 1))
# return formatted output
y = np.ndarray.flatten(y > 0)
return y
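# Example usage (illustrative only; the dimensions and inputs below are arbitrary):
#     g = Genome(x_dim=4, y_dim=2)
#     action = g.predict(np.array([0.1, -0.3, 0.7, 0.0]))  # boolean output vector of length 2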
|
StarcoderdataPython
|
3344911
|
<reponame>YungTimAllen/RSVP-TE-Lab-Builder
#!/usr/bin/env python3
"""Script for rendering config for topologies defined by a YAML file"""
from argparse import ArgumentParser, Namespace
import yaml
from jinja2 import Template
def main(args: Namespace):
"""First method called when ran as a script"""
with open(args.topology_file, "r") as fp_:
render(
data=yaml.safe_load(fp_),
path_to_j2=args.template if args.template else "ios-classic-ospf.j2",
)
def render(data: dict, path_to_j2: str):
"""Renders given Jinja2 template with given data (dict)
Args:
data: Dict object containing structured data valid for given Jinja template
path_to_j2: Filepath to Jinja template
"""
with open(path_to_j2) as file_:
print(Template(file_.read()).render(data))
if __name__ == "__main__":
parser = ArgumentParser(description="Render templates for node config")
parser.add_argument("topology_file", type=str, help="YAML defining topology")
parser.add_argument(
"--template", type=str, help="Use a custom template. Default: ./ios-classic-ospf.j2"
)
main(parser.parse_args())
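# Example invocation (file names are placeholders, except the default template named above):
#     python3 render_topology.py my_topology.yml --template ios-classic-ospf.j2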
|
StarcoderdataPython
|
3237346
|
<reponame>mathisloge/mapnik-vector-tile
{
"includes": [
"common.gypi"
],
'variables': {
'MAPNIK_PLUGINDIR%': '',
'enable_sse%':'true',
'common_defines' : [
'MAPNIK_VECTOR_TILE_LIBRARY=1'
]
},
"targets": [
{
'target_name': 'make_vector_tile',
'type': 'none',
'hard_dependency': 1,
'actions': [
{
'action_name': 'run_protoc',
'inputs': [
'../proto/vector_tile.proto'
],
'outputs': [
"<(SHARED_INTERMEDIATE_DIR)/vector_tile.pb.cc",
"<(SHARED_INTERMEDIATE_DIR)/vector_tile.pb.h"
],
'action': ['protoc','-I../proto/','--cpp_out=<(SHARED_INTERMEDIATE_DIR)/','../proto/vector_tile.proto']
}
]
},
{
"target_name": "vector_tile",
'dependencies': [ 'make_vector_tile' ],
'hard_dependency': 1,
"type": "static_library",
"sources": [
"<(SHARED_INTERMEDIATE_DIR)/vector_tile.pb.cc"
],
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/'
],
'conditions': [
['enable_sse == "true"', {
'defines' : [ 'SSE_MATH' ]
}]
],
'cflags_cc' : [
'-D_THREAD_SAFE',
'<!@(mapnik-config --cflags)',
'-Wno-sign-compare',
'-Wno-sign-conversion'
],
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS':[
'-D_THREAD_SAFE',
'<!@(mapnik-config --cflags)',
'-Wno-sign-compare',
'-Wno-sign-conversion'
],
},
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/'
],
'libraries':[
'-lprotobuf-lite'
],
'cflags_cc' : [
'-D_THREAD_SAFE'
],
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS':[
'-D_THREAD_SAFE',
],
},
}
},
{
"target_name": "mapnik_vector_tile_impl",
'dependencies': [ 'vector_tile' ],
'hard_dependency': 1,
"type": "static_library",
"sources": [
"<!@(find ../src/ -name '*.cpp')"
],
'defines' : [
"<@(common_defines)"
],
'conditions': [
['enable_sse == "true"', {
'defines' : [ 'SSE_MATH' ]
}]
],
'cflags_cc' : [
'<!@(mapnik-config --cflags)'
],
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS':[
'<!@(mapnik-config --cflags)'
],
},
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/'
],
'defines' : [
"<@(common_defines)"
],
'cflags_cc' : [
'<!@(mapnik-config --cflags)'
],
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS':[
'<!@(mapnik-config --cflags)'
],
},
'libraries':[
'<!@(mapnik-config --libs)',
'<!@(mapnik-config --ldflags)',
'-lmapnik-wkt',
'-lmapnik-json',
'<!@(mapnik-config --dep-libs)',
'-lprotobuf-lite',
'-lz'
],
}
},
{
"target_name": "tests",
'dependencies': [ 'mapnik_vector_tile_impl' ],
"type": "executable",
"defines": [
"<@(common_defines)",
"MAPNIK_PLUGINDIR=<(MAPNIK_PLUGINDIR)"
],
'conditions': [
['enable_sse == "true"', {
'defines' : [ 'SSE_MATH' ]
}]
],
"sources": [
"<!@(find ../test/ -name '*.cpp')"
],
"include_dirs": [
"../src",
'../test',
'../test/utils'
]
},
{
"target_name": "vtile-transform",
'dependencies': [ 'mapnik_vector_tile_impl' ],
"type": "executable",
"defines": [
"<@(common_defines)",
"MAPNIK_PLUGINDIR=<(MAPNIK_PLUGINDIR)"
],
'conditions': [
['enable_sse == "true"', {
'defines' : [ 'SSE_MATH' ]
}]
],
"sources": [
"../bench/vtile-transform.cpp"
],
"include_dirs": [
"../src",
]
},
{
"target_name": "vtile-decode",
'dependencies': [ 'mapnik_vector_tile_impl' ],
"type": "executable",
"defines": [
"<@(common_defines)",
"MAPNIK_PLUGINDIR=<(MAPNIK_PLUGINDIR)"
],
'conditions': [
['enable_sse == "true"', {
'defines' : [ 'SSE_MATH' ]
}]
],
"sources": [
"../bench/vtile-decode.cpp"
],
"include_dirs": [
"../src",
]
},
{
"target_name": "vtile-encode",
'dependencies': [ 'mapnik_vector_tile_impl' ],
"type": "executable",
"defines": [
"<@(common_defines)",
"MAPNIK_PLUGINDIR=<(MAPNIK_PLUGINDIR)"
],
'conditions': [
['enable_sse == "true"', {
'defines' : [ 'SSE_MATH' ]
}]
],
"sources": [
"../bench/vtile-encode.cpp"
],
"include_dirs": [
"../src",
]
},
{
"target_name": "vtile-edit",
'dependencies': [ 'mapnik_vector_tile_impl' ],
"type": "executable",
"defines": [
"<@(common_defines)",
"MAPNIK_PLUGINDIR=<(MAPNIK_PLUGINDIR)"
],
'conditions': [
['enable_sse == "true"', {
'defines' : [ 'SSE_MATH' ]
}]
],
"sources": [
"../bin/vtile-edit.cpp"
],
"include_dirs": [
"../src",
]
},
{
"target_name": "tileinfo",
'dependencies': [ 'vector_tile' ],
"type": "executable",
"sources": [
"../examples/c++/tileinfo.cpp"
],
'conditions': [
['enable_sse == "true"', {
'defines' : [ 'SSE_MATH' ]
}]
],
"include_dirs": [
"../src"
],
'libraries':[
'-L<!@(mapnik-config --prefix)/lib',
'<!@(mapnik-config --ldflags)',
'-lz'
],
'cflags_cc' : [
'-D_THREAD_SAFE',
'<!@(mapnik-config --cflags)'
],
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS':[
'-D_THREAD_SAFE',
'<!@(mapnik-config --cflags)'
],
}
}
]
}
|
StarcoderdataPython
|
3326385
|
<filename>dakotathon/plugins/__init__.py
"""Components that can be called by Dakota."""
|
StarcoderdataPython
|
3325110
|
from abc import ABC, abstractmethod
from typing import Dict, Union
class Variable(ABC):
"""
Abstract variable class
Args:
type (str): The variable type
name (str): The variable name
value: (int, float): The variable value
Attributes:
type (str): The variable type
name (str): The variable name
value: (int, float): The variable value
comment (str): The variable comment
"""
def __init__(self, type: str, name: str, value: Union[int, float]):
self.type = type
self.name = name
self.value = value
self.comment = ''
# allowed types?
    def set_comment(self, comment: str):
"""
Adds a comment to the variable
Args:
comment (str): The comment
"""
self.comment = comment
@abstractmethod
def validate(self):
pass
@abstractmethod
def generate(self):
pass
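# A minimal sketch of a concrete subclass, shown only to illustrate how the abstract
# methods might be implemented; the class name and output format are assumptions,
# not part of the original module.
class IntVariable(Variable):
    def __init__(self, name: str, value: int):
        super().__init__('int', name, value)
    def validate(self):
        # Accept only integer values for this variable type.
        if not isinstance(self.value, int):
            raise ValueError('{} must be an int, got {}'.format(self.name, type(self.value).__name__))
    def generate(self):
        # Render the variable as a simple "name = value" line, with an optional trailing comment.
        suffix = '  # {}'.format(self.comment) if self.comment else ''
        return '{} = {}{}'.format(self.name, self.value, suffix)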
|
StarcoderdataPython
|
3249690
|
<gh_stars>0
from numpy import *
from numpy.random import *
from LabFuncs import *
from Params import *
from HaloFuncs import *
from WIMPFuncs import *
import pandas
# Halo params
HaloModel = SHMpp
v0 = HaloModel.RotationSpeed
v_esc = HaloModel.EscapeSpeed
beta = HaloModel.SausageBeta
sig_beta = HaloModel.SausageDispersionTensor
sig_iso = array([1.0,1.0,1.0])*v0/sqrt(2.0)
# Load shards
df = pandas.read_csv('../data/FitShards_red.csv')
names = df.group_id
nshards = size(names)
velocities = zeros(shape=(nshards,3))
dispersions = zeros(shape=(nshards,3))
velocities[0:(nshards),0] = df.vx # stream velocities
velocities[0:(nshards),1] = df.vy
velocities[0:(nshards),2] = df.vz
dispersions[0:(nshards),0] = df.sigx # dispersion tensors
dispersions[0:(nshards),1] = df.sigy
dispersions[0:(nshards),2] = df.sigz
pops = df.population
Psun = df.Psun
weights = ShardsWeights(names,pops,Psun)
iS1 = 0
iS2 = arange(1,3)
iRet = arange(3,10)
iPro = arange(10,25)
iLowE = arange(25,59)
# v_mins
n = 1000
v_min = linspace(0.01,750.0,n)
# Times
ndays = 100
days = linspace(0.0,365.0-365.0/ndays,ndays)
# Calculate everything
gmin_Iso = zeros(shape=(ndays,n))
gmin_Iso_gf = zeros(shape=(ndays,n))
gmin_Saus = zeros(shape=(ndays,n))
gmin_Saus_gf = zeros(shape=(ndays,n))
gmin_S1 = zeros(shape=(ndays,n))
gmin_S1_gf = zeros(shape=(ndays,n))
gmin_S2 = zeros(shape=(ndays,n))
gmin_S2_gf = zeros(shape=(ndays,n))
gmin_Ret = zeros(shape=(ndays,n))
gmin_Ret_gf = zeros(shape=(ndays,n))
gmin_Pro = zeros(shape=(ndays,n))
gmin_Pro_gf = zeros(shape=(ndays,n))
gmin_LowE = zeros(shape=(ndays,n))
gmin_LowE_gf = zeros(shape=(ndays,n))
for i in range(0,ndays):
gmin_Iso[i,:] = gvmin_Triaxial(v_min,days[i],sig_iso)
gmin_Iso_gf[i,:] = gvmin_Triaxial(v_min,days[i],sig_iso,GravFocus=True)
gmin_Saus[i,:] = gvmin_Triaxial(v_min,days[i],sig_beta)
gmin_Saus_gf[i,:] = gvmin_Triaxial(v_min,days[i],sig_beta,GravFocus=True)
gmin_sub = zeros(shape=(nshards,n))
gmin_sub_gf = zeros(shape=(nshards,n))
for isub in range(0,nshards):
v_s = velocities[isub,:]
sig_s = dispersions[isub,:]
gmin_sub[isub,:] = weights[isub]*gvmin_Triaxial(v_min,days[i],sig_s,v_shift=v_s)
gmin_sub_gf[isub,:] = weights[isub]*gvmin_Triaxial(v_min,days[i],sig_s,v_shift=v_s,GravFocus=True)
gmin_S1[i,:] = gmin_sub[iS1,:]
gmin_S1_gf[i,:] = gmin_sub_gf[iS1,:]
gmin_S2[i,:] = sum(gmin_sub[iS2,:],0)
gmin_S2_gf[i,:] = sum(gmin_sub_gf[iS2,:],0)
gmin_Ret[i,:] = sum(gmin_sub[iRet,:],0)
gmin_Ret_gf[i,:] = sum(gmin_sub_gf[iRet,:],0)
gmin_Pro[i,:] = sum(gmin_sub[iPro,:],0)
gmin_Pro_gf[i,:] = sum(gmin_sub_gf[iPro,:],0)
gmin_LowE[i,:] = sum(gmin_sub[iLowE,:],0)
gmin_LowE_gf[i,:] = sum(gmin_sub_gf[iLowE,:],0)
print('day = ',i+1,'of',ndays,sum(gmin_S1[i,:]),sum(gmin_S1_gf[i,:]))
savetxt('../data/gvmin/gvmin_Halo.txt',vstack((v_min,gmin_Iso)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_Halo_GF.txt',vstack((v_min,gmin_Iso_gf)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_Saus.txt',vstack((v_min,gmin_Saus)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_Saus_GF.txt',vstack((v_min,gmin_Saus_gf)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_S1.txt',vstack((v_min,gmin_S1)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_S1_GF.txt',vstack((v_min,gmin_S1_gf)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_S2.txt',vstack((v_min,gmin_S2)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_S2_GF.txt',vstack((v_min,gmin_S2_gf)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_Ret.txt',vstack((v_min,gmin_Ret)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_Ret_GF.txt',vstack((v_min,gmin_Ret_gf)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_Pro.txt',vstack((v_min,gmin_Pro)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_Pro_GF.txt',vstack((v_min,gmin_Pro_gf)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_LowE.txt',vstack((v_min,gmin_LowE)),delimiter='\t',fmt="%1.12f")
savetxt('../data/gvmin/gvmin_LowE_GF.txt',vstack((v_min,gmin_LowE_gf)),delimiter='\t',fmt="%1.12f")
|
StarcoderdataPython
|
3311907
|
<reponame>kcarnold/sentiment-slant-gi18
import kenlm
import heapq
import pickle
import os
import sys
import numpy as np
import nltk
import cytoolz
import joblib
import random
from scipy.misc import logsumexp
import itertools
from functools import partial
from .paths import paths
from .tokenization import tokenize_mid_document
from .lang_model import Model, LMClassifier
from .diversity import scalar_diversity
from . import suffix_array, clustering, manual_bos
LOG10 = np.log(10)
# Sentiment global flags
SHOW_SENTIMENT_OPTIONS = False
SENTIMENT_METHOD = 'full'
MAX_LOGPROB_PENALTY = -1.
PRELOAD_MODELS = '''
yelp_train-balanced
yelp_train-1star
yelp_train-2star
yelp_train-3star
yelp_train-4star
yelp_train-5star
yelp_topic_seqs
airbnb_train
yelp_lowfreq
yelp_hifreq'''.split()
'''
yelp_train-stars12
yelp_train-stars45
sotu
'''
[Model.get_or_load_model(name) for name in PRELOAD_MODELS]
get_model = Model.get_model
import json
star_prior_counts = np.array(json.load(open(paths.models / 'star_counts.json')))
sentiment_classifier = LMClassifier([get_model(f'yelp_train-{star}star') for star in range(1, 6)], star_prior_counts)
# def print(*a, **kw): pass
enable_sufarr = False
enable_bos_suggs = False
use_word_vecs = False
if enable_sufarr:
print("Loading docs...", end='', file=sys.stderr, flush=True)
docs = pickle.load(open(os.path.join(paths.models, 'tokenized_reviews.pkl'), 'rb'))
print(', suffix array...', end='', file=sys.stderr, flush=True)
sufarr = suffix_array.DocSuffixArray(docs=docs, **joblib.load(os.path.join(paths.models, 'yelp_train_sufarr.joblib')))
docs_by_id_fname = os.path.join(paths.models, 'yelp_train_docs_by_id.pkl')
if os.path.exists(docs_by_id_fname):
print(', loading id-mapped docs...', end='', file=sys.stderr, flush=True)
docs_by_id = pickle.load(open(docs_by_id_fname, 'rb'))
else:
print(', mapping ids...', end='', file=sys.stderr, flush=True)
_str2id = {word: idx for idx, word in enumerate(get_model('yelp_train').id2str)}
docs_by_id = [[_str2id.get(word, 0) for word in doc] for doc in docs]
pickle.dump(docs_by_id, open(docs_by_id_fname, 'wb'), -1)
print(" Done.", file=sys.stderr)
if enable_bos_suggs:
print("Loading goal-oriented suggestion data...", end='', file=sys.stderr, flush=True)
with open(os.path.join(paths.parent, 'models', 'goal_oriented_suggestion_data.pkl'), 'rb') as f:
clizer = pickle.load(f)
clizer.topic_continuation_scores = np.load('topic_continuation_scores.npy')
keep = np.full(len(clizer.unique_starts), True, dtype=bool)
keep[clizer.omit] = 0
np.mean(keep)
clizer.scores_by_cluster = clizer.scores_by_cluster[keep]
clizer.topic_continuation_scores = clizer.topic_continuation_scores[keep]
clizer.unique_starts = [clizer.unique_starts[i] for i in np.flatnonzero(keep)]
likelihood_bias = logsumexp(clizer.scores_by_cluster, axis=1, keepdims=True)
clizer.scores_by_cluster = clizer.scores_by_cluster - .85 * likelihood_bias
clizer.topic_continuation_scores_argsort = [np.argsort(clizer.topic_continuation_scores[:,topic])[::-1] for topic in range(clizer.n_clusters)]
clizer.scores_by_cluster_argsort = [np.argsort(clizer.scores_by_cluster[:,topic])[::-1] for topic in range(clizer.n_clusters)]
del keep
del likelihood_bias
topic_tags = [f'<T{i}>' for i in range(clizer.n_clusters)]
topic_seq_model = get_model('yelp_topic_seqs')
topic_word_indices = [topic_seq_model.model.vocab_index(tag) for tag in topic_tags]
print("Done.", file=sys.stderr)
if use_word_vecs:
cnnb = clustering.ConceptNetNumberBatch.load()
def get_vecs_for_words(cnnb, words):
res = np.zeros((len(words), cnnb.ndim))
for i, word in enumerate(words):
try:
res[i] = cnnb[word]
except KeyError:
pass
return res
word_vecs_for_model = {}
def get_word_vecs_for_model(model_name):
if model_name not in word_vecs_for_model:
word_vecs_for_model[model_name] = get_vecs_for_words(cnnb, get_model(model_name).id2str)
return word_vecs_for_model[model_name]
print("Getting word vecs for some models", file=sys.stderr)
get_word_vecs_for_model('yelp_train-balanced')
get_word_vecs_for_model('airbnb_train')
sentiment_starters_by_stars_and_sentnum = json.load(open(paths.models / 'yelp_sentiment_starters.json'))
import numba
@numba.jit
def _next_elt_le(arr, criterion, start, end):
for i in range(start, end):
if arr[i] <= criterion:
return i
return end
def collect_words_in_range(start, after_end, word_idx, docs):
words = []
if start == after_end:
return words
word = docs[sufarr.doc_idx[start]][sufarr.tok_idx[start] + word_idx]
words.append(word)
while True:
before_next_idx = _next_elt_le(sufarr.lcp, word_idx, start, after_end - 1)
if before_next_idx == after_end - 1:
break
next_idx = before_next_idx + 1
word = docs[sufarr.doc_idx[next_idx]][sufarr.tok_idx[next_idx] + word_idx]
words.append(word)
start = next_idx
return words
def softmax(scores):
return np.exp(scores - logsumexp(scores))
def next_word_probs(model, state, prev_word, prefix_logprobs=None, temperature=1., length_bonus_min_length=6, length_bonus_amt=0., pos_weights=None):
next_words, logprobs = model.next_word_logprobs_raw(state, prev_word, prefix_logprobs=prefix_logprobs)
if len(next_words) == 0:
return next_words, logprobs
if length_bonus_amt:
length_bonus_elegible = model.word_lengths[next_words] >= length_bonus_min_length
logprobs = logprobs + length_bonus_amt * length_bonus_elegible
if pos_weights is not None:
poses = model.pos_tags[next_words]
logprobs = logprobs + pos_weights[poses]
logprobs /= temperature
return next_words, softmax(logprobs)
class GenerationFailedException(Exception):
pass
def retry_on_exception(exception, tries):
def decorator(fn):
def wrapper(*a, **kw):
for i in range(tries):
try:
return fn(*a, **kw)
except exception:
continue
except:
raise
return fn(*a, **kw)
return wrapper
return decorator
@retry_on_exception(GenerationFailedException, 10)
def generate_phrase(model, context_toks, length, prefix_logprobs=None, **kw):
if context_toks[0] == '<s>':
state, _ = model.get_state(context_toks[1:], bos=True)
else:
state, _ = model.get_state(context_toks, bos=False)
phrase = context_toks[:]
generated_logprobs = np.empty(length)
for i in range(length):
next_words, probs = next_word_probs(model, state, phrase[-1], prefix_logprobs=prefix_logprobs, **kw)
if len(next_words) == 0:
raise GenerationFailedException
prefix_logprobs = None
picked_subidx = np.random.choice(len(probs), p=probs)
picked_idx = next_words[picked_subidx]
new_state = kenlm.State()
model.model.base_score_from_idx(state, picked_idx, new_state)
state = new_state
word = model.id2str[picked_idx]
phrase.append(word)
generated_logprobs[i] = np.log(probs[picked_subidx])
return phrase[len(context_toks):], generated_logprobs
def generate_phrase_from_sufarr(model, sufarr, context_toks, length, prefix_logprobs=None, temperature=1.):
if context_toks[0] == '<s>':
state, _ = model.get_state(context_toks[1:], bos=True)
else:
state, _ = model.get_state(context_toks, bos=False)
phrase = []
generated_logprobs = np.empty(length)
for i in range(length):
start_idx, end_idx = sufarr.search_range((context_toks[-1],) + tuple(phrase) + ('',))
        next_words = collect_words_in_range(start_idx, end_idx, i + 1, docs)
if prefix_logprobs is not None:
prior_logprobs = np.full(len(next_words), -10)
for logprob, prefix in prefix_logprobs:
for nextword_idx, word in enumerate(next_words):
if word.startswith(prefix):
prior_logprobs[nextword_idx] = logprob
else:
prior_logprobs = None
if len(next_words) == 0:
raise GenerationFailedException
vocab_indices = [model.model.vocab_index(word) for word in next_words]
logprobs = model.eval_logprobs_for_words(state, vocab_indices)
if prior_logprobs is not None:
logprobs += prior_logprobs
logprobs /= temperature
probs = softmax(logprobs)
picked_subidx = np.random.choice(len(probs), p=probs)
picked_idx = vocab_indices[picked_subidx]
new_state = kenlm.State()
model.model.base_score_from_idx(state, picked_idx, new_state)
state = new_state
word = next_words[picked_subidx]
phrase.append(word)
generated_logprobs[i] = np.log(probs[picked_subidx])
prefix_logprobs = None
return phrase, generated_logprobs
def generate_diverse_phrases(model, context_toks, n, length, prefix_logprobs=None, use_sufarr=False, null_logprob_weight=None, **kw):
if model is None:
model = 'yelp_train'
if isinstance(model, str):
model = get_model(model)
if 'pos_weights' in kw:
kw['pos_weights'] = np.array(kw['pos_weights'])
assert not use_sufarr
state, _ = model.get_state(context_toks)
first_words, first_word_probs = next_word_probs(model, state, context_toks[-1], prefix_logprobs=prefix_logprobs, **kw)
if len(first_words) == 0:
return []
res = []
for idx in np.random.choice(len(first_words), min(len(first_words), n), p=first_word_probs, replace=False):
first_word = model.id2str[first_words[idx]]
first_word_logprob = np.log(first_word_probs[idx])
phrase, phrase_logprobs = generate_phrase(model, context_toks + [first_word], length - 1, **kw)
# phrase, phrase_logprobs = generate_phrase_from_sufarr(model, sufarr, context_toks + [first_word], length - 1, **kw)
res.append(([first_word] + phrase, dict(probs=np.hstack(([first_word_logprob], phrase_logprobs)).tolist())))
return res
from collections import namedtuple
BeamEntry = namedtuple("BeamEntry", 'score, words, done, penultimate_state, last_word_idx, num_chars, extra')
def beam_search_phrases_init(model, start_words, **kw):
if isinstance(model, str):
model = get_model(model)
start_state, start_score = model.get_state(start_words, bos=False)
return [(0., [], False, start_state, model.model.vocab_index(start_words[-1]), 0, None)]
def beam_search_phrases_extend(model, beam, *, beam_width, iteration_num, length_after_first, prefix_logprobs=None, rare_word_bonus=0., constraints):
if isinstance(model, str):
model = get_model(model)
unigram_probs = model.unigram_probs_wordsonly
avoid_letter = constraints.get('avoidLetter')
bigrams = model.unfiltered_bigrams if iteration_num == 0 else model.filtered_bigrams
DONE = 2
new_beam = [ent for ent in beam if ent[DONE]]
new_beam_size = len(new_beam)
for entry in beam:
score, words, done, penultimate_state, last_word_idx, num_chars, _ = entry
if done:
continue
else:
if iteration_num > 0:
last_state = kenlm.State()
model.model.base_score_from_idx(penultimate_state, last_word_idx, last_state)
else:
last_state = penultimate_state
probs = None
if iteration_num == 0 and prefix_logprobs is not None:
next_words = []
probs = []
for prob, prefix in prefix_logprobs:
for word, word_idx in model.vocab_trie.items(prefix):
next_words.append(word_idx)
probs.append(prob)
else:
# print(id2str[last_word])
next_words = bigrams.get(last_word_idx, [])
if len(next_words) < 10:
if iteration_num == 0:
# Fall back to all common words.
next_words = model.most_common_words_by_idx
else:
# Use the larger set of possible next words
next_words = model.unfiltered_bigrams.get(last_word_idx, [])
if len(next_words) < 10:
next_words = model.most_common_words_by_idx
new_state = kenlm.State()
for next_idx, word_idx in enumerate(next_words):
if word_idx == model.eos_idx or word_idx == model.eop_idx:
continue
if probs is not None:
prob = probs[next_idx]
else:
prob = 0.
word = model.id2str[word_idx]
if avoid_letter is not None and avoid_letter in word:
continue
if word[0] in '.?!':
continue
unigram_bonus = -unigram_probs[word_idx]*rare_word_bonus if iteration_num > 0 and rare_word_bonus and word not in words else 0.
main_model_score = LOG10 * model.model.base_score_from_idx(last_state, word_idx, new_state)
new_score = score + prob + unigram_bonus + main_model_score
new_words = words + [word]
new_num_chars = num_chars + 1 + len(word) if iteration_num else 0
done = new_num_chars >= length_after_first
new_entry = (new_score, new_words, done, last_state, word_idx, new_num_chars, None)
if new_beam_size == beam_width:
heapq.heappushpop(new_beam, new_entry)
# Beam size unchanged.
else:
new_beam.append(new_entry)
new_beam_size += 1
if new_beam_size == beam_width:
heapq.heapify(new_beam)
# assert len(new_beam) <= beam_width
return new_beam
def beam_search_phrases_loop(model, beam, *, length_after_first, prefix_logprobs=None, start_idx=0, **kw):
for iteration_num in range(start_idx, length_after_first):
beam = beam_search_phrases_extend(model, beam, iteration_num=iteration_num, length_after_first=length_after_first,
prefix_logprobs=prefix_logprobs, **kw)
prefix_logprobs = None
return [BeamEntry(*ent) for ent in sorted(beam, reverse=True)]
def beam_search_phrases(model, start_words, **kw):
beam = beam_search_phrases_init(model, start_words, **kw)
beam = beam_search_phrases_loop(model, beam, **kw)
beam.sort(reverse=True)
return [BeamEntry(*ent) for ent in beam]
def beam_search_sufarr_init(model, start_words):
start_state, start_score = model.get_state(start_words, bos=False)
return [(0., [], False, start_state, None, 0, (0, len(sufarr.doc_idx), model.null_context_state))]
def beam_search_sufarr_extend(model, beam, context_tuple, iteration_num, beam_width, length_after_first, *, word_bonuses=None, prefix='', null_logprob_weight=0., constraints):
if isinstance(model, str):
model = get_model(model)
avoid_letter = constraints.get('avoidLetter')
def candidates():
for entry in beam:
score, words, done, penultimate_state, last_word_idx, num_chars, (prev_start_idx, prev_end_idx, penultimate_state_null) = entry
if done:
yield entry
continue
if last_word_idx is not None:
last_state = kenlm.State()
model.model.base_score_from_idx(penultimate_state, last_word_idx, last_state)
last_state_null = kenlm.State()
model.model.base_score_from_idx(penultimate_state_null, last_word_idx, last_state_null)
else:
last_state = penultimate_state
last_state_null = penultimate_state_null
start_idx, end_idx = sufarr.search_range(context_tuple + tuple(words) + (prefix,), lo=prev_start_idx, hi=prev_end_idx)
next_word_ids = collect_words_in_range(start_idx, end_idx, iteration_num + 1, docs_by_id)
if len(next_word_ids) == 0:
assert iteration_num == 0 or model.id2str[last_word_idx] == '</S>', "We only expect to run out of words at an end-of-sentence that's also an end-of-document."
continue
new_state = kenlm.State()
for next_idx, word_idx in enumerate(next_word_ids):
if word_idx == 0: continue
word = model.id2str[word_idx]
if avoid_letter is not None and avoid_letter in word:
continue
if word[0] in '.?!':
continue
new_words = words + [word]
new_num_chars = num_chars + 1 + len(word) if iteration_num > 0 else 0
logprob = LOG10 * model.model.base_score_from_idx(last_state, word_idx, new_state)
unigram_bonus = word_bonuses[word_idx] if word not in words else 0.
logprob_null = LOG10 * model.model.base_score_from_idx(last_state_null, word_idx, new_state)
new_score = score + logprob + null_logprob_weight * logprob_null + unigram_bonus
done = new_num_chars >= length_after_first
yield new_score, new_words, done, last_state, word_idx, new_num_chars, (start_idx, end_idx, last_state_null)#bonuses + [unigram_bonus])
return heapq.nlargest(beam_width, candidates())
def tap_decoder(char_model, before_cursor, cur_word, key_rects, beam_width=100, scale=100.):
keys = [k['key'] for k in key_rects]
rects = [k['rect'] for k in key_rects]
centers = [((rect['left'] + rect['right']) / 2, (rect['top'] + rect['bottom']) / 2) for rect in rects]
beam_width = 100
beam = [(0., '', None)]
for item in cur_word:
if 'tap' not in item:
letter = item['letter']
letters_and_distances = [(letter, 0)]
else:
x, y = item['tap']
sq_dist_to_center = [(x - rect_x) ** 2. + (y - rect_y) ** 2. for rect_x, rect_y in centers]
letters_and_distances = zip(keys, sq_dist_to_center)
new_beam = []
# print(np.min(sq_dist_to_center) / scale, keys[np.argmin(sq_dist_to_center)])
for score, sofar, penultimate_state in beam:
last_state = kenlm.State()
if sofar:
char_model.BaseScore(penultimate_state, sofar[-1], last_state)
else:
char_model.NullContextWrite(last_state)
for c in before_cursor:
next_state = kenlm.State()
char_model.BaseScore(last_state, c, next_state)
last_state = next_state
next_state = kenlm.State()
for key, dist in letters_and_distances:
new_so_far = sofar + key
new_beam.append((score + char_model.BaseScore(last_state, key, next_state) - dist / scale, new_so_far, last_state))
beam = sorted(new_beam, reverse=True)[:beam_width]
return [(prob, word) for prob, word, state in sorted(beam, reverse=True)[:10]]
def tokenize_sofar(sofar):
toks = tokenize_mid_document(sofar.lower().replace(' .', '.').replace(' ,', ','))[0]
if toks[-1] != '':
print("WEIRD: somehow we got a mid-word sofar:", repr(sofar))
assert toks[0] == "<D>"
assert toks[1] == "<P>"
assert toks[2] == "<S>"
return ['<s>', "<D>"] + toks[3:-1]
def phrases_to_suggs(phrases):
return [Recommendation(words=phrase, meta=meta) for phrase, meta in phrases]
def predict_forward(domain, toks, beam_width, length_after_first, constraints):
model = get_model(domain)
first_word = toks[-1]
if first_word in '.?!':
return [first_word], None
continuations = beam_search_phrases(model, toks,
beam_width=beam_width, length_after_first=length_after_first, constraints=constraints)
if len(continuations) > 0:
continuation = continuations[0].words
else:
continuation = []
return [first_word] + continuation, None
def try_to_match_topic_distribution(clizer, target_dist, sents):
def normal_lik(x, sigma):
return np.exp(-.5*(x/sigma)**2) / (2*np.pi*sigma)
sent_cluster_distribs = cytoolz.thread_first(
sents,
clizer.vectorize_sents,
clustering.normalize_vecs,
clizer.clusterer.transform,
(normal_lik, .5),
clustering.normalize_dists
)
new_dists_opts = np.eye(clizer.n_clusters)
from scipy.special import kl_div
with_new_dist = np.array([np.concatenate((sent_cluster_distribs, new_dist_opt[None]), axis=0) for new_dist_opt in new_dists_opts])
dist_with_new_dist = clustering.normalize_dists(np.mean(with_new_dist, axis=1))
return np.argsort(kl_div(dist_with_new_dist, target_dist).sum(axis=1))[:3].tolist()
def get_topic_seq(sents):
if len(sents) == 0:
return []
cluster_distances = cytoolz.thread_first(
sents,
clizer.vectorize_sents,
clustering.normalize_vecs,
clizer.clusterer.transform)
return np.argmin(cluster_distances, axis=1).tolist()
def get_bos_suggs(sofar, sug_state, *, bos_sugg_flag, constraints, verbose=False):
if sug_state is None:
sug_state = {}
if 'suggested_already' not in sug_state:
sug_state['suggested_already'] = set()
suggested_already = sug_state['suggested_already']
if verbose:
print("Already suggested", suggested_already)
sents = nltk.sent_tokenize(sofar)
topic_seq = get_topic_seq(sents)
if False:
topics_to_suggest = try_to_match_topic_distribution(
clizer=clizer,
target_dist=clizer.target_dists['best'],
sents=sents)
elif bos_sugg_flag == 'continue':
if len(topic_seq) == 0:
return None, sug_state, None
last_topic = topic_seq[-1]
topics_to_suggest = [last_topic] * 3
else:
# Find the most likely next topics.
topic_seq_state = topic_seq_model.get_state([topic_tags[topic] for topic in topic_seq], bos=True)[0]
topic_likelihood = topic_seq_model.eval_logprobs_for_words(topic_seq_state, topic_word_indices)
if len(topic_seq):
# Ensure that we don't continue the same topic.
last_topic = topic_seq[-1]
topic_likelihood[last_topic] = -np.inf
# Penalize already-covered topics.
for topic in topic_seq:
topic_likelihood[topic] -= 100.0
topics_to_suggest = np.argsort(topic_likelihood)[-3:][::-1].tolist()
if verbose:
print(f"seq={topic_seq} flag={bos_sugg_flag} suggesting={topics_to_suggest}")
if bos_sugg_flag == 'continue':
argsort_scores_for_topic = clizer.topic_continuation_scores_argsort
else:
argsort_scores_for_topic = clizer.scores_by_cluster_argsort
avoid_letter = constraints.get('avoidLetter')
phrases = []
first_words = []
for topic in topics_to_suggest:
# Try to find a start for this topic that doesn't overlap an existing one in first word.
for suggest_idx in argsort_scores_for_topic[topic]:
phrase = clizer.unique_starts[suggest_idx]
if phrase[0] in first_words:
continue
beginning = ' '.join(phrase[:3])
if beginning in suggested_already:
print("Taboo:", beginning)
continue
if avoid_letter is not None and (avoid_letter in beginning or avoid_letter in ''.join(phrase)):
continue
first_words.append(phrase[0])
suggested_already.add(beginning)
phrases.append((phrase, {'bos': True}))
break
return phrases, sug_state, dict(topic_seq=topic_seq, topics_to_suggest=topics_to_suggest)
def get_sentence_enders(model, start_words):
if isinstance(model, str):
model = get_model(model)
start_state, start_score = model.get_state(start_words, bos=False)
toks = list('.?!')
end_indices = [model.model.vocab_index(tok) for tok in toks]
scores = np.exp(model.eval_logprobs_for_words(start_state, end_indices))
cum_score = np.sum(scores)
if cum_score > 2/3:
return [toks[i] for i in np.argsort(scores)[::-1][:2]]
elif cum_score > 1/3:
return [toks[np.argmax(scores)]]
return []
# Based on https://github.com/python/cpython/blob/3.6/Lib/concurrent/futures/process.py
def _get_chunks(*iterables, chunksize):
""" Iterates over zip()ed iterables in chunks. """
it = zip(*iterables)
while True:
chunk = tuple(itertools.islice(it, chunksize))
if not chunk:
return
yield chunk
def _process_chunk(fn, chunk):
""" Processes a chunk of an iterable passed to map.
Runs the function passed to map() on a chunk of the
iterable passed to map.
This function is run in a separate process.
"""
return [fn(*args) for args in chunk]
def map_as_jobs(executor, fn, arr, chunksize=8):
"""Launches jobs that run a chunk of chunksize elements of arr through fn.
Each job will yield an array; you can use itertools.chain.from_iterable(results).
"""
return [executor.submit(partial(_process_chunk, fn), chunk) for chunk in _get_chunks(arr, chunksize=chunksize)]
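# Usage sketch (not part of the original module): collect the chunked results into a flat
# list. Assumes `executor` is a concurrent.futures executor and `fn` takes a single
# argument, matching how map_as_jobs() packs `arr` into 1-tuples.
#     from concurrent.futures import ProcessPoolExecutor
#     import itertools
#     with ProcessPoolExecutor() as executor:
#         futures = map_as_jobs(executor, fn, items, chunksize=8)
#         flat = list(itertools.chain.from_iterable(f.result() for f in futures))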
def Recommendation(words, meta={}):
return dict(words=words, meta=meta)
def get_synonyms(model, state, toks, query_word_idx, *, num_sims, num_alternatives):
from sklearn.metrics import pairwise
word_vecs = get_word_vecs_for_model(model.name)
# Get unconditional next words
next_words, logprobs = model.next_word_logprobs_raw(state, toks[-1])
# Find synonyms that are less likely.
query_word_vec = word_vecs[query_word_idx]
likelihood_threshold = model.unigram_probs[query_word_idx]
less_frequent_indices = [i for i, idx in enumerate(next_words) if model.unigram_probs_wordsonly[idx] < likelihood_threshold]
if len(less_frequent_indices) == 0:
return []
next_words = np.array(next_words)[less_frequent_indices]
logprobs = logprobs[less_frequent_indices]
vecs_for_words = word_vecs[next_words]
sims = pairwise.cosine_similarity(query_word_vec[None, :], vecs_for_words)[0]
candidates = np.argsort(sims)[-num_sims:][::-1]
relevances = logprobs[candidates]
return [Recommendation([model.id2str[next_words[idx]]]) for idx in candidates[np.argsort(relevances)[::-1][:num_alternatives]]]
def get_split_recs(sofar, cur_word, flags={}):
domain = flags.get('domain', 'yelp_train-balanced')
num_sims = flags.get('num_sims', 5)
num_alternatives = flags.get('num_alternatives', 5)
model = get_model(domain)
toks = tokenize_sofar(sofar)
cur_word_letters = ''.join(item['letter'] for item in cur_word)
prefix_logprobs = [(0., cur_word_letters)] if len(cur_word) > 0 else None
state = model.get_state(toks)[0]
next_words, logprobs = model.next_word_logprobs_raw(state, toks[-1], prefix_logprobs=prefix_logprobs)
logprob_argsort = np.argsort(logprobs)
predictions = []
for idx in logprob_argsort[::-1]:
word = model.id2str[next_words[idx]]
if word[0] in ',.?!<':
continue
predictions.append(Recommendation([word]))
if len(predictions) == 3:
break
result = dict(predictions=predictions)
if len(predictions) == 0:
return result
if len(cur_word) > 0:
# Offer uncommon synonyms of the most likely next word
to_replace = cur_word_letters
replacement_start_idx = len(sofar)
query_word_idx = next_words[np.argmax(logprobs)]
query_state = state
query_toks = toks
else:
to_replace = toks[-1]
if to_replace[0] not in '.?!<' and to_replace in sofar:
replacement_start_idx = sofar.rindex(to_replace)
query_word_idx = model.model.vocab_index(to_replace)
query_toks = toks[:-1]
query_state = model.get_state(query_toks)[0]
else:
query_word_idx = 0
if query_word_idx != 0:
replacement_end_idx = replacement_start_idx + len(to_replace)
result['replacement_range'] = [replacement_start_idx, replacement_end_idx]
result['synonyms'] = get_synonyms(
model, query_state, query_toks, query_word_idx, num_sims=num_sims, num_alternatives=num_alternatives)
return result
def get_clustered_recs(sofar, cur_word, flags={}):
from sklearn.cluster import AffinityPropagation
domain = flags.get('domain', 'yelp_train-balanced')
n_clusters = flags.get('n_clusters', 5)
model = get_model(domain)
word_vecs = get_word_vecs_for_model(domain)
toks = tokenize_sofar(sofar)
prefix_logprobs = [(0., ''.join(item['letter'] for item in cur_word))] if len(cur_word) > 0 else None
state = model.get_state(toks)[0]
next_words, logprobs = model.next_word_logprobs_raw(state, toks[-1], prefix_logprobs=prefix_logprobs)
# TODO: cur_word
if len(next_words) < n_clusters:
return dict(clusters=[[(model.id2str[idx], logprob.item())] for idx, logprob in zip(next_words, logprobs)])
vecs_for_words = word_vecs[next_words]
vecs_for_clustering = vecs_for_words[np.argsort(logprobs)[-30:]]
clusterer = AffinityPropagation(verbose=True)
clusterer.fit(vecs_for_clustering)
cluster_assignment = clusterer.predict(vecs_for_words)
    relevance = logprobs  # - .5 * model.unigram_probs[next_words]
clusters = []
for cluster in range(cluster_assignment.max() + 1):
members = np.flatnonzero(cluster_assignment == cluster)
if len(members) == 0:
continue
relevances = relevance[members]
new_order = np.argsort(relevances)[::-1][:10]
members = members[new_order]
print(cluster, ', '.join('{}[{:.2f}]'.format(model.id2str[next_words[idx]], relevance[idx]) for idx in members))
clusters.append([(model.id2str[next_words[idx]], relevance[idx].item()) for idx in members])
clusters.sort(key=lambda x: -x[0][1])
return dict(clusters=clusters)
def get_suggestions_async(executor, *, sofar, cur_word, domain,
rare_word_bonus, use_sufarr, temperature, use_bos_suggs,
length_after_first=17, sug_state=None, word_bonuses=None, prewrite_info=None,
constraints={},
promise=None,
sentiment=None,
**kw):
if SENTIMENT_METHOD == 'simple':
if sentiment == 1:
sentiment = None
assert domain == 'yelp_train-balanced'
domain = 'yelp_train-stars12'
elif sentiment == 5:
sentiment = None
assert domain == 'yelp_train-balanced'
domain = 'yelp_train-stars45'
model = get_model(domain)
toks = tokenize_sofar(sofar)
prefix_logprobs = [(0., ''.join(item['letter'] for item in cur_word))] if len(cur_word) > 0 else None
prefix = ''.join(item['letter'] for item in cur_word)
# prefix_probs = tap_decoder(sofar[-12:].replace(' ', '_'), cur_word, key_rects)
if sug_state is None:
sug_state = {}
if 'suggested_already' not in sug_state:
sug_state['suggested_already'] = {}
suggested_already = sug_state['suggested_already']
if word_bonuses is None and prewrite_info is not None:
known_words = set()
unknown_words = set()
word_bonuses = np.zeros(len(model.id2str))
unigram_probs = model.unigram_probs_wordsonly
for word in prewrite_info['text'].split():
idx = model.model.vocab_index(word)
if idx != 0:
word_bonuses[idx] = prewrite_info['amount'] * -unigram_probs[idx]
known_words.add(word)
else:
unknown_words.add(word)
print(f"Bonusing {len(known_words)} prewrite words: {' '.join(sorted(known_words))}")
print(f"Not bonusing {len(unknown_words)} unknown words: {' '.join(sorted(unknown_words))}")
# Beginning of sentence suggestions
if use_bos_suggs and use_bos_suggs != 'manual' and not enable_bos_suggs:
print("Warning: requested BOS suggs but they're not enabled.")
use_bos_suggs = False
is_bos = len(cur_word) == 0 and toks[-1] in ['<D>', '<S>']
if use_bos_suggs and is_bos:
if promise is not None:
print("Warning: promise enabled but making beginning-of-sentence suggestions!")
if use_bos_suggs == 'manual':
phrases, sug_state = manual_bos.get_manual_bos(sofar, sug_state)
elif use_bos_suggs in ['diverse', 'continue']:
phrases, sug_state, _ = get_bos_suggs(sofar, sug_state, bos_sugg_flag=use_bos_suggs, constraints=constraints)
else:
phrases = None
if phrases is not None:
return phrases, sug_state
if use_sufarr and not enable_sufarr:
print("Warning: requested sufarr but not enabled.")
use_sufarr = False
if temperature == 0:
if use_sufarr and len(cur_word) == 0:
assert sentiment is None, "sufarr doesn't support sentiment yet"
assert promise is None, "sufarr doesn't support promises yet"
beam_width = 100
beam = beam_search_sufarr_init(model, toks)
context_tuple = (toks[-1],)
if word_bonuses is None:
# The multiplication makes a copy.
word_bonuses = model.unigram_probs_wordsonly * -rare_word_bonus
else:
word_bonuses = word_bonuses.copy()
# Don't double-bonus words that have already been used.
for word in set(toks):
word_idx = model.model.vocab_index(word)
word_bonuses[word_idx] = 0.
for i in range(length_after_first):
beam_chunks = cytoolz.partition_all(8, beam)
parallel_futures = yield [executor.submit(
beam_search_sufarr_extend, domain, chunk, context_tuple, i, beam_width, length_after_first=length_after_first, word_bonuses=word_bonuses, prefix=prefix, constraints=constraints, **kw)
for chunk in beam_chunks]
parallel_beam = list(cytoolz.concat(parallel_futures))
prefix = ''
# FIXME: maintain diversity in first-words here?
beam = heapq.nlargest(beam_width, parallel_beam)
# entry 2 is "DONE"
if all(ent[2] for ent in beam):
break
ents = [BeamEntry(*ent) for ent in beam]
if len(ents) == 0:
# Fall back on the full LM, but just for one word.
first_word_ents = yield executor.submit(beam_search_phrases, domain, toks, beam_width=100, length_after_first=1, prefix_logprobs=prefix_logprobs, constraints=constraints)
phrases = [(ent.words, None) for ent in first_word_ents[:3]]
else:
result = [ents.pop(0)]
first_words = {ent.words[0] for ent in result}
while len(result) < 3 and len(ents) > 0:
ents.sort(reverse=True, key=lambda ent: (ent.words[0] not in first_words, ent.score))
best = ents.pop(0)
first_words.add(best.words[0])
result.append(best)
phrases = [([word for word in ent.words if word[0] != '<'], None) for ent in result]
else: # sufarr
# Use beam search on LM.
if prefix_logprobs is None:
sentence_enders = yield executor.submit(get_sentence_enders, domain, toks)
else:
sentence_enders = []
beam_search_kwargs = dict(constraints=constraints)
if sentiment:
clf_startstate = sentiment_classifier.get_state(toks)
# Include a broader range of first words if we may need to diversify by sentiment after the fact.
num_first_words = 3 - len(sentence_enders) if sentiment is None else 20
num_intermediates = 20
max_logprob_penalty = MAX_LOGPROB_PENALTY
# Launch a job to get first words.
if num_first_words:
first_word_ents = yield executor.submit(beam_search_phrases,
domain, toks, beam_width=num_first_words, length_after_first=1, prefix_logprobs=prefix_logprobs, **beam_search_kwargs)
else:
first_word_ents = []
first_words = {ent[1][0]: fwent_idx for fwent_idx, ent in enumerate(first_word_ents)}
if promise is not None:
promise_slot = promise['slot']
promise_words = promise['words']
# Remove the first word of the promise from the pool, we'll get to it later.
promise_first_word = promise_words[0]
if promise_first_word in first_words:
first_word_ents.pop(first_words[promise_first_word])
else:
promise_slot = None
jobs = [executor.submit(beam_search_phrases_loop, model, [ent],
start_idx=1,
beam_width=num_intermediates,
length_after_first=length_after_first, **beam_search_kwargs)
for ent in first_word_ents]
if promise is not None and len(promise_words) < 5 and not any(x in promise_words for x in '.?!'):
# Sneak an extra job into the queue...
promise_extension = True
# Promise provided, but we need to extend it with some new words.
remaining_length = max(1, length_after_first - len(' '.join(promise_words)))
jobs.append(executor.submit(beam_search_phrases,
model, toks + promise_words, beam_width=num_intermediates,
length_after_first=remaining_length, **beam_search_kwargs))
else:
promise_extension = False
results = (yield jobs)
if promise_extension:
# The extra job computed a bunch of possible promise continuations. Hold them aside.
promise_extension_results = results.pop()
# Convert them into a format compatible with our beam search.
# Make the score positive, so we can know not to taboo this entry.
promise_beam = [(ent[0] + 500, promise_words + ent[1]) for ent in promise_extension_results]
results.append(promise_beam)
# Now build final suggestions.
is_new_word = len(cur_word) == 0
active_entities = []
final_tok = toks[-1]
if final_tok in suggested_already:
suggested_already_this_tok = suggested_already[final_tok]
else:
suggested_already_this_tok = suggested_already[final_tok] = set()
for beam in results:
for ent in beam:
llk = ent[0]
words = ent[1]
# Penalize a suggestion that has already been made exactly like this before.
if llk < 0 and is_new_word and ' '.join(words[:3]) in suggested_already_this_tok:
print("Taboo:", ' '.join(words))
llk -= 5000.
active_entities.append((llk, words, {}))
# Add sentence-enders in the mix, but flagged special.
for ender in sentence_enders[:2]:
active_entities.append((995, [ender], {'type': 'eos'}))
# Add the highest likelihood promise continuation also, also flagged special.
if promise is not None:
llk = 999
if promise_extension:
words = promise_beam[0][1]
else:
words = promise_words
active_entities.append((llk, words, {'type': 'promise'}))
# If we're at the beginning of a sentence, add the special sentiment sentence starters.
if sentiment is not None and use_bos_suggs == 'sentiment' and len(cur_word) == 0 and toks[-1] in ["<D>", "<S>"]:
sent_idx = sum(1 for tok in toks if tok == '</S>')
if sentiment == 'diverse':
sent_targets = [[0, 1], [2], [3, 4]]
else:
sent_targets = [[sentiment - 1]] * 3
this_time_taboo = set()
for tgt_sentiments in sent_targets:
sent_bos_options = [
(tgt_sentiment, bos_option)
for tgt_sentiment in tgt_sentiments
for bos_option in sentiment_starters_by_stars_and_sentnum[tgt_sentiment][min(sent_idx, 2)]]
random.shuffle(sent_bos_options)
for tgt_sentiment, bos_option in sent_bos_options:
toks = bos_option.split()
first_3_words = ' '.join(toks[:3])
if first_3_words in this_time_taboo:
continue
if first_3_words in suggested_already_this_tok:
print("bos taboo:", bos_option)
continue
active_entities.append((100, toks, {'type': 'sentiment_bos', 'sentiment': tgt_sentiment / 4}))
this_time_taboo.add(first_3_words)
break
# Pad the active entities with null suggestions.
for i in range(3):
active_entities.append((-9999, [''], {'type': 'null'}))
active_entities.sort(reverse=True)
# Compute sentiment data
if sentiment is not None:
if sentiment == 'diverse':
# Diversify the suggestions by sentiment.
def summarize_posterior(sent_posteriors):
return np.mean(sent_posteriors, axis=0) @ sentiment_classifier.sentiment_weights
objective = scalar_diversity
else:
# Try to maximize the likelihood of the desired sentiment
target_sentiment = sentiment - 1
assert 0 <= target_sentiment < 5
def summarize_posterior(sent_posteriors):
return np.mean(sent_posteriors, axis=0)[target_sentiment]
def objective(slots):
return np.sum(slots)
classify_jobs = []
classify_jobs_meta = []
for entity_idx, (llk, words, meta) in enumerate(active_entities):
if meta.get('type') == 'eos' or 'sentiment' in meta:
continue
classify_jobs.append(words)
classify_jobs_meta.append(entity_idx)
classify_jobs_results = (yield map_as_jobs(executor, partial(sentiment_classifier.classify_seq_by_tok, clf_startstate), classify_jobs, chunksize=32))
sentiment_data = [ent[2].get('sentiment', .5) for ent in active_entities]
for entity_idx, posterior in zip(classify_jobs_meta, itertools.chain.from_iterable(classify_jobs_results)):
sentiment_data[entity_idx] = summarize_posterior(posterior)
entity_idx = 0
promise_entity_idx = 0
if promise is not None:
# The zeroth entity should be the promise.
assert active_entities[promise_entity_idx][2]['type'] == 'promise'
# Start open-assignment at the first entity.
entity_idx += 1
# Take 3 suggestions
assignments = [None] * 3
first_words_used = {}
if promise is not None:
first_words_used[promise['words'][0]] = promise_slot
for slot_idx in range(3):
if slot_idx == promise_slot:
# Assign the existing promise to this entry.
# We may extend it later with one of the computed extensions.
assignments[slot_idx] = promise_entity_idx
continue
while True:
llk, words, meta = active_entities[entity_idx]
first_word = words[0]
if first_word in first_words_used:
entity_idx += 1
continue
if first_word != '':
first_words_used[first_word] = slot_idx
assignments[slot_idx] = entity_idx
entity_idx += 1
break
if sentiment is not None:
# Tweak the suggestions as requested.
print("First words:", ' '.join(ent[1][0] for ent in first_word_ents))
cur_summaries = np.array([sentiment_data[entity_idx] for entity_idx in assignments])
cur_objective = objective(cur_summaries)
min_logprob_allowed = min(active_entities[entity_idx][0] for entity_idx in assignments) + max_logprob_penalty
if SHOW_SENTIMENT_OPTIONS:
for i in np.argsort(sentiment_data):
llk, words, meta = active_entities[i]
if llk < min_logprob_allowed:
continue
print(f'{sentiment_data[i]:.2f} {llk:.2f}', ' '.join(words))
# Greedily replace suggestions so as to increase sentiment diversity.
while True:
for entity_idx in assignments:
llk, words, meta = active_entities[entity_idx]
sentiment = sentiment_data[entity_idx]
print(f"{sentiment:3.2f} {llk:6.2f} {' '.join(words)}")
print()
print()
candidates = []
for entity_idx, (llk, words, meta) in enumerate(active_entities):
if llk < min_logprob_allowed:
continue
cur_summary = sentiment_data[entity_idx]
# Would this increase the objective if we added it?
# Case 1: it replaces an existing word
replaces_slot = first_words_used.get(words[0])
if replaces_slot is not None:
prev_llk = active_entities[assignments[replaces_slot]][0]
if llk < prev_llk + max_logprob_penalty:
# Too much relevance cost.
continue
if replaces_slot == promise_slot:
# This could replace the promise iff it was a continuation.
if words[:len(promise_words)] == promise_words:
# print("Considering replacing promise", words)
pass
else:
continue
elif prev_llk >= 0:
# Sorry, this was a special one, can't kick it out.
continue
candidate_summaries = cur_summaries.copy()
candidate_summaries[replaces_slot] = cur_summary
new_objective = objective(candidate_summaries)
else:
# Case 2: it replaces the currently least-diverse word.
new_objectives = np.full(3, -np.inf)
for replaces_slot in range(3):
prev_llk = active_entities[assignments[replaces_slot]][0]
if prev_llk >= 0:
# Sorry, this was a special one, can't kick it out.
continue
elif llk < prev_llk + max_logprob_penalty:
# Sorry, too much relevance cost.
continue
candidate_summaries = cur_summaries.copy()
candidate_summaries[replaces_slot] = cur_summary
new_objectives[replaces_slot] = objective(candidate_summaries)
replaces_slot = np.argmax(new_objectives)
new_objective = new_objectives[replaces_slot]
if new_objective > cur_objective:
candidates.append((new_objective, replaces_slot, entity_idx))
print(f"Found {len(candidates)} candidates that increase objective")
if len(candidates) == 0:
break
prev_objective = cur_objective
cur_objective, replaces_slot, entity_idx = max(candidates)
llk, words, meta = active_entities[entity_idx]
new_summary = sentiment_data[entity_idx]
print(f"Replacing slot {replaces_slot} with llk={llk:.2f} sent={new_summary:.2f} \"{' '.join(words)}\" to gain {cur_objective - prev_objective:.2f} objective")
# Actually replace the suggestion.
kicked_out_entity_idx = assignments[replaces_slot]
cur_summaries[replaces_slot] = new_summary
existing_first_word = active_entities[kicked_out_entity_idx][1][0]
del first_words_used[existing_first_word]
first_words_used[words[0]] = replaces_slot
assignments[replaces_slot] = entity_idx
# Sort the slots by sentiment, but keeping the promise in its spot.
if promise is not None:
assert active_entities[assignments[promise_slot]][1][:len(promise_words)] == promise_words
promise_entity_idx = assignments.pop(promise_slot)
assignments.sort(key=lambda entity_idx: -sentiment_data[entity_idx])
if promise is not None:
assignments.insert(promise_slot, promise_entity_idx)
# Now we should have assignments of phrases to slots.
phrases = []
for entity_idx in assignments:
llk, words, meta = active_entities[entity_idx]
meta = dict(meta, llk=llk)
if sentiment is not None:
meta['sentiment_summary'] = sentiment_data[entity_idx]
phrases.append((words, meta))
suggested_already_this_tok.add(' '.join(words[:3]))
if is_bos:
phrases = [(words, dict(meta, bos=True)) for words, meta in phrases]
else:
# TODO: upgrade to use_sufarr flag
phrases = generate_diverse_phrases(
domain, toks, 3, 6, prefix_logprobs=prefix_logprobs, temperature=temperature, use_sufarr=use_sufarr, **kw)
return phrases, sug_state
def get_suggestions(*a, **kw):
'''Wrap the async suggestion generation so it's testable.'''
from concurrent.futures import Future
class NullExecutor:
def submit(self, fn, *a, **kw):
future = Future()
future.set_result(fn(*a, **kw))
return future
generator = get_suggestions_async(NullExecutor(), *a, **kw)
result = None
while True:
try:
result = generator.send(result)
if isinstance(result, Future):
result = result.result()
elif isinstance(result, list) and len(result) > 0 and isinstance(result[0], Future):
result = [fut.result() for fut in result]
else:
print("Unexpected yield of something other than a Future!")
return result
except StopIteration as stop:
return stop.value
def request_to_kwargs(request):
return dict(
domain=request.get('domain', 'yelp_train'),
rare_word_bonus=request.get('rare_word_bonus', 0.0),
use_sufarr=request.get('useSufarr', False),
temperature=request.get('temperature', 0.),
use_bos_suggs=request.get('use_bos_suggs', False),
length_after_first=request.get('continuation_length', 17),
null_logprob_weight=request.get('null_logprob_weight', 0.),
prewrite_info=request.get('prewrite_info'),
constraints=request.get('constraints', {}),
promise=request.get('promise'),
sentiment=request.get('sentiment'),
word_bonuses=None)
def do_request_raw(request):
return get_suggestions(
sofar=request['sofar'], cur_word=request['cur_word'],
**request_to_kwargs(request))
def do_request_raw_json(request_json):
return do_request_raw(json.loads(request_json))
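# Illustrative request payload for do_request_raw_json (field names follow
# request_to_kwargs and do_request_raw; the concrete values are examples only):
# {"sofar": "the food was ", "cur_word": [{"letter": "g"}, {"letter": "r"}],
#  "domain": "yelp_train-balanced", "temperature": 0.0, "useSufarr": false,
#  "rare_word_bonus": 1.0, "use_bos_suggs": false, "constraints": {}}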
|
StarcoderdataPython
|
1712047
|
"""RKI Covid numbers integration."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
import async_timeout
from homeassistant import config_entries, core
from homeassistant.helpers import update_coordinator
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from rki_covid_parser.parser import RkiCovidParser
from custom_components.rki_covid.const import DOMAIN
from custom_components.rki_covid.data import DistrictData
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup(hass: core.HomeAssistant, config: dict) -> bool:
"""Set up the component into HomeAssistant."""
_LOGGER.debug("setup component.")
parser = RkiCovidParser(async_get_clientsession(hass))
# Make sure coordinator is initialized.
await get_coordinator(hass, parser)
# Return boolean to indicate that initialization was successful.
return True
async def async_setup_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
"""Set up component from a config entry."""
_LOGGER.debug(f"Setup item from config entry: {entry.data}.")
# Forward the setup to the sensor platform.
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Unload a config entry."""
_LOGGER.debug(f"Unload item from config entry: {entry.data}.")
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
return unload_ok
async def get_coordinator(hass: core.HomeAssistant, parser: RkiCovidParser):
"""Get the data update coordinator."""
_LOGGER.debug("initialize the data coordinator.")
if DOMAIN in hass.data:
return hass.data[DOMAIN]
async def async_get_districts():
"""Fetch data from rki-covid-parser library.
Here the data for each district is loaded.
"""
_LOGGER.debug("fetch data from rki-covid-parser.")
try:
with async_timeout.timeout(30):
# return {case.county: case for case in await api.load_districts()}
await parser.load_data()
_LOGGER.debug("fetching finished.")
items = {}
# districts
for d in parser.districts:
district = parser.districts[d]
items[district.county] = DistrictData(
district.name,
district.county,
district.state,
district.population,
district.cases,
district.deaths,
district.casesPerWeek,
district.recovered,
district.weekIncidence,
district.casesPer100k,
district.newCases,
district.newDeaths,
district.newRecovered,
district.lastUpdate,
)
# states
for s in parser.states:
state = parser.states[s]
name = "BL " + state.name
items[name] = DistrictData(
name,
name,
None,
state.population,
state.cases,
state.deaths,
state.casesPerWeek,
state.recovered,
state.weekIncidence,
state.casesPer100k,
state.newCases,
state.newDeaths,
state.newRecovered,
state.lastUpdate,
)
# country
items["Deutschland"] = DistrictData(
"Deutschland",
"Deutschland",
None,
parser.country.population,
parser.country.cases,
parser.country.deaths,
parser.country.casesPerWeek,
parser.country.recovered,
parser.country.weekIncidence,
parser.country.casesPer100k,
parser.country.newCases,
parser.country.newDeaths,
parser.country.newRecovered,
parser.country.lastUpdate,
)
_LOGGER.debug("parsing data finished.")
return items
except asyncio.TimeoutError as err:
raise update_coordinator.UpdateFailed(
f"Error reading data from rki-covid-parser timed-out: {err}"
)
except aiohttp.ClientError as err:
raise update_coordinator.UpdateFailed(
f"Error reading data from rki-covid-parser by client: {err}"
)
hass.data[DOMAIN] = update_coordinator.DataUpdateCoordinator(
hass,
logging.getLogger(__name__),
name=DOMAIN,
update_method=async_get_districts,
update_interval=timedelta(hours=3),
)
await hass.data[DOMAIN].async_refresh()
return hass.data[DOMAIN]
|
StarcoderdataPython
|
1648025
|
import logging
import os.path
from flask import request
from flask_restplus import Resource, fields
from common import api, main
log = logging.getLogger(__name__)
# This collects the API operations into named groups under a root URL.
example_ns = api.namespace('example', description="Example operations")
ExampleObj = api.model('Example', {
'in_str': fields.String(required=True,
description='your str',
example="exs"),
})
@example_ns.route('/<string:in_str>')
class ExampleResource(Resource):
@api.marshal_with(ExampleObj)
def get(self, in_str):
"""Takes in data"""
log.debug("Got parameter: %r", in_str)
log.debug("Got body: %r", request.data)
return {"in_str": in_str}
if __name__ == '__main__':
main(os.path.splitext(os.path.basename(__file__))[0] + '.json')
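# Illustrative request against the resource above (assuming the API is mounted at the
# application root, so the 'example' namespace lives under /example):
#   GET /example/exs  ->  {"in_str": "exs"}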
|
StarcoderdataPython
|
1640837
|
<reponame>franpoz/tirma
class StarInfo:
def __init__(self, object_id=None, ld_coefficients=None, teff=None, lum=None, logg=None, radius=None, radius_min=None,
radius_max=None, mass=None, mass_min=None, mass_max=None, ra=None, dec=None):
self.object_id = object_id
self.ld_coefficients = ld_coefficients
self.teff = teff
self.lum = lum
self.logg = logg
        self.mass = mass
        self.mass_min = None if mass is None or mass_min is None else mass - mass_min
        self.mass_max = None if mass is None or mass_max is None else mass + mass_max
        self.radius = radius
self.radius_min = None if radius is None or radius_min is None else radius - radius_min
self.radius_max = None if radius is None or radius_max is None else radius + radius_max
self.mass_assumed = False
self.radius_assumed = False
self.ra = ra
self.dec = dec
def assume_model_mass(self, mass=0.1):
self.mass = mass
self.mass_min = mass
self.mass_max = mass
self.mass_assumed = True
def assume_model_radius(self, radius=0.1):
self.radius = radius
self.radius_min = radius
self.radius_max = radius
self.radius_assumed = True
|
StarcoderdataPython
|
159225
|
<filename>operations/subtraction.py
from operations.operation import Operation
class Subtraction(Operation):
"""
Representing an operation to perform Subtraction
"""
TAG = 'subtraction'
__slots__ = ('minuend', 'subtrahend')
def __init__(self):
self.minuend = None
self.subtrahend = None
def __hash__(self):
return hash(self.minuend) ^ hash(self.subtrahend)
def __call__(self):
"""
Perform the operation on operands
:return: a number representing the outcome of operation or NaN
"""
if self.minuend is not None and self.subtrahend is not None:
return self.minuend() - self.subtrahend()
return self.NAN
def add_operand(self, operand, tag):
""" Add an operand for this operation """
if not isinstance(operand, Operation):
raise TypeError("Operand of type Operator is expected")
if tag not in self.__slots__:
            raise ValueError("Tag value can only be one of the following: ", *self.__slots__)
if tag == 'minuend':
self.minuend = operand
elif tag == 'subtrahend':
self.subtrahend = operand
|
StarcoderdataPython
|
3291835
|
<gh_stars>1-10
#!/usr/bin/env python
from math import *
# https://www.allaboutcircuits.com/textbook/alternating-current/chpt-3/ac-inductor-circuits/
# a pure inductor circuit (AC current with inductor) will have V(t) 90 phase ahead of I(t) and the power can be negative
# implying we can absorb power from the circuit as well as release it (net dissipation of energy is 0), however it cannot
# be realized in practice because this is ideal behavior which have no wire resistance
def reactance(f, L):
return 2*pi*f*L
def inductive_reactance(E, Xl):
return E/Xl
print(reactance(60, 10e-3))
print(reactance(120, 10e-3))
print(reactance(2500, 10e-3))
# at 60 hz with 10 mH, find the current of the circuit
# I = E/Xl
print(inductive_reactance(10, reactance(60, 10e-3)))
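# Worked numbers for the calls above (approximate): Xl = 2*pi*60*0.01 is about 3.77 ohms
# and 2*pi*2500*0.01 is about 157 ohms, so the 10 V source at 60 Hz drives
# I = E/Xl = 10/3.77, roughly 2.65 A; reactance grows linearly with frequency, so the
# current shrinks as f rises.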
|
StarcoderdataPython
|
3369501
|
<reponame>strawpants/cate
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = "<NAME> (Brockmann Consult GmbH)"
def extend(target_class, property_name=None, property_doc=None):
"""
Return a class decorator for classes that will become extensions to the given *target_class*.
The *target_class* will be extended by a new property with the given *name* and the given *doc*.
The new property will return an instance of the decorated extension class. The property value will be lazily
    created by calling the extension class' ``__init__`` method with the *target_class* instance
    as its only argument.
Example:
Let ``Model`` be an existing API class. Now another module wishes to extend the ``Model`` class by additional
methods. This could be done by inheritance, but this will cause severe compatibility issues once the
``Model`` class evolves and break the composition-over-inheritance design principle. In addition,
instantiation of the derived class must be performed explicitly. Instead, we want all ``Model`` instances to
    automatically include our new methods. Here is the code::
@extend(Model, 'my_ext')
class MyModelExt:
            '''My Model extension'''
def __init__(self, model):
self.model = model
def some_new_method(self, x):
self.model.some_old_method()
# ...
# Model API users can now use the API extension without explicitly instantiating MyModelExt:
model = Model()
model.my_ext.some_new_method()
:param target_class: A target class or sequence of target classes that will be extended.
:param property_name: The name of the new property in the target class.
If ``None``, a name will be derived from the *extension_class*.
:param property_doc: The docstring of the new property in the target class.
If ``None``, the doc-string will be taken from the *extension_class*, if any.
:return: A decorator.
"""
def decorator(extension_class):
return _add_extension(target_class, extension_class, property_name=property_name, property_doc=property_doc)
return decorator
def _add_extension(target_class, extension_class, property_name=None, property_doc=None):
"""
Add an "extension" property with *property_name* to the *target_class*. The property will return an
    instance of *extension_class* whose ``__init__`` method will be called with the *target_class*
    instance as its only argument.
Use this function to dynamically add extensions to existing classes in order to avoid inheritance.
This function should be used through its decorator function :py:func:`extend`.
:param target_class: A target class or sequence of target classes that will be extended.
:param extension_class: The class that implements the extension.
:param property_name: The name of the new property in the target class.
If ``None``, a name will be derived from the *extension_class*.
:param property_doc: The docstring of the new property in the target class.
If ``None``, the doc-string will be taken from the *extension_class*, if any.
:return: The *extension_class*.
"""
if not property_name:
# generate a property name from extension class name
property_name = []
last_was_lower = False
for c in extension_class.__name__:
if last_was_lower and c.isupper():
property_name.append('_')
property_name.append(c.lower())
last_was_lower = c.islower()
property_name = ''.join(property_name)
attribute_name = '_' + property_name
# define a property getter that lazily creates the extension instance
def _lazy_extension_getter(self):
if hasattr(self, attribute_name):
extension = getattr(self, attribute_name)
else:
extension = extension_class(self)
setattr(self, attribute_name, extension)
return extension
# derive docstring for property
doc = property_doc if property_doc else getattr(extension_class, '__doc__', None)
# inject new property into all target classes
try:
iterator = iter(target_class)
except TypeError:
iterator = iter([target_class])
for cls in iterator:
if hasattr(cls, property_name):
            raise ValueError("an attribute with name '%s' already exists in %s" % (property_name, cls))
setattr(cls, property_name, property(fget=_lazy_extension_getter, doc=doc))
return extension_class
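# Minimal sketch of the lazy-property behavior implemented above (Model and StatsExt are
# illustrative names, not part of this module):
#
#     class Model:
#         pass
#
#     @extend(Model, 'stats')
#     class StatsExt:
#         def __init__(self, model):
#             self.model = model
#
#     m = Model()
#     assert m.stats is m.stats  # built on first access, then cached on m._stats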
|
StarcoderdataPython
|
4824819
|
<reponame>scottwedge/OpenStack-Stein
# Copyright (c) 2013 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from blazar import manager
from blazar.utils import service
CONF = cfg.CONF
CONF.import_opt('rpc_topic', 'blazar.manager.service', 'manager')
class ManagerRPCAPI(service.RPCClient):
"""Client side for the Manager RPC API.
Used from other services to communicate with blazar-manager service.
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
"""Initiate RPC API client with needed topic and RPC version."""
super(ManagerRPCAPI, self).__init__(manager.get_target())
def get_computehost(self, host_id):
"""Get detailed info about some computehost."""
return self.call('physical:host:get_computehost', host_id=host_id)
def list_computehosts(self, query=None):
"""List all computehosts."""
return self.call('physical:host:list_computehosts', query=query)
def create_computehost(self, host_values):
"""Create computehost with specified parameters."""
return self.call('physical:host:create_computehost',
host_values=host_values)
def update_computehost(self, host_id, values):
"""Update computehost with passes values dictionary."""
return self.call('physical:host:update_computehost', host_id=host_id,
values=values)
def delete_computehost(self, host_id):
"""Delete specified computehost."""
return self.call('physical:host:delete_computehost',
host_id=host_id)
def list_allocations(self, query):
"""List all allocations on all computehosts."""
return self.call('physical:host:list_allocations', query=query)
def get_allocations(self, host_id, query):
"""List all allocations on a specified computehost."""
return self.call('physical:host:get_allocations',
host_id=host_id, query=query)
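# Illustrative call pattern (the RPC topic and version are wired up by
# blazar.manager.get_target(); the host id value is an example only):
#   rpc = ManagerRPCAPI()
#   host = rpc.get_computehost(host_id='1')
#   hosts = rpc.list_computehosts()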
|
StarcoderdataPython
|
1695143
|
<reponame>tusharsarkar3/XBNet
import torch
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from XBNet.training_utils import training,predict
from XBNet.models import XBNETClassifier
from XBNet.run import run_XBNET
from os import environ
def suppress_qt_warnings():
environ["QT_DEVICE_PIXEL_RATIO"] = "0"
environ["QT_AUTO_SCREEN_SCALE_FACTOR"] = "1"
environ["QT_SCREEN_SCALE_FACTORS"] = "1"
environ["QT_SCALE_FACTOR"] = "1"
suppress_qt_warnings()
column_to_predict = input("Column to classify: ")
data = pd.read_csv(r'test\Iris (1).csv')
n_df = len(data)
label_encoded = {}
imputations = {}
for i in data.columns:
    imputations[i] = data[i].mode()[0]
if data[i].isnull().sum()/n_df >= 0.15:
data.drop(i,axis = 1,inplace=True)
elif data[i].isnull().sum()/n_df < 0.15 and data[i].isnull().sum()/n_df > 0:
        data[i].fillna(data[i].mode()[0], inplace=True)
        imputations[i] = data[i].mode()[0]
columns_object = list(data.dtypes[data.dtypes==object].index)
for i in columns_object:
if i != column_to_predict:
if data[i].nunique()/n_df < 0.4:
le = LabelEncoder()
data[i] = le.fit_transform(data[i])
label_encoded[i] = le
else:
data.drop(i,axis=1,inplace=True)
x_data = data.drop(column_to_predict,axis=1).to_numpy()
columns_finally_used = data.drop(column_to_predict,axis=1).columns
print(x_data[0,:])
print("Number of features are: ",x_data.shape[1])
y_data = data[column_to_predict].to_numpy()
if y_data.dtype == object:
y_label_encoder = LabelEncoder()
y_data = y_label_encoder.fit_transform(y_data)
print("Number of classes are: ", np.unique(y_data,return_counts=True))
X_train,X_test,y_train,y_test = train_test_split(x_data,y_data,test_size = 0.3,random_state = 0)
# model = torch.load("model.pb")
model = XBNETClassifier(X_train,y_train,2,input_through_cmd=True,inputs_for_gui=[10,4,4,2])
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
m,acc, lo, val_ac, val_lo = run_XBNET(X_train,X_test,y_train,y_test,model,criterion,optimizer,32,300)
print(predict(m, x_data))
print(model.feature_importances_)
def process_for_predict(df,columns,imputations,encodings):
data = df[columns]
n = len(data)
for i in data.columns:
if data[i].isnull().sum() >0:
data[i].fillna(imputations[i], inplace=True)
if i in encodings.keys():
data[i] = encodings[i].transform(data[i])
print(predict(m, data.to_numpy()))
process_for_predict(pd.read_csv(r"test\titanic_test.csv"),columns_finally_used,imputations,label_encoded)
|
StarcoderdataPython
|
3399523
|
<reponame>tiaanswart/NZBankRegisterJSON
# import the modules
import csv
import json
# reader dict collection
bankAndBranchDict = []
# https://www.paymentsnz.co.nz/resources/industry-registers/bank-branch-register/
# read bank branch registry
with open("Bank_Branch_Register.csv", 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
bankAndBranchDict.append(dict(row))
csvfile.close()
# New banks collection for JSON
sBanks = {}
# For each branch
for branch in bankAndBranchDict:
# If we don't have the Bank yet
if branch["Bank_Number"] not in sBanks:
# Add the bank
sBanks[branch["Bank_Number"]] = {"Bank_Name": branch["Bank_Name"], "Branches": {}}
# Clone the branch info
branchDict = branch.copy()
# Get the number
branchNumber = branchDict.pop("Branch_Number")
# Remove Bank Attributes from Branch
del branchDict["Bank_Number"]
del branchDict["Bank_Name"]
# Add Branch to Bank
sBanks[branch["Bank_Number"]]["Branches"][branchNumber] = branchDict
# Dump Bank Dict into JSON file
jsonDump = json.dumps(sBanks)
jsonFile = open("banks.json","w")
jsonFile.write(jsonDump)
jsonFile.close()
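# Illustrative shape of the generated banks.json (top-level keys are Bank_Number values,
# nested keys are Branch_Number values; each branch dict keeps the remaining CSV columns):
# {"01": {"Bank_Name": "...", "Branches": {"0001": {...remaining register columns...}}}}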
|
StarcoderdataPython
|
1641576
|
<reponame>mhtb32/tl-env
import pytest
from tl_env.logic.automaton import Automaton, TransitionError
def test_add_state():
a = Automaton()
a.add_state(1) # add int node
a.add_state('q1') # add str node
assert a.states <= {1, 'q1'}
a.clear()
a.add_state(1, type_='init')
a.add_state(2, type_='final')
assert a.states[1] == {'type': 'init'}
# check whether value error is raised
with pytest.raises(ValueError):
a.add_state(3, type_='normal')
def test_add_state_from():
a = Automaton()
a.add_state_from([1, 2]) # add normal states
a.add_state_from([(3, {'type': 'final'})])
assert a.states <= {1, 2, 3}
assert a.states[3] == {'type': 'final'}
# check whether key error is raised
with pytest.raises(KeyError):
a.add_state_from([(4, {'typo': 'final'})])
def test_step():
a = Automaton()
alphabet = {'x': True, 'y': False}
a.add_transition_from([('q1', 'q2', 'x'), ('q1', 'q3', 'y'), ('q2', 'q3', 'y')])
# initial state is unknown, we expect throwing an error
with pytest.raises(TransitionError):
a.step(alphabet)
a.add_state_from([('q1', {'type': 'init'}), 'q2', ('q3', {'type': 'final'})])
a.step(alphabet)
assert a.cur_state == 'q2'
# should not allow simultaneous active alphabet
alphabet = {'x': True, 'y': True}
with pytest.raises(TransitionError):
a.step(alphabet)
alphabet = {'x': False, 'y': True}
a.step(alphabet)
assert a.cur_state == 'q3'
# q3 has no successors, so the state must remain q3
a.step(alphabet)
assert a.cur_state == 'q3'
assert a.in_final()
def test_drawing():
a = Automaton()
a.add_state('q1')
a.add_state('q2', type_='final')
a.add_transition_from([('q1', 'q2', 'x'), ('q2', 'q1', 'y')])
a.draw()
|
StarcoderdataPython
|
1702097
|
<gh_stars>1-10
import sys
sys.path.append('../implementations/')
from implementations.dimensions import dimensions
for dimension in dimensions:
print(dimension)
|
StarcoderdataPython
|
189367
|
<reponame>pusinuke/Python_projects
#Coursera capstone project
#week3
#2020 02 17
from bs4 import BeautifulSoup
import requests
source = requests.get('https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M').text
soup = BeautifulSoup(source, 'lxml')
for table in soup.find_all('table'):
# line = table.tr.th.text
try:
line = table.tr.text
except Exception as e:
line = None
print(line)
print()
# table = soup.find('table')
# line = table.tr.text
# print(line)
#print(table.prettify())
#print(soup.prettify())
# with open('List of postal codes of Canada_ M - Wikipedia.htm') as html_file:
# soup = BeautifulSoup(html_file, 'lxml')
# print(soup.prettify())
# # match = soup.title.text  # finds only the first title
# # match = soup.div  # only the first div with all of its children
# # match = soup.find('div')  # finds the same as before
# # match = soup.find('div', class_='footer')
# #print(match)
# for article in soup.find_all('div', class_='article'):
# headline = article.h2.a.text
# print(headline)
# summary = article.p.text
# print(summary)
# print()
|
StarcoderdataPython
|
60378
|
import pandas as pd
from os import listdir
from datetime import datetime as dtt
import logging
import json
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class WalletManager:
def __init__(self, client, states):
self.binance = client
self.states = states
self._wallet_history_file = "wallet_history.csv"
def update_balances(self):
logger.info("Updating account balances...")
r = self.binance.spot_account_trade.account_information()["content"]
balances = {}
for b in r["balances"]:
if float(b["free"]) == 0 and float(b["locked"]) == 0:
continue
balances[b["asset"]] = float(b["free"])
self.states["balances"] = balances
def fetch_trading_rules(self):
logger.info("Fetching trading rules...")
trade_rules = {}
r = self.binance.market_data.exchange_information()
for symbol in r["content"]["symbols"]:
pair = symbol["symbol"]
trade_rules[pair] = {}
for feelter in symbol["filters"]:
filter_type = feelter["filterType"]
trade_rules[pair][filter_type] = {}
for part in feelter.keys():
if part == "filterType":
continue
value = feelter[part]
if type(value) == str:
value = float(value)
trade_rules[pair][filter_type][part] = value
self.states["trade_rules"] = trade_rules
|
StarcoderdataPython
|
131314
|
<reponame>paul-nameless/beanie
from typing import Type, TYPE_CHECKING, Optional, Union, Mapping
from pymongo.client_session import ClientSession
from beanie.odm.interfaces.session import SessionMethods
from beanie.odm.interfaces.update import (
UpdateMethods,
)
from beanie.odm.operators.update import BaseUpdateOperator
if TYPE_CHECKING:
from beanie.odm.documents import Document
class UpdateQuery(UpdateMethods, SessionMethods):
"""
Update Query base class
Inherited from:
- [SessionMethods](/api/interfaces/#sessionmethods)
- [UpdateMethods](/api/interfaces/#aggregatemethods)
"""
def __init__(self, document_model: Type["Document"], find_query: dict):
self.document_model = document_model
self.find_query = find_query
self.update_expressions = []
self.session = None
@property
def update_query(self):
query = {}
for expression in self.update_expressions:
if isinstance(expression, BaseUpdateOperator):
query.update(expression.query)
elif isinstance(expression, dict):
query.update(expression)
else:
raise TypeError("Wrong expression type")
return query
def update(
self,
*args: Union[dict, Mapping],
session: Optional[ClientSession] = None
):
"""
        Provide modifications to the update query.
        :param args: *Union[dict, Mapping] - the modifications to apply.
        :param session: Optional[ClientSession]
        :return: UpdateQuery
"""
self.set_session(session=session)
self.update_expressions += args
return self
class UpdateMany(UpdateQuery):
"""
Update Many query class
Inherited from:
- [UpdateQuery](/api/queries/#updatequery)
"""
def update_many(self, *args, session: Optional[ClientSession] = None):
"""
Provide modifications to the update query
:param args: *Union[dict, Mapping] - the modifications to apply.
:param session: Optional[ClientSession]
:return: UpdateMany query
"""
return self.update(*args, session=session)
def __await__(self):
"""
Run the query
:return:
"""
yield from self.document_model.get_motor_collection().update_many(
self.find_query, self.update_query, session=self.session
)
class UpdateOne(UpdateQuery):
"""
Update One query class
Inherited from:
- [UpdateQuery](/api/queries/#updatequery)
"""
def update_one(self, *args, session: Optional[ClientSession] = None):
"""
Provide modifications to the update query. The same as `update()`
:param args: *Union[dict, Mapping] - the modifications to apply.
:param session: Optional[ClientSession]
        :return: UpdateOne query
"""
return self.update(*args, session=session)
def __await__(self):
"""
Run the query
:return:
"""
yield from self.document_model.get_motor_collection().update_one(
self.find_query, self.update_query, session=self.session
)
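# Minimal usage sketch (Product stands in for any Document subclass; the raw-dict form is
# accepted because update_query merges plain Mapping expressions as-is):
#   await UpdateMany(document_model=Product, find_query={"status": "draft"}).update(
#       {"$set": {"status": "published"}}
#   )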
|
StarcoderdataPython
|
3256168
|
<reponame>dimkarakostas/advent-of-code
from collections import defaultdict
from math import ceil
product_requirements, reaction_amounts = {}, {}
for l in open('input14').readlines():
reaction = [r.split(',') for r in l.strip().split('=>')]
inputs, output = reaction[0], reaction[1][0]
inputs = [i.strip().split(' ') for i in inputs]
output = output.strip().split(' ')
product_requirements[output[1]] = [(int(a), b) for [a, b] in inputs]
reaction_amounts[output[1]] = int(output[0])
def mine(element, quantity):
if element == 'ORE':
used[element] += quantity
else:
if quantity <= excess[element]:
used[element] += quantity
excess[element] -= quantity
else:
quantity = quantity - excess[element]
used[element] += excess[element]
excess[element] = 0
reactions_needed = int(ceil(float(quantity) / reaction_amounts[element]))
for (q, elem) in product_requirements[element]:
mine(elem, reactions_needed * q)
used[element] += quantity
excess[element] += reactions_needed * reaction_amounts[element] - quantity
used, excess = defaultdict(int), defaultdict(int)
mine('FUEL', 1)
print('Part 1:', used['ORE'])
max_ore = 1000000000000
fuels_min, fuels_max = max_ore // used['ORE'], max_ore
while fuels_min < fuels_max:
    mid_fuel = (fuels_max + fuels_min) // 2
used, excess = defaultdict(int), defaultdict(int)
mine('FUEL', mid_fuel)
if used['ORE'] > max_ore:
fuels_max = mid_fuel - 1
elif used['ORE'] < max_ore:
fuels_min = mid_fuel + 1
else:
fuels_max = mid_fuel
break
print('Part 2:', fuels_max)
|
StarcoderdataPython
|
1669418
|
from unittest import TestCase
from unittest.mock import MagicMock, patch
from piccolo.apps.meta.commands.version import version
class TestVersion(TestCase):
@patch("piccolo.apps.meta.commands.version.print")
def test_version(self, print_: MagicMock):
version()
print_.assert_called_once()
|
StarcoderdataPython
|
4807190
|
<filename>dnachisel/builtin_specifications/EnforceSequence.py<gh_stars>100-1000
"""Implement EnforceSequence (DO NOT USE YET: Work in progress, stabilizing)"""
# TODO: factorize with self.sequence ?
import numpy as np
from ..Specification import Specification, SpecEvaluation
from ..Location import Location
from ..biotools import group_nearby_indices, reverse_complement, IUPAC_NOTATION
class EnforceSequence(Specification):
"""Enforces a (possibly degenerate) sequence at some location.
Shorthand for annotations: "sequence".
Parameters
----------
sequence
An ATGC string representing the wanted sequence, possibly degenerated,
for instance ATTCGCGTYTTKWNAA
location
Location of the DNA segment on which to enforce the pattern e.g.
``Location(10, 45, 1)`` or simply ``(10, 45, 1)``
"""
localization_interval_length = 6 # used when optimizing
best_possible_score = 0
enforced_by_nucleotide_restrictions = True
shorthand_name = "sequence"
def __init__(self, sequence=None, location=None, boost=1.0):
"""Initialize."""
self.sequence = sequence
self.location = Location.from_data(location)
self.boost = boost
def initialized_on_problem(self, problem, role):
"""Find out what sequence it is that we are supposed to conserve."""
return self._copy_with_full_span_if_no_location(problem)
# if self.location is None:
# result = self.copy_with_changes()
# result.location = Location(0, len(problem.sequence), 1)
# return result
# else:
# return self
def evaluate(self, problem):
"""Return a score equal to -number_of modifications.
        Locations are "binned" modification regions. Each bin has a length
        in nucleotides equal to ``localization_interval_length``.
"""
sequence = self.location.extract_sequence(problem.sequence)
discrepancies = np.array(
[
i
for i, nuc in enumerate(sequence)
if nuc not in IUPAC_NOTATION[self.sequence[i]]
]
)
if self.location.strand == -1:
discrepancies = self.location.end - discrepancies
else:
discrepancies = discrepancies + self.location.start
intervals = [
(r[0], r[-1] + 1)
for r in group_nearby_indices(
discrepancies, max_group_spread=self.localization_interval_length
)
]
locations = [Location(start, end, 1) for start, end in intervals]
return SpecEvaluation(
self, problem, score=-len(discrepancies), locations=locations
)
def localized(self, location, problem=None):
"""Localize the spec to the overlap of its location and the new."""
start, end = location.start, location.end
new_location = self.location.overlap_region(location)
if new_location is None:
return None
else:
if self.location.strand == -1:
start = self.location.end - new_location.end
end = self.location.end - new_location.start
else:
start = new_location.start - self.location.start
end = new_location.end - self.location.start
new_sequence = self.sequence[start:end]
return self.copy_with_changes(location=new_location, sequence=new_sequence)
def restrict_nucleotides(self, sequence, location=None):
"""When localizing, forbid any nucleotide but the one already there."""
if location is not None:
new_location = self.location.overlap_region(location)
if new_location is None:
return []
else:
new_location = self.location
start, end = new_location.start, new_location.end
if self.location.strand == -1:
lend = self.location.end
return [
(
i,
set(
reverse_complement(n)
for n in IUPAC_NOTATION[self.sequence[lend - i - 1]]
),
)
for i in range(start, end)
]
else:
lstart = self.location.start
return [
(i, IUPAC_NOTATION[self.sequence[i - lstart]])
for i in range(start, end)
]
def __repr__(self):
"""Represent."""
return "EnforceSequence(%s)" % str(self.location)
def __str__(self):
"""Represent."""
return "EnforceSequence(%s)" % str(self.location)
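# Illustrative behavior sketch: with sequence="ATS" (S meaning G or C in IUPAC notation),
# the segment "ATG" or "ATC" evaluates with no discrepancies, while "ATA" counts one at the
# last position; nearby discrepancies are grouped into Locations binned by
# localization_interval_length.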
|
StarcoderdataPython
|
4829383
|
from mango.relations import base
from mango.relations.constants import CASCADE
__all__ = [
"OneToOneRel",
"OneToManyRel",
"ManyToOneRel",
"ManyToManyRel",
]
class OneToOneRel(base.Relation):
def __init__(
self,
cls=None,
name=None,
rev_name=None,
rev_hidden=None,
hidden=False,
persist=True,
typed=False,
validators=(),
on_delete=CASCADE
):
super(OneToOneRel, self).__init__(
cls=cls,
name=name,
rev_name=rev_name,
rev_hidden=rev_hidden,
rev_relation=OneToOneRel,
hidden=hidden,
persist=persist,
typed=typed,
on_delete=on_delete,
validators=validators,
multi=False,
)
class OneToManyRel(base.Relation):
def __init__(
self,
cls=None,
name=None,
rev_name=None,
rev_hidden=None,
hidden=False,
persist=True,
typed=False,
validators=(),
on_delete=CASCADE
):
super(OneToManyRel, self).__init__(
cls=cls,
name=name,
rev_name=rev_name,
rev_hidden=rev_hidden,
rev_relation=OneToOneRel,
hidden=hidden,
persist=persist,
typed=typed,
on_delete=on_delete,
validators=validators,
multi=True,
)
class ManyToOneRel(base.Relation):
def __init__(
self,
cls=None,
name=None,
rev_name=None,
rev_hidden=None,
hidden=False,
persist=True,
typed=False,
validators=(),
on_delete=CASCADE
):
super(ManyToOneRel, self).__init__(
cls=cls,
name=name,
rev_name=rev_name,
rev_hidden=rev_hidden,
rev_relation=OneToManyRel,
hidden=hidden,
persist=persist,
typed=typed,
on_delete=on_delete,
validators=validators,
multi=False,
)
class ManyToManyRel(base.Relation):
def __init__(
self,
cls=None,
name=None,
rev_name=None,
rev_hidden=None,
hidden=False,
persist=True,
typed=False,
validators=(),
on_delete=CASCADE
):
super(ManyToManyRel, self).__init__(
cls=cls,
name=name,
rev_name=rev_name,
rev_hidden=rev_hidden,
rev_relation=ManyToManyRel,
hidden=hidden,
persist=persist,
typed=typed,
on_delete=on_delete,
validators=validators,
multi=True,
)
|
StarcoderdataPython
|
135727
|
<filename>catalog/migrations/0005_auto_20220324_0011.py<gh_stars>1-10
# Generated by Django 3.2.12 on 2022-03-23 21:11
import catalog.validators
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('rating', '0001_initial'),
('catalog', '0004_auto_20220324_0011'),
]
operations = [
migrations.AddField(
model_name='item',
name='ratings',
field=models.ManyToManyField(related_name='items', through='rating.Rating', to=settings.AUTH_USER_MODEL, verbose_name='Оценки'),
),
migrations.AlterField(
model_name='item',
name='name',
field=models.CharField(max_length=150),
),
migrations.AlterField(
model_name='item',
name='text',
field=models.TextField(help_text='Минимум два слова. Обязательно должно содержаться слово превосходно или роскошно', validators=[catalog.validators.text_validation], verbose_name='Текст'),
),
migrations.AddField(
model_name='item',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='items', to='catalog.category', verbose_name='Категория'),
preserve_default=False,
),
migrations.AddField(
model_name='item',
name='tags',
field=models.ManyToManyField(related_name='items', to='catalog.Tag', verbose_name='Теги'),
),
]
|
StarcoderdataPython
|
3241383
|
import util
from os import system
from random import randint
import tempfile
def config_version(stack, version):
config = "tconfig" + str(randint(1000, 5000))
fp = tempfile.NamedTemporaryFile(delete=False)
version_as_bytes = str.encode(version)
fp.write(b'%s' % version_as_bytes)
fp.close()
print(stack)
config_name = stack + "/" + config
print(config_name)
util.run("rio config create %s %s" % (config_name, fp.name))
return config
def create_service(stack, config):
name = "tsrv" + str(randint(1000, 5000))
fullName = "%s/%s" % (stack, name)
path = "/usr/share/nginx/html/index.html"
print(fullName)
print(config)
run_command = "rio run -n %s -p 80/http --config %s:%s nginx" % (
fullName, config, path
)
print(run_command)
system(run_command)
system("rio wait %s" % fullName)
return name
def stage_service(stack, name, version, second_config):
fullName = "%s/%s" % (stack, name)
path = "/usr/share/nginx/html/index.html"
command = "rio stage --image=nginx --config %s:%s %s:%s" % (
second_config, path, fullName, version
)
print(command)
system(command)
system("rio wait %s" % fullName)
stackJson = util.runToJson("rio export -o json %s" % stack)
got = stackJson['services'][name]['revisions']['v2']['scale']
return got
def weight_service(stack, name, version, weight):
fullName = "%s/%s" % (stack, name)
command = "rio weight %s:%s=%s" % (fullName, version, weight)
system(command)
stackJson = util.runToJson("rio export -o json %s" % stack)
got = stackJson['services'][name]['revisions']['v2']['weight']
return got
def promote_service(stack, name, version):
fullName = "%s/%s" % (stack, name)
command = "rio promote %s:%s" % (fullName, version)
system(command)
stackJson = util.runToJson("rio export -o json %s" % stack)
got = stackJson['services'][name]['version']
return got
def test_stage_service(stack):
config_name = config_version(stack, "1")
name = create_service(stack, config_name)
second_config = config_version(stack, "2")
got = stage_service(stack, name, "v2", second_config)
assert got == 1
def test_weight_service(stack):
config_name = config_version(stack, "1")
name = create_service(stack, config_name)
second_config = config_version(stack, "2")
stage_service(stack, name, "v2", second_config)
got = weight_service(stack, name, "v2", "50")
assert got == 50
def test_promote_service(stack):
config_name = config_version(stack, "1")
name = create_service(stack, config_name)
second_config = config_version(stack, "2")
stage_service(stack, name, "v2", second_config)
weight_service(stack, name, "v2", "50")
got = promote_service(stack, name, "v2")
assert got == "v2"
|
StarcoderdataPython
|
174227
|
import cv2 as cv
img = cv.imread('data/pic1.jpg')
cv.imshow('pic1', img)
# RGB
rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
cv.imshow('rgb', rgb)
# HSV
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
cv.imshow('hsv', hsv)
# LAB
lab = cv.cvtColor(img, cv.COLOR_BGR2LAB)
cv.imshow('lab', lab)
# grayscale
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('gray', gray)
cv.waitKey(0)
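# Round-trip sketch (illustrative addition, not in the original demo): color
# conversions that preserve information can be inverted, and any result can be
# written back to disk.
# back_to_bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
# cv.imwrite('data/pic1_gray.jpg', gray)
# cv.destroyAllWindows()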
|
StarcoderdataPython
|
3307813
|
import uuid
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from orders.domain.exceptions.invalid_uuid import InvalidUUID
from orders.domain.exceptions.order_does_not_exist import OrderDoesNotExist
from orders.infrastructure.persistence.django.order import Order
ORDER = 'api:api_orders:v1_order'
class OrderTests(TestCase):
"""
E2E tests using Order API
Creating order
Deleting order
"""
def setUp(self):
self.client = APIClient()
def test_create_order(self):
"""Test creating order"""
order_uuid = str(uuid.uuid4())
url = reverse(
ORDER,
kwargs={
'order_uuid': order_uuid,
},
)
response = self.client.post(url)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(1, Order.objects.filter(id=order_uuid).count())
def test_create_order_with_invalid_uuid(self):
"""Test creating order with invalid uuid"""
order_uuid = "error"
url = reverse(
ORDER,
kwargs={
'order_uuid': order_uuid,
},
)
with self.assertRaises(InvalidUUID):
self.client.post(url)
def test_remove_order(self):
"""Test remove existing order"""
order_uuid = str(uuid.uuid4())
url = reverse(
ORDER,
kwargs={
'order_uuid': order_uuid,
},
)
self.client.post(url)
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(0, Order.objects.filter(id=order_uuid).count())
def test_remove_non_existing_order(self):
"""Test remove non existing order"""
order_uuid = str(uuid.uuid4())
url = reverse(
ORDER,
kwargs={
'order_uuid': order_uuid,
},
)
with self.assertRaises(OrderDoesNotExist):
self.client.delete(url)
|
StarcoderdataPython
|
3224751
|
class Main:
def __init__(self):
self.li = []
for i in range(0, 4):
self.li.append(int(input()))
def difference(self):
return self.li[0] * self.li[1] - self.li[2] * self.li[3]
def output(self):
print("DIFERENCA = {dif}".format(dif=self.difference()))
if __name__ == '__main__':
obj = Main()
obj.output()
|
StarcoderdataPython
|
1779173
|
global_catalyst_coprocessor = None
def initialize(coprocessing_script):
global global_catalyst_coprocessor
import paraview
paraview.options.batch = True
paraview.options.symmetric = True
from paraview.vtk.vtkPVClientServerCoreCore import vtkProcessModule
def coprocess(dataset, timestep, time):
    # Placeholder: no in situ processing is performed for the given dataset,
    # timestep, and simulation time.
    pass
def finalize():
    # Placeholder: no coprocessor cleanup is performed.
    pass
|
StarcoderdataPython
|
1670008
|
<reponame>daserzw/oidc-swamid-federation
#!/usr/bin/env python3
import json
import os
import sys
from urllib.parse import quote_plus
for _dir in ['entities']:
if not os.path.isdir(_dir):
os.makedirs(_dir)
with open('public/mdss.json') as fp:
    mdss_sign_key = fp.read()
for entity in sys.argv[1:]:
    with open('../{}/enrollment_info'.format(entity)) as fp:
        einfo = json.load(fp)
    eid = quote_plus(einfo['entity_id'])
    with open('entities/{}'.format(eid), 'w') as fp:
        fp.write(json.dumps(einfo))
    with open('../{}/mdss.jwks'.format(entity), 'w') as fp:
        fp.write(mdss_sign_key)
|
StarcoderdataPython
|
1728039
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-31 21:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crowdsourcing', '0003_taskworker_attempt'),
]
operations = [
migrations.AlterField(
model_name='project',
name='group_id',
field=models.IntegerField(db_index=True, null=True),
),
migrations.AlterField(
model_name='task',
name='group_id',
field=models.IntegerField(db_index=True, null=True),
),
migrations.AlterField(
model_name='taskworker',
name='group_id',
field=models.IntegerField(db_index=True, null=True),
),
migrations.AlterField(
model_name='template',
name='group_id',
field=models.IntegerField(db_index=True, null=True),
),
]
|
StarcoderdataPython
|
1679605
|
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from ...types import T
class VideoDataMixin:
"""Provide helper functions for :class:`Document` to support video data. """
def load_uri_to_video_blob(self: 'T', only_keyframes: bool = False) -> 'T':
"""Convert a :attr:`.uri` to a video ndarray :attr:`.blob`.
:param only_keyframes: only keep the keyframes in the video
:return: Document itself after processed
"""
import av
with av.open(self.uri) as container:
if only_keyframes:
stream = container.streams.video[0]
stream.codec_context.skip_frame = 'NONKEY'
frames = []
for frame in container.decode(video=0):
img = frame.to_image()
frames.append(np.asarray(img))
self.blob = np.moveaxis(np.stack(frames), 1, 2)
return self
def save_video_blob_to_file(
self: 'T', file: Union[str, BinaryIO], frame_rate: int = 30, codec: str = 'h264'
) -> 'T':
"""Save :attr:`.blob` as a video mp4/h264 file.
:param file: The file to open, which can be either a string or a file-like object.
:param frame_rate: frames per second
:param codec: the name of a decoder/encoder
:return: itself after processed
"""
if (
self.blob.ndim != 4
or self.blob.shape[-1] != 3
or self.blob.dtype != np.uint8
):
raise ValueError(
f'expects `.blob` with dtype=uint8 and ndim=4 and the last dimension is 3, '
f'but receiving {self.blob.shape} in {self.blob.dtype}'
)
video_blob = np.moveaxis(np.clip(self.blob, 0, 255), 1, 2)
import av
with av.open(file, mode='w') as container:
stream = container.add_stream(codec, rate=frame_rate)
stream.width = self.blob.shape[1]
stream.height = self.blob.shape[2]
stream.pix_fmt = 'yuv420p'
for b in video_blob:
frame = av.VideoFrame.from_ndarray(b, format='rgb24')
for packet in stream.encode(frame):
container.mux(packet)
for packet in stream.encode():
container.mux(packet)
return self
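# Usage sketch (hedged): this mixin is meant to be composed into a Document-like
# class exposing `.uri` and `.blob`; the `Document` import and file names below
# are assumptions about the surrounding package, not part of this module.
#
#     from docarray import Document
#
#     d = Document(uri='toy.mp4')
#     d.load_uri_to_video_blob(only_keyframes=True)      # blob: (frames, W, H, 3) uint8
#     d.save_video_blob_to_file('keyframes.mp4', frame_rate=10)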
|
StarcoderdataPython
|
151420
|
<filename>mvn/test/__init__.py
import os
import sys
import subprocess
#import nose
def main(argv = None):
if argv is None:
argv = []
[testPath,filename] = os.path.split(__file__)
[mvnPath,_] = os.path.split(testPath)
resultPath = os.path.join(testPath,'results.txt')
targets = [mvnPath,os.path.join(testPath+'/unit.py')]
args = ['--with-coverage','--cover-package=mvn','--with-doctest']
# nose.run(argv = args+targets)
tee = subprocess.Popen(
['tee',resultPath],
stdin = subprocess.PIPE,
stdout = sys.stdout
)
tests = subprocess.Popen(
['nosetests']+args+targets,
stdout = tee.stdin,
stderr = tee.stdin
)
tests.communicate()
|
StarcoderdataPython
|
4832493
|
from abc import ABC
class AbstractType(ABC):
def __init__(self, module_name: str, class_name: str):
self._module_name = module_name
self._class_name = class_name
@property
def module_name(self):
return self._module_name
@property
def class_name(self):
return self._class_name
def is_primitive_type(self) -> bool:
return self._module_name == "builtins"
def is_defined(self) -> bool:
return self._module_name != "inspect" and self._class_name != "_empty"
def __str__(self):
return self._module_name + "." + self._class_name
|
StarcoderdataPython
|
1661756
|
from .verbs import *
# preceed w/ underscore so it isn't exported by default
# we just want to register the singledispatch funcs
from .dply import vector as _vector
from .dply import string as _string
|
StarcoderdataPython
|
3236638
|
<filename>7KYU/reverse.py
def reverse(n: int) -> int:
""" This function takes in input 'n' and returns 'n' with all digits reversed. Assume positive 'n'. """
reversed_n = []
while n != 0:
i = n % 10
reversed_n.append(i)
n = (n - i) // 10
return int(''.join(map(str, reversed_n)))
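# Minimal usage sketch (illustrative addition; the docstring's contract of a
# positive 'n' is assumed — note that reverse(0) would fail on the empty join).
if __name__ == '__main__':
    assert reverse(1234) == 4321
    assert reverse(105) == 501   # a trailing zero in the input disappears
    assert reverse(7) == 7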
|
StarcoderdataPython
|
3301852
|
<reponame>cdeepakroy/SMQTK
import abc
import collections
import os
from smqtk.representation import SmqtkRepresentation
from smqtk.utils import plugin
class DataSet (collections.Set, SmqtkRepresentation, plugin.Pluggable):
"""
Abstract interface for data sets, that contain an arbitrary number of
``DataElement`` instances of arbitrary implementation type, keyed on
``DataElement`` UUID values.
This should only be used with DataElements whose byte content is expected
not to change. If they do, then UUID keys may no longer represent the
elements associated with them.
"""
def __len__(self):
"""
:return: Number of elements in this DataSet.
:rtype: int
"""
return self.count()
def __getitem__(self, uuid):
return self.get_data(uuid)
def __contains__(self, d):
"""
Different than has_uuid() because this takes another DataElement
instance, not a UUID.
:param d: DataElement to test for containment
:type d: smqtk.representation.DataElement
:return: True of this DataSet contains the given data element. Since,
:rtype: bool
"""
return self.has_uuid(d.uuid())
@abc.abstractmethod
def __iter__(self):
"""
:return: Generator over the DataElements contained in this set in no
particular order.
"""
@abc.abstractmethod
def count(self):
"""
:return: The number of data elements in this set.
:rtype: int
"""
@abc.abstractmethod
def uuids(self):
"""
:return: A new set of uuids represented in this data set.
:rtype: set
"""
@abc.abstractmethod
def has_uuid(self, uuid):
"""
Test if the given uuid refers to an element in this data set.
:param uuid: Unique ID to test for inclusion. This should match the
type that the set implementation expects or cares about.
:type uuid: collections.Hashable
:return: True if the given uuid matches an element in this set, or
False if it does not.
:rtype: bool
"""
@abc.abstractmethod
def add_data(self, *elems):
"""
Add the given data element(s) instance to this data set.
*NOTE: Implementing methods should check that input elements are in
fact DataElement instances.*
:param elems: Data element(s) to add
:type elems: smqtk.representation.DataElement
"""
@abc.abstractmethod
def get_data(self, uuid):
"""
Get the data element the given uuid references, or raise an
exception if the uuid does not reference any element in this set.
:raises KeyError: If the given uuid does not refer to an element in
this data set.
:param uuid: The uuid of the element to retrieve.
:type uuid: collections.Hashable
:return: The data element instance for the given uuid.
:rtype: smqtk.representation.DataElement
"""
def get_data_set_impls(reload_modules=False):
"""
Discover and return discovered ``DataSet`` classes. Keys in the
returned map are the names of the discovered classes, and the paired values
are the actual class type objects.
We search for implementation classes in:
- modules next to this file this function is defined in (ones that begin
with an alphanumeric character),
- python modules listed in the environment variable ``DATA_SET_PATH``
- This variable should contain a sequence of python module
specifications, separated by the platform specific PATH separator
character (``;`` for Windows, ``:`` for unix)
Within a module we first look for a helper variable by the name
``DATA_SET_CLASS``, which can either be a single class object or
an iterable of class objects, to be specifically exported. If the variable
is set to None, we skip that module and do not import anything. If the
variable is not present, we look at attributes defined in that module for
classes that descend from the given base class type. If none of the above
are found, or if an exception occurs, the module is skipped.
:param reload_modules: Explicitly reload discovered modules from source.
:type reload_modules: bool
:return: Map of discovered class object of type ``DataSet``
whose keys are the string names of the classes.
:rtype: dict[str, type]
"""
this_dir = os.path.abspath(os.path.dirname(__file__))
env_var = "DATA_SET_PATH"
helper_var = "DATA_SET_CLASS"
return plugin.get_plugins(__name__, this_dir, env_var, helper_var, DataSet,
reload_modules=reload_modules)
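# Usage sketch (illustrative only): discover the available DataSet
# implementations and inspect them by name. "my_plugins.datasets" is a
# hypothetical module path following the DATA_SET_PATH convention described
# above.
#
#     import os
#     os.environ["DATA_SET_PATH"] = "my_plugins.datasets"
#     impls = get_data_set_impls()
#     print(sorted(impls))   # names of discovered DataSet classes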
|
StarcoderdataPython
|
3366649
|
<reponame>HanseMerkur/nitro-python
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class protocoltcp_stats(base_resource) :
"""Statistics for tcp protocol resource."""
def __init__(self) :
self._clearstats = ""
self._tcpactiveserverconn = 0
self._tcpcurserverconnopening = 0
self._tcpcurclientconnopening = 0
self._tcpcurclientconnestablished = 0
self._tcpcurserverconnestablished = 0
self._tcptotrxpkts = 0
self._tcprxpktsrate = 0
self._tcptotrxbytes = 0
self._tcprxbytesrate = 0
self._tcptottxpkts = 0
self._tcptxpktsrate = 0
self._tcptottxbytes = 0
self._tcptxbytesrate = 0
self._tcpcurclientconn = 0
self._tcpcurclientconnclosing = 0
self._tcptotclientconnopened = 0
self._tcpclientconnopenedrate = 0
self._tcpcurserverconn = 0
self._tcpcurserverconnclosing = 0
self._tcptotserverconnopened = 0
self._tcpserverconnopenedrate = 0
self._tcpsurgequeuelen = 0
self._tcpspareconn = 0
self._tcptotzombiecltconnflushed = 0
self._tcpzombiecltconnflushedrate = 0
self._tcptotzombiehalfopencltconnflushed = 0
self._tcpzombiehalfopencltconnflushedrate = 0
self._tcptotzombieactivehalfclosecltconnflushed = 0
self._tcpzombieactivehalfclosecltconnflushedrate = 0
self._tcptotzombiepassivehalfclosecltconnflushed = 0
self._tcpzombiepassivehalfclosecltconnflushedrate = 0
self._tcptotzombiesvrconnflushed = 0
self._tcpzombiesvrconnflushedrate = 0
self._tcptotzombiehalfopensvrconnflushed = 0
self._tcpzombiehalfopensvrconnflushedrate = 0
self._tcptotzombieactivehalfclosesvrconnflushed = 0
self._tcpzombieactivehalfclosesvrconnflushedrate = 0
self._tcptotzombiepassivehalfclosesrvconnflushed = 0
self._tcpzombiepassivehalfclosesrvconnflushedrate = 0
self._pcbtotzombiecall = 0
self._pcbzombiecallrate = 0
self._tcptotsyn = 0
self._tcpsynrate = 0
self._tcptotsynprobe = 0
self._tcpsynproberate = 0
self._tcptotsvrfin = 0
self._tcpsvrfinrate = 0
self._tcptotcltfin = 0
self._tcpcltfinrate = 0
self._tcpwaittosyn = 0
self._tcpwaittosynrate = 0
self._tcpwaittodata = 0
self._tcpwaittodatarate = 0
self._tcptotsynheld = 0
self._tcpsynheldrate = 0
self._tcptotsynflush = 0
self._tcpsynflushrate = 0
self._tcptotfinwaitclosed = 0
self._tcpfinwaitclosedrate = 0
self._tcperrbadchecksum = 0
self._tcperrbadchecksumrate = 0
self._tcperrdataafterfin = 0
self._tcperrdataafterfinrate = 0
self._tcperrsyninsynrcvd = 0
self._tcperrsyninsynrcvdrate = 0
self._tcperrsyninest = 0
self._tcperrsyninestrate = 0
self._tcperrsynsentbadack = 0
self._tcperrsynsentbadackrate = 0
self._tcperrrst = 0
self._tcperrrstrate = 0
self._tcperrrstnonest = 0
self._tcperrrstnonestrate = 0
self._tcperrrstoutofwindow = 0
self._tcperrrstoutofwindowrate = 0
self._tcperrrstintimewait = 0
self._tcperrrstintimewaitrate = 0
self._tcperrsvroutoforder = 0
self._tcperrsvroutoforderrate = 0
self._tcperrcltoutoforder = 0
self._tcperrcltoutoforderrate = 0
self._tcperrclthole = 0
self._tcperrcltholerate = 0
self._tcperrsvrhole = 0
self._tcperrsvrholerate = 0
self._tcperrcookiepktseqreject = 0
self._tcperrcookiepktseqrejectrate = 0
self._tcperrcookiepktsigreject = 0
self._tcperrcookiepktsigrejectrate = 0
self._tcperrcookiepktseqdrop = 0
self._tcperrcookiepktseqdroprate = 0
self._tcperrcookiepktmssreject = 0
self._tcperrcookiepktmssrejectrate = 0
self._tcperranyportfail = 0
self._tcperranyportfailrate = 0
self._tcperripportfail = 0
self._tcperripportfailrate = 0
self._tcperrstraypkt = 0
self._tcperrstraypktrate = 0
self._tcperrsentrst = 0
self._tcperrsentrstrate = 0
self._tcperrbadstateconn = 0
self._tcperrbadstateconnrate = 0
self._tcperrrstthreshold = 0
self._tcperrrstthresholdrate = 0
self._tcperroutofwindowpkts = 0
self._tcperroutofwindowpktsrate = 0
self._tcperrsyndroppedcongestion = 0
self._tcperrsyndroppedcongestionrate = 0
self._tcperrcltretrasmit = 0
self._tcperrcltretrasmitrate = 0
self._tcperrfullretrasmit = 0
self._tcperrfullretrasmitrate = 0
self._tcperrsynretry = 0
self._tcperrsynretryrate = 0
self._tcperrsyngiveup = 0
self._tcperrsyngiveuprate = 0
self._tcperrretransmit = 0
self._tcperrretransmitrate = 0
self._tcperrfirstretransmissions = 0
self._tcperrfirstretransmissionsrate = 0
self._tcperrthirdretransmissions = 0
self._tcperrthirdretransmissionsrate = 0
self._tcperrfifthretransmissions = 0
self._tcperrfifthretransmissionsrate = 0
self._tcperrseventhretransmissions = 0
self._tcperrseventhretransmissionsrate = 0
self._tcperrfastretransmissions = 0
self._tcperrfastretransmissionsrate = 0
self._tcperrsvrretrasmit = 0
self._tcperrsvrretrasmitrate = 0
self._tcperrpartialretrasmit = 0
self._tcperrpartialretrasmitrate = 0
self._tcperrfinretry = 0
self._tcperrfinretryrate = 0
self._tcperrfingiveup = 0
self._tcperrfingiveuprate = 0
self._tcperrsecondretransmissions = 0
self._tcperrsecondretransmissionsrate = 0
self._tcperrforthretransmissions = 0
self._tcperrforthretransmissionsrate = 0
self._tcperrsixthretransmissions = 0
self._tcperrsixthretransmissionsrate = 0
self._tcperrretransmitgiveup = 0
self._tcperrretransmitgiveuprate = 0
self._tcperrcipalloc = 0
self._tcperrcipallocrate = 0
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full."""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
:param clearstats:
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def tcperrfingiveuprate(self) :
"""Rate (/s) counter for tcperrfingiveup."""
try :
return self._tcperrfingiveuprate
except Exception as e:
raise e
@property
def tcperrretransmit(self) :
"""TCP packets retransmitted. The NetScaler attempts to retransmit the packet up to seven times, after which it resets the other half of the TCP connection."""
try :
return self._tcperrretransmit
except Exception as e:
raise e
@property
def tcperrcookiepktmssreject(self) :
"""SYN cookie packets rejected because the maximum segment size (MSS) specified in the packets is incorrect."""
try :
return self._tcperrcookiepktmssreject
except Exception as e:
raise e
@property
def tcptotzombiehalfopencltconnflushed(self) :
"""Half-opened client connections that are flushed because the three-way handshakes are not complete."""
try :
return self._tcptotzombiehalfopencltconnflushed
except Exception as e:
raise e
@property
def tcperrsentrstrate(self) :
"""Rate (/s) counter for tcperrsentrst."""
try :
return self._tcperrsentrstrate
except Exception as e:
raise e
@property
def tcperrcltoutoforder(self) :
"""Out of order TCP packets received from a client."""
try :
return self._tcperrcltoutoforder
except Exception as e:
raise e
@property
def tcpsurgequeuelen(self) :
"""Connections in the surge queue. When the NetScaler cannot open a connection to the server, for example when maximum connections have been reached, the NetScaler queues these requests."""
try :
return self._tcpsurgequeuelen
except Exception as e:
raise e
@property
def tcperrcookiepktseqrejectrate(self) :
"""Rate (/s) counter for tcperrcookiepktseqreject."""
try :
return self._tcperrcookiepktseqrejectrate
except Exception as e:
raise e
@property
def tcperrrstoutofwindowrate(self) :
"""Rate (/s) counter for tcperrrstoutofwindow."""
try :
return self._tcperrrstoutofwindowrate
except Exception as e:
raise e
@property
def tcptotrxpkts(self) :
"""TCP packets received."""
try :
return self._tcptotrxpkts
except Exception as e:
raise e
@property
def tcpcurclientconnopening(self) :
"""Client connections in the Opening state, which indicates that the handshakes are not yet complete."""
try :
return self._tcpcurclientconnopening
except Exception as e:
raise e
@property
def tcperrcookiepktseqreject(self) :
"""SYN cookie packets rejected because they contain an incorrect sequence number."""
try :
return self._tcperrcookiepktseqreject
except Exception as e:
raise e
@property
def tcperrforthretransmissionsrate(self) :
"""Rate (/s) counter for tcperrforthretransmissions."""
try :
return self._tcperrforthretransmissionsrate
except Exception as e:
raise e
@property
def tcperrrst(self) :
"""Reset packets received from a client or a server."""
try :
return self._tcperrrst
except Exception as e:
raise e
@property
def tcpsvrfinrate(self) :
"""Rate (/s) counter for tcptotsvrfin."""
try :
return self._tcpsvrfinrate
except Exception as e:
raise e
@property
def tcptxbytesrate(self) :
"""Rate (/s) counter for tcptottxbytes."""
try :
return self._tcptxbytesrate
except Exception as e:
raise e
@property
def tcptotsyn(self) :
"""SYN packets received."""
try :
return self._tcptotsyn
except Exception as e:
raise e
@property
def tcpserverconnopenedrate(self) :
"""Rate (/s) counter for tcptotserverconnopened."""
try :
return self._tcpserverconnopenedrate
except Exception as e:
raise e
@property
def tcperrsyninsynrcvdrate(self) :
"""Rate (/s) counter for tcperrsyninsynrcvd."""
try :
return self._tcperrsyninsynrcvdrate
except Exception as e:
raise e
@property
def tcperrpartialretrasmitrate(self) :
"""Rate (/s) counter for tcperrpartialretrasmit."""
try :
return self._tcperrpartialretrasmitrate
except Exception as e:
raise e
@property
def tcpzombieactivehalfclosecltconnflushedrate(self) :
"""Rate (/s) counter for tcptotzombieactivehalfclosecltconnflushed."""
try :
return self._tcpzombieactivehalfclosecltconnflushedrate
except Exception as e:
raise e
@property
def tcperrfinretry(self) :
"""FIN packets resent to a server or a client."""
try :
return self._tcperrfinretry
except Exception as e:
raise e
@property
def tcperrsvroutoforderrate(self) :
"""Rate (/s) counter for tcperrsvroutoforder."""
try :
return self._tcperrsvroutoforderrate
except Exception as e:
raise e
@property
def pcbzombiecallrate(self) :
"""Rate (/s) counter for pcbtotzombiecall."""
try :
return self._pcbzombiecallrate
except Exception as e:
raise e
@property
def tcperrfirstretransmissionsrate(self) :
"""Rate (/s) counter for tcperrfirstretransmissions."""
try :
return self._tcperrfirstretransmissionsrate
except Exception as e:
raise e
@property
def tcperrstraypkt(self) :
"""Number of stray or misrouted packets."""
try :
return self._tcperrstraypkt
except Exception as e:
raise e
@property
def tcperrsyninsynrcvd(self) :
"""SYN packets received on a connection that is in the SYN_RCVD state. A connection goes into the SYN_RCVD state after receiving a SYN packet."""
try :
return self._tcperrsyninsynrcvd
except Exception as e:
raise e
@property
def tcprxbytesrate(self) :
"""Rate (/s) counter for tcptotrxbytes."""
try :
return self._tcprxbytesrate
except Exception as e:
raise e
@property
def tcpcurserverconnestablished(self) :
"""Current server connections in the Established state, which indicates that data transfer can occur between the NetScaler and the server."""
try :
return self._tcpcurserverconnestablished
except Exception as e:
raise e
@property
def tcperroutofwindowpktsrate(self) :
"""Rate (/s) counter for tcperroutofwindowpkts."""
try :
return self._tcperroutofwindowpktsrate
except Exception as e:
raise e
@property
def tcperranyportfailrate(self) :
"""Rate (/s) counter for tcperranyportfail."""
try :
return self._tcperranyportfailrate
except Exception as e:
raise e
@property
def tcpsynproberate(self) :
"""Rate (/s) counter for tcptotsynprobe."""
try :
return self._tcpsynproberate
except Exception as e:
raise e
@property
def tcpzombiesvrconnflushedrate(self) :
"""Rate (/s) counter for tcptotzombiesvrconnflushed."""
try :
return self._tcpzombiesvrconnflushedrate
except Exception as e:
raise e
@property
def tcpcurclientconn(self) :
"""Client connections, including connections in the Opening, Established, and Closing state."""
try :
return self._tcpcurclientconn
except Exception as e:
raise e
@property
def tcperrsyninestrate(self) :
"""Rate (/s) counter for tcperrsyninest."""
try :
return self._tcperrsyninestrate
except Exception as e:
raise e
@property
def tcpcurserverconn(self) :
"""Server connections, including connections in the Opening, Established, and Closing state."""
try :
return self._tcpcurserverconn
except Exception as e:
raise e
@property
def tcperripportfailrate(self) :
"""Rate (/s) counter for tcperripportfail."""
try :
return self._tcperripportfailrate
except Exception as e:
raise e
@property
def tcperrcookiepktsigrejectrate(self) :
"""Rate (/s) counter for tcperrcookiepktsigreject."""
try :
return self._tcperrcookiepktsigrejectrate
except Exception as e:
raise e
@property
def tcperrforthretransmissions(self) :
"""Packets retransmitted four times by the NetScaler."""
try :
return self._tcperrforthretransmissions
except Exception as e:
raise e
@property
def tcpzombiehalfopensvrconnflushedrate(self) :
"""Rate (/s) counter for tcptotzombiehalfopensvrconnflushed."""
try :
return self._tcpzombiehalfopensvrconnflushedrate
except Exception as e:
raise e
@property
def tcperrsixthretransmissions(self) :
"""Packets retransmitted six times by the NetScaler."""
try :
return self._tcperrsixthretransmissions
except Exception as e:
raise e
@property
def tcpfinwaitclosedrate(self) :
"""Rate (/s) counter for tcptotfinwaitclosed."""
try :
return self._tcpfinwaitclosedrate
except Exception as e:
raise e
@property
def tcperrbadchecksum(self) :
"""Packets received with a TCP checksum error."""
try :
return self._tcperrbadchecksum
except Exception as e:
raise e
@property
def tcpsynflushrate(self) :
"""Rate (/s) counter for tcptotsynflush."""
try :
return self._tcpsynflushrate
except Exception as e:
raise e
@property
def tcperrcipallocrate(self) :
"""Rate (/s) counter for tcperrcipalloc."""
try :
return self._tcperrcipallocrate
except Exception as e:
raise e
@property
def tcperrrstthreshold(self) :
"""Reset packets dropped because the default threshold of 100 resets per 10 milliseconds has been exceeded. This is a configurable value using the set rateControl command."""
try :
return self._tcperrrstthreshold
except Exception as e:
raise e
@property
def tcptotzombieactivehalfclosecltconnflushed(self) :
"""Active half-closed client connections that are flushed because the client has closed the connection and there has been no activity on the connection."""
try :
return self._tcptotzombieactivehalfclosecltconnflushed
except Exception as e:
raise e
@property
def tcperrsynsentbadackrate(self) :
"""Rate (/s) counter for tcperrsynsentbadack."""
try :
return self._tcperrsynsentbadackrate
except Exception as e:
raise e
@property
def tcpwaittodatarate(self) :
"""Rate (/s) counter for tcpwaittodata."""
try :
return self._tcpwaittodatarate
except Exception as e:
raise e
@property
def tcperrseventhretransmissionsrate(self) :
"""Rate (/s) counter for tcperrseventhretransmissions."""
try :
return self._tcperrseventhretransmissionsrate
except Exception as e:
raise e
@property
def tcperrretransmitrate(self) :
"""Rate (/s) counter for tcperrretransmit."""
try :
return self._tcperrretransmitrate
except Exception as e:
raise e
@property
def tcperrfullretrasmitrate(self) :
"""Rate (/s) counter for tcperrfullretrasmit."""
try :
return self._tcperrfullretrasmitrate
except Exception as e:
raise e
@property
def tcptotsynflush(self) :
"""SYN packets flushed on the NetScaler because of no response from the server for three or more seconds."""
try :
return self._tcptotsynflush
except Exception as e:
raise e
@property
def tcperrsixthretransmissionsrate(self) :
"""Rate (/s) counter for tcperrsixthretransmissions."""
try :
return self._tcperrsixthretransmissionsrate
except Exception as e:
raise e
@property
def tcperrfastretransmissions(self) :
"""TCP packets on which the NetScaler performs a fast retransmission in response to three duplicate acknowledgements or a partial acknowledgement. The NetScaler assumes that the packet is lost and retransmits the packet before its time-out."""
try :
return self._tcperrfastretransmissions
except Exception as e:
raise e
@property
def tcperrsecondretransmissionsrate(self) :
"""Rate (/s) counter for tcperrsecondretransmissions."""
try :
return self._tcperrsecondretransmissionsrate
except Exception as e:
raise e
@property
def tcpzombiepassivehalfclosecltconnflushedrate(self) :
"""Rate (/s) counter for tcptotzombiepassivehalfclosecltconnflushed."""
try :
return self._tcpzombiepassivehalfclosecltconnflushedrate
except Exception as e:
raise e
@property
def tcpcltfinrate(self) :
"""Rate (/s) counter for tcptotcltfin."""
try :
return self._tcpcltfinrate
except Exception as e:
raise e
@property
def tcpsynheldrate(self) :
"""Rate (/s) counter for tcptotsynheld."""
try :
return self._tcpsynheldrate
except Exception as e:
raise e
@property
def tcperrrstnonestrate(self) :
"""Rate (/s) counter for tcperrrstnonest."""
try :
return self._tcperrrstnonestrate
except Exception as e:
raise e
@property
def tcperrcltretrasmit(self) :
"""Packets retransmitted by a client. This usually occurs because the acknowledgement from the NetScaler has not reached the client."""
try :
return self._tcperrcltretrasmit
except Exception as e:
raise e
@property
def tcptotsynprobe(self) :
"""Probes from the NetScaler to a server. The NetScaler sends a SYN packet to the server to check its availability and expects a SYN_ACK packet from the server before a specified response timeout."""
try :
return self._tcptotsynprobe
except Exception as e:
raise e
@property
def tcptotfinwaitclosed(self) :
"""Connections closed on the NetScaler because the number of connections in the TIME_WAIT state has exceeded the default value of 7000."""
try :
return self._tcptotfinwaitclosed
except Exception as e:
raise e
@property
def tcpzombiepassivehalfclosesrvconnflushedrate(self) :
"""Rate (/s) counter for tcptotzombiepassivehalfclosesrvconnflushed."""
try :
return self._tcpzombiepassivehalfclosesrvconnflushedrate
except Exception as e:
raise e
@property
def tcperrsynsentbadack(self) :
"""Incorrect ACK packets received on a connection that is in the SYN_SENT state. An incorrect ACK packet is the third packet in the three-way handshake that has an incorrect sequence number."""
try :
return self._tcperrsynsentbadack
except Exception as e:
raise e
@property
def tcptotsynheld(self) :
"""SYN packets held on the NetScaler that are waiting for a server connection."""
try :
return self._tcptotsynheld
except Exception as e:
raise e
@property
def tcperrrstintimewaitrate(self) :
"""Rate (/s) counter for tcperrrstintimewait."""
try :
return self._tcperrrstintimewaitrate
except Exception as e:
raise e
@property
def tcperrrstintimewait(self) :
"""Reset packets received on a connection that is in the TIME_WAIT state. Packets cannot be transferred on a connection in the TIME_WAIT state."""
try :
return self._tcperrrstintimewait
except Exception as e:
raise e
@property
def tcperrthirdretransmissions(self) :
"""Packets retransmitted three times by the NetScaler."""
try :
return self._tcperrthirdretransmissions
except Exception as e:
raise e
@property
def tcptotrxbytes(self) :
"""Bytes of TCP data received."""
try :
return self._tcptotrxbytes
except Exception as e:
raise e
@property
def tcperrsvrretrasmit(self) :
"""Packets retransmitted by a server. This usually occurs because the acknowledgement from the NetScaler has not reached the server."""
try :
return self._tcperrsvrretrasmit
except Exception as e:
raise e
@property
def tcperrfastretransmissionsrate(self) :
"""Rate (/s) counter for tcperrfastretransmissions."""
try :
return self._tcperrfastretransmissionsrate
except Exception as e:
raise e
@property
def tcptotzombiepassivehalfclosecltconnflushed(self) :
"""Passive half-closed client connections that are flushed because the NetScaler has closed the connection and there has been no activity on the connection."""
try :
return self._tcptotzombiepassivehalfclosecltconnflushed
except Exception as e:
raise e
@property
def tcperrcltholerate(self) :
"""Rate (/s) counter for tcperrclthole."""
try :
return self._tcperrcltholerate
except Exception as e:
raise e
@property
def tcperrsvroutoforder(self) :
"""Out of order TCP packets received from a server. ."""
try :
return self._tcperrsvroutoforder
except Exception as e:
raise e
@property
def tcprxpktsrate(self) :
"""Rate (/s) counter for tcptotrxpkts."""
try :
return self._tcprxpktsrate
except Exception as e:
raise e
@property
def tcptotclientconnopened(self) :
"""Client connections opened by the NetScaler since startup (after three-way handshake). This counter is reset when the NetScaler is restarted."""
try :
return self._tcptotclientconnopened
except Exception as e:
raise e
@property
def tcperrstraypktrate(self) :
"""Rate (/s) counter for tcperrstraypkt."""
try :
return self._tcperrstraypktrate
except Exception as e:
raise e
@property
def tcperrfinretryrate(self) :
"""Rate (/s) counter for tcperrfinretry."""
try :
return self._tcperrfinretryrate
except Exception as e:
raise e
@property
def tcptotserverconnopened(self) :
"""Server connections initiated by the NetScaler since startup. This counter is reset when the NetScaler is restarted."""
try :
return self._tcptotserverconnopened
except Exception as e:
raise e
@property
def tcperrretransmitgiveup(self) :
"""Number of times NetScaler terminates a connection after retransmitting the packet seven times on that connection.Retrasnmission happens when recieving end doesn't acknowledges the packet."""
try :
return self._tcperrretransmitgiveup
except Exception as e:
raise e
@property
def tcperrthirdretransmissionsrate(self) :
"""Rate (/s) counter for tcperrthirdretransmissions."""
try :
return self._tcperrthirdretransmissionsrate
except Exception as e:
raise e
@property
def tcpwaittosyn(self) :
"""SYN packets (packets used to initiate a TCP connection) received on connections that are in the TIME_WAIT state. Packets cannot be transferred on a connection in this state."""
try :
return self._tcpwaittosyn
except Exception as e:
raise e
@property
def tcperrsyndroppedcongestion(self) :
"""SYN packets dropped because of network congestion."""
try :
return self._tcperrsyndroppedcongestion
except Exception as e:
raise e
@property
def tcperrfingiveup(self) :
"""Connections that were timed out by the NetScaler because of not receiving the ACK packet after retransmitting the FIN packet four times."""
try :
return self._tcperrfingiveup
except Exception as e:
raise e
@property
def tcptotsvrfin(self) :
"""FIN packets received from the server."""
try :
return self._tcptotsvrfin
except Exception as e:
raise e
@property
def tcperrcltretrasmitrate(self) :
"""Rate (/s) counter for tcperrcltretrasmit."""
try :
return self._tcperrcltretrasmitrate
except Exception as e:
raise e
@property
def tcperrcookiepktmssrejectrate(self) :
"""Rate (/s) counter for tcperrcookiepktmssreject."""
try :
return self._tcperrcookiepktmssrejectrate
except Exception as e:
raise e
@property
def tcperrsvrretrasmitrate(self) :
"""Rate (/s) counter for tcperrsvrretrasmit."""
try :
return self._tcperrsvrretrasmitrate
except Exception as e:
raise e
@property
def tcpcurserverconnclosing(self) :
"""Server connections in the Closing state, which indicates that the connection termination process has initiated but is not complete."""
try :
return self._tcpcurserverconnclosing
except Exception as e:
raise e
@property
def tcperrrstrate(self) :
"""Rate (/s) counter for tcperrrst."""
try :
return self._tcperrrstrate
except Exception as e:
raise e
@property
def tcperrfifthretransmissionsrate(self) :
"""Rate (/s) counter for tcperrfifthretransmissions."""
try :
return self._tcperrfifthretransmissionsrate
except Exception as e:
raise e
@property
def tcperrdataafterfin(self) :
"""Packets received following a connection termination request. This error is usually caused by a reordering of packets during transmission."""
try :
return self._tcperrdataafterfin
except Exception as e:
raise e
@property
def tcperrsvrhole(self) :
"""TCP holes created on a server connection. When out of order packets are received from a server, a hole is created on the NetScaler for each group of missing packets."""
try :
return self._tcperrsvrhole
except Exception as e:
raise e
@property
def tcperrsyngiveup(self) :
"""Attempts to establish a connection on the NetScaler that timed out."""
try :
return self._tcperrsyngiveup
except Exception as e:
raise e
@property
def tcperrfirstretransmissions(self) :
"""Packets retransmitted once by the NetScaler."""
try :
return self._tcperrfirstretransmissions
except Exception as e:
raise e
@property
def tcptotzombiesvrconnflushed(self) :
"""Server connections that are flushed because there have been no client requests in the queue for some time."""
try :
return self._tcptotzombiesvrconnflushed
except Exception as e:
raise e
@property
def tcperrsynretryrate(self) :
"""Rate (/s) counter for tcperrsynretry."""
try :
return self._tcperrsynretryrate
except Exception as e:
raise e
@property
def tcperrcookiepktsigreject(self) :
"""SYN cookie packets rejected because they contain an incorrect signature."""
try :
return self._tcperrcookiepktsigreject
except Exception as e:
raise e
@property
def tcperroutofwindowpkts(self) :
"""Packets received that are out of the current advertised window."""
try :
return self._tcperroutofwindowpkts
except Exception as e:
raise e
@property
def tcperrcookiepktseqdrop(self) :
"""SYN cookie packets dropped because the sequence number specified in the packets is outside the current window."""
try :
return self._tcperrcookiepktseqdrop
except Exception as e:
raise e
@property
def tcperrbadstateconnrate(self) :
"""Rate (/s) counter for tcperrbadstateconn."""
try :
return self._tcperrbadstateconnrate
except Exception as e:
raise e
@property
def tcperrsyndroppedcongestionrate(self) :
"""Rate (/s) counter for tcperrsyndroppedcongestion."""
try :
return self._tcperrsyndroppedcongestionrate
except Exception as e:
raise e
@property
def tcperrsvrholerate(self) :
"""Rate (/s) counter for tcperrsvrhole."""
try :
return self._tcperrsvrholerate
except Exception as e:
raise e
@property
def tcperrdataafterfinrate(self) :
"""Rate (/s) counter for tcperrdataafterfin."""
try :
return self._tcperrdataafterfinrate
except Exception as e:
raise e
@property
def tcperrcltoutoforderrate(self) :
"""Rate (/s) counter for tcperrcltoutoforder."""
try :
return self._tcperrcltoutoforderrate
except Exception as e:
raise e
@property
def tcptotzombiepassivehalfclosesrvconnflushed(self) :
"""Passive half-closed server connections that are flushed because the NetScaler has closed the connection and there has been no activity on the connection."""
try :
return self._tcptotzombiepassivehalfclosesrvconnflushed
except Exception as e:
raise e
@property
def tcperrsyninest(self) :
"""SYN packets received on a connection that is in the ESTABLISHED state. A SYN packet is not expected on an ESTABLISHED connection."""
try :
return self._tcperrsyninest
except Exception as e:
raise e
@property
def tcperrsecondretransmissions(self) :
"""Packets retransmitted twice by the NetScaler."""
try :
return self._tcperrsecondretransmissions
except Exception as e:
raise e
@property
def tcperrbadstateconn(self) :
"""Connections that are not in a valid TCP state."""
try :
return self._tcperrbadstateconn
except Exception as e:
raise e
@property
def tcpspareconn(self) :
"""Spare connections available. To save time and resources in establishing another connection for a new client, the connection on the server is not closed after completing the request from the first client and is available for serving future requests."""
try :
return self._tcpspareconn
except Exception as e:
raise e
@property
def tcperrretransmitgiveuprate(self) :
"""Rate (/s) counter for tcperrretransmitgiveup."""
try :
return self._tcperrretransmitgiveuprate
except Exception as e:
raise e
@property
def tcptottxpkts(self) :
"""TCP packets transmitted."""
try :
return self._tcptottxpkts
except Exception as e:
raise e
@property
def tcptotcltfin(self) :
"""FIN packets received from the clients."""
try :
return self._tcptotcltfin
except Exception as e:
raise e
@property
def tcpsynrate(self) :
"""Rate (/s) counter for tcptotsyn."""
try :
return self._tcpsynrate
except Exception as e:
raise e
@property
def tcpclientconnopenedrate(self) :
"""Rate (/s) counter for tcptotclientconnopened."""
try :
return self._tcpclientconnopenedrate
except Exception as e:
raise e
@property
def tcperranyportfail(self) :
"""Port allocations that have failed on a mapped IP address because the maximum limit of 65536 has been exceeded."""
try :
return self._tcperranyportfail
except Exception as e:
raise e
@property
def tcpwaittosynrate(self) :
"""Rate (/s) counter for tcpwaittosyn."""
try :
return self._tcpwaittosynrate
except Exception as e:
raise e
@property
def tcperrrstnonest(self) :
"""Reset packets received on a connection that is not in the ESTABLISHED state."""
try :
return self._tcperrrstnonest
except Exception as e:
raise e
@property
def tcperripportfail(self) :
"""Port allocations that have failed on a subnet IP address or vserver IP address because the maximum limit of 65536 has been exceeded."""
try :
return self._tcperripportfail
except Exception as e:
raise e
@property
def tcperrpartialretrasmit(self) :
"""Partial packet retransmits by a client or server due to congestion on the connection. This usually occurs because the window advertised by the NetScaler is not big enough to hold the full packet."""
try :
return self._tcperrpartialretrasmit
except Exception as e:
raise e
@property
def tcpzombieactivehalfclosesvrconnflushedrate(self) :
"""Rate (/s) counter for tcptotzombieactivehalfclosesvrconnflushed."""
try :
return self._tcpzombieactivehalfclosesvrconnflushedrate
except Exception as e:
raise e
@property
def tcperrclthole(self) :
"""TCP holes created on a client connection. When out of order packets are received from a client, a hole is created on the NetScaler for each group of missing packets."""
try :
return self._tcperrclthole
except Exception as e:
raise e
@property
def tcpcurclientconnestablished(self) :
"""Current client connections in the Established state, which indicates that data transfer can occur between the NetScaler and the client."""
try :
return self._tcpcurclientconnestablished
except Exception as e:
raise e
@property
def tcpwaittodata(self) :
"""Bytes of data received on connections that are in the TIME_WAIT state. Data cannot be transferred on a connection that is in this state."""
try :
return self._tcpwaittodata
except Exception as e:
raise e
@property
def tcperrsentrst(self) :
"""Reset packets sent to a client or a server."""
try :
return self._tcperrsentrst
except Exception as e:
raise e
@property
def tcperrsyngiveuprate(self) :
"""Rate (/s) counter for tcperrsyngiveup."""
try :
return self._tcperrsyngiveuprate
except Exception as e:
raise e
@property
def tcperrbadchecksumrate(self) :
"""Rate (/s) counter for tcperrbadchecksum."""
try :
return self._tcperrbadchecksumrate
except Exception as e:
raise e
@property
def tcperrcookiepktseqdroprate(self) :
"""Rate (/s) counter for tcperrcookiepktseqdrop."""
try :
return self._tcperrcookiepktseqdroprate
except Exception as e:
raise e
@property
def tcperrseventhretransmissions(self) :
"""Packets retransmitted seven times by the NetScaler. If this fails, the NetScaler terminates the connection."""
try :
return self._tcperrseventhretransmissions
except Exception as e:
raise e
@property
def tcperrrstoutofwindow(self) :
"""Reset packets received on a connection that is out of the current TCP window."""
try :
return self._tcperrrstoutofwindow
except Exception as e:
raise e
@property
def tcpcurserverconnopening(self) :
"""Server connections in the Opening state, which indicates that the handshakes are not yet complete."""
try :
return self._tcpcurserverconnopening
except Exception as e:
raise e
@property
def tcptotzombieactivehalfclosesvrconnflushed(self) :
"""Active half-closed server connections that are flushed because the server has closed the connection and there has been no activity on the connection."""
try :
return self._tcptotzombieactivehalfclosesvrconnflushed
except Exception as e:
raise e
@property
def tcperrfullretrasmit(self) :
"""Full packets retransmitted by the client or the server."""
try :
return self._tcperrfullretrasmit
except Exception as e:
raise e
@property
def tcperrcipalloc(self) :
"""Number of times TCP level client header insertion failure."""
try :
return self._tcperrcipalloc
except Exception as e:
raise e
@property
def tcptotzombiecltconnflushed(self) :
"""Client connections that are flushed because the client has been idle for some time."""
try :
return self._tcptotzombiecltconnflushed
except Exception as e:
raise e
@property
def tcptotzombiehalfopensvrconnflushed(self) :
"""Half-opened server connections that are flushed because the three-way handshakes are not complete."""
try :
return self._tcptotzombiehalfopensvrconnflushed
except Exception as e:
raise e
@property
def pcbtotzombiecall(self) :
"""Times the Zombie cleanup function is called. Every time a connection is flushed, it is marked for cleanup. The Zombie cleanup function clears all these connections at predefined intervals."""
try :
return self._pcbtotzombiecall
except Exception as e:
raise e
@property
def tcpactiveserverconn(self) :
"""Connections to a server currently responding to requests."""
try :
return self._tcpactiveserverconn
except Exception as e:
raise e
@property
def tcperrsynretry(self) :
"""SYN packets resent to a server."""
try :
return self._tcperrsynretry
except Exception as e:
raise e
@property
def tcptottxbytes(self) :
"""Bytes of TCP data transmitted."""
try :
return self._tcptottxbytes
except Exception as e:
raise e
@property
def tcpcurclientconnclosing(self) :
"""Client connections in the Closing state, which indicates that the connection termination process has initiated but is not complete."""
try :
return self._tcpcurclientconnclosing
except Exception as e:
raise e
@property
def tcperrrstthresholdrate(self) :
"""Rate (/s) counter for tcperrrstthreshold."""
try :
return self._tcperrrstthresholdrate
except Exception as e:
raise e
@property
def tcperrfifthretransmissions(self) :
"""Packets retransmitted five times by the NetScaler."""
try :
return self._tcperrfifthretransmissions
except Exception as e:
raise e
@property
def tcpzombiehalfopencltconnflushedrate(self) :
"""Rate (/s) counter for tcptotzombiehalfopencltconnflushed."""
try :
return self._tcpzombiehalfopencltconnflushedrate
except Exception as e:
raise e
@property
def tcpzombiecltconnflushedrate(self) :
"""Rate (/s) counter for tcptotzombiecltconnflushed."""
try :
return self._tcpzombiecltconnflushedrate
except Exception as e:
raise e
@property
def tcptxpktsrate(self) :
"""Rate (/s) counter for tcptottxpkts."""
try :
return self._tcptxpktsrate
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(protocoltcp_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.protocoltcp
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
return 0
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
"""Use this API to fetch the statistics of all protocoltcp_stats resources that are configured on netscaler.
:param service:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
obj = protocoltcp_stats()
if not name :
response = obj.stat_resources(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
""" """
basic = "basic"
full = "full"
class protocoltcp_response(base_response) :
""" """
def __init__(self, length=1) :
self.protocoltcp = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.protocoltcp = [protocoltcp_stats() for _ in range(length)]
|
StarcoderdataPython
|
4801369
|
# -*- coding: utf-8 -*-
"""Find a Buyer - Confirm Identity - with letter page"""
import logging
from requests import Response, Session
from directory_tests_shared import PageType, Service, URLs
from tests.functional.utils.context_utils import Actor
from tests.functional.utils.request import Method, check_response, make_request
SERVICE = Service.FAB
NAME = "Confirm identity with letter"
TYPE = PageType.FORM
URL = URLs.FAB_CONFIRM_IDENTITY_LETTER.absolute
EXPECTED_STRINGS = [
"Verification letter request",
"Your verification letter should arrive within 5 working days",
"Contact us",
URLs.CONTACT_US_FEEDBACK.absolute,
"The letter contains a 12 digit verification code",
"Go to your company profile",
# URLs.FAB_COMPANY_PROFILE.relative,
]
def go_to(session: Session) -> Response:
headers = {"Referer": URL}
return make_request(Method.GET, URL, session=session, headers=headers)
def should_be_here(response: Response):
check_response(response, 200, body_contains=EXPECTED_STRINGS)
logging.debug(
"Successfully got to the FAB Confirm your Identity - with letter page"
)
def submit(actor: Actor) -> Response:
"""Verify your identity with a physical letter."""
headers = {"Referer": URL}
data = {
"csrfmiddlewaretoken": actor.csrfmiddlewaretoken,
"send_verification_letter_view-current_step": "address",
"address-postal_full_name": actor.alias,
"address-address_confirmed": "on",
}
response = make_request(
Method.POST, URL, session=actor.session, headers=headers, data=data
)
return response
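# Typical flow in the surrounding functional tests (a sketch; the `actor`
# object is assumed to be built elsewhere by the test framework):
#
#     response = submit(actor)
#     should_be_here(response)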
|
StarcoderdataPython
|
1719251
|
<filename>dnstable_manager/util.py
# Copyright (c) 2015 by Farsight Security, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def iterfileobj(fp, length=16*1024):
'''iterate data from file-like object fp'''
while 1:
buf = fp.read(length)
if not buf:
break
yield buf
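# Minimal usage sketch (illustrative addition, not part of the original
# module): stream an in-memory buffer in chunks and hash it incrementally.
if __name__ == '__main__':
    import hashlib
    import io

    buf = io.BytesIO(b'x' * 50000)
    digest = hashlib.sha256()
    for chunk in iterfileobj(buf, length=8192):
        digest.update(chunk)
    print(digest.hexdigest())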
|
StarcoderdataPython
|
1734842
|
<gh_stars>1-10
import os
import pyapr
from skimage import io as skio
def main():
"""
This demo shows how to convert an image to an APR using a fixed set of parameters.
"""
# Read in an image
io_int = pyapr.filegui.InteractiveIO()
fpath = io_int.get_tiff_file_name() # get image file path from gui (data type must be float32 or uint16)
img = skio.imread(fpath)
# Set some parameters
par = pyapr.APRParameters()
par.rel_error = 0.1 # relative error threshold
par.gradient_smoothing = 3 # b-spline smoothing parameter for gradient estimation
# 0 = no smoothing, higher = more smoothing
par.dx = 1
par.dy = 1 # voxel size
par.dz = 1
# threshold parameters
par.Ip_th = 0 # regions below this intensity are regarded as background
par.grad_th = 3 # gradients below this value are set to 0
par.sigma_th = 10 # the local intensity scale is clipped from below to this value
par.auto_parameters = False # if true, threshold parameters are set automatically based on histograms
# Compute APR and sample particle values
apr, parts = pyapr.converter.get_apr(img, params=par, verbose=True)
# Compute computational ratio
cr = img.size/apr.total_number_particles()
print("Compuational Ratio: {:7.2f}".format(cr))
# Display the APR
pyapr.viewer.parts_viewer(apr, parts)
# Write the resulting APR to file
print("Writing APR to file ... \n")
fpath_apr = io_int.save_apr_file_name() # get path through gui
pyapr.io.write(fpath_apr, apr, parts)
if fpath_apr:
# Display the size of the file
file_sz = os.path.getsize(fpath_apr)
print("APR File Size: {:7.2f} MB \n".format(file_sz * 1e-6))
# Compute compression ratio
mcr = os.path.getsize(fpath) / file_sz
print("Memory Compression Ratio: {:7.2f}".format(mcr))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1675295
|
from flask import render_template,request,redirect,url_for,abort
from . import main
from ..models import User,Pitch,Comment
from .forms import UpdateProfile,PitchForm,CommentForm
from .. import db,photos
from flask_login import login_required,current_user
from datetime import datetime
@main.route('/')
def index():
pitches = Pitch.query.order_by(Pitch.time.desc()).all()
return render_template('index.html', pitches = pitches)
@main.route('/add',methods = ['GET','POST'])
@login_required
def add_pitch():
form = PitchForm()
if form.validate_on_submit():
pitch = Pitch(title = form.title.data, pitch = form.pitch.data,user=current_user)
db.session.add(pitch)
db.session.commit()
return redirect(url_for('main.index'))
return render_template('add.html',pitch_form=form)
@main.route('/pitch/<int:id>')
def pitch(id):
pitch = Pitch.query.filter_by(id=id).first()
comments = Comment.get_comments(pitch.id)
return render_template('pitch.html',comments = comments, pitch = pitch)
@main.route('/pitch/comment/new/<int:id>', methods = ['GET','POST'])
@login_required
def new_comment(id):
form = CommentForm()
pitch = Pitch.query.filter_by(id=id).first()
if form.validate_on_submit():
comment = form.comment.data
new_comment = Comment(pitch_comment=comment,pitch_id = pitch.id,user=current_user)
new_comment.save_comment()
return redirect(url_for('.pitch',id = pitch.id))
return render_template('new_comment.html',comment_form=form, pitch=pitch)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET', 'POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form=form)
@main.route('/user/<uname>/update/pic',methods=['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
    return redirect(url_for('main.profile',uname=uname))
|
StarcoderdataPython
|
1658215
|
<reponame>k4rth33k/gdrivefs
import re
import json
import os
from fsspec.spec import AbstractFileSystem, AbstractBufferedFile
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google.auth.credentials import AnonymousCredentials
import pydata_google_auth
scope_dict = {'full_control': 'https://www.googleapis.com/auth/drive',
'read_only': 'https://www.googleapis.com/auth/drive.readonly'}
DIR_MIME_TYPE = 'application/vnd.google-apps.folder'
fields = ','.join(['name', 'id', 'size', 'description', 'trashed', 'mimeType',
'version', 'createdTime', 'modifiedTime', 'capabilities'])
def _normalize_path(prefix, name):
raw_prefix = prefix.strip('/')
return '/' + '/'.join([raw_prefix, name])
def _finfo_from_response(f, path_prefix=None):
# strictly speaking, other types might be capable of having children,
# such as packages
ftype = 'directory' if f.get('mimeType') == DIR_MIME_TYPE else 'file'
if path_prefix:
name = _normalize_path(path_prefix, f['name'])
else:
name = f['name']
info = {'name': name.lstrip('/'),
'size': int(f.get('size', 0)),
'type': ftype}
f.update(info)
return f
class GoogleDriveFileSystem(AbstractFileSystem):
protocol = "gdrive"
root_marker = ''
def __init__(self, root_file_id=None, token="browser",
access="full_control", spaces='drive',
**kwargs):
"""
        Access to Google Drive as a file-system
:param root_file_id: str or None
If you have a share, drive or folder ID to treat as the FS root, enter
it here. Otherwise, you will get your default drive
:param token: str
One of "anon", "browser", "cache". Using "browser" will prompt a URL to
be put in a browser, and cache the response for future use with token="cache".
"browser" will remove any previously cached token file if it exists.
:param access: str
            One of "full_control", "read_only"
:param spaces:
Category of files to search, can be 'drive', 'appDataFolder' and 'photos'.
Of these, only the first is general
:param kwargs:
Passed to parent
"""
super().__init__(**kwargs)
self.access = access
self.scopes = [scope_dict[access]]
self.token = token
self.spaces = spaces
self.root_file_id = root_file_id or 'root'
self.connect(method=token)
if token != "anon":
self.ls("")
def connect(self, method=None):
if method == 'browser':
cred = self._connect_browser()
elif method == "cache":
cred = self._connect_cache()
elif method == 'anon':
cred = AnonymousCredentials()
else:
raise ValueError(f"Invalid connection method `{method}`.")
srv = build('drive', 'v3', credentials=cred)
self._drives = srv.drives()
self.service = srv.files()
def _connect_browser(self):
try:
os.remove(pydata_google_auth.cache.READ_WRITE._path)
except OSError:
pass
return self._connect_cache()
def _connect_cache(self):
return pydata_google_auth.get_user_credentials(self.scopes)
@property
def drives(self):
if self._drives is not None:
return self._drives.list().execute()['drives']
else:
return []
def info(self, path, trashed=False, **kwargs):
if self._parent(path) in self.dircache:
listing = self.dircache[self._parent(path)]
out = [l for l in listing if l['name'] == path]
if not out:
raise FileNotFoundError
return out[0]
else:
file_id = self.path_to_file_id(path, trashed=trashed)
return self._info_by_id(file_id)
def mkdir(self, path, create_parents=True, **kwargs):
if create_parents and self._parent(path):
self.makedirs(self._parent(path), exist_ok=True)
parent_id = self.path_to_file_id(self._parent(path))
meta = {"name": path.rstrip("/").rsplit("/", 1)[-1],
'mimeType': DIR_MIME_TYPE,
"parents": [parent_id]}
self.service.create(body=meta).execute()
self.invalidate_cache(self._parent(path))
def makedirs(self, path, exist_ok=True):
if self.isdir(path):
if exist_ok:
return
else:
raise FileExistsError(path)
if self._parent(path):
self.makedirs(self._parent(path), exist_ok=True)
self.mkdir(path, create_parents=False)
def _delete(self, file_id):
self.service.delete(fileId=file_id).execute()
def rm(self, path, recursive=True, maxdepth=None):
if recursive is False and self.isdir(path) and self.ls(path):
raise ValueError("Attempt to delete non-empty folder")
self._delete(self.path_to_file_id(path))
self.invalidate_cache(path)
self.invalidate_cache(self._parent(path))
def rmdir(self, path):
if not self.isdir(path):
raise ValueError("Path is not a directory")
self.rm(path, recursive=False)
def _info_by_id(self, file_id, path_prefix=None):
response = self.service.get(fileId=file_id, fields=fields,
).execute()
return _finfo_from_response(response, path_prefix)
def export(self, path, mime_type):
""" Convert a google-native file to other format and download
mime_type is something like "text/plain"
"""
file_id = self.path_to_file_id(path)
return self.service.export(fileId=file_id, mimeType=mime_type).execute()
def ls(self, path, detail=False, trashed=False):
if path in [None, '/']:
path = ""
if path not in self.dircache:
if path == "":
file_id = self.root_file_id
else:
file_id = self.path_to_file_id(path, trashed=trashed)
files = self._list_directory_by_id(file_id, trashed=trashed,
path_prefix=path)
self.dircache[path] = files
else:
files = self.dircache[path]
if detail:
return files
else:
return sorted([f["name"] for f in files])
def _list_directory_by_id(self, file_id, trashed=False, path_prefix=None):
all_files = []
page_token = None
afields = 'nextPageToken, files(%s)' % fields
query = f"'{file_id}' in parents "
if not trashed:
query += "and trashed = false "
while True:
response = self.service.list(q=query,
spaces=self.spaces, fields=afields,
pageToken=page_token).execute()
for f in response.get('files', []):
all_files.append(_finfo_from_response(f, path_prefix))
more = response.get('incompleteSearch', False)
page_token = response.get('nextPageToken', None)
if page_token is None or more is False:
break
return all_files
def invalidate_cache(self, path=None):
if path:
self.dircache.pop(path, None)
else:
self.dircache.clear()
def path_to_file_id(self, path, parent=None, trashed=False):
items = path.strip('/').split('/')
if path in ["", "/", "root", self.root_file_id]:
return self.root_file_id
if parent is None:
parent = self.root_file_id
top_file_id = self._get_directory_child_by_name(items[0], parent,
trashed=trashed)
if len(items) == 1:
return top_file_id
else:
sub_path = '/'.join(items[1:])
return self.path_to_file_id(sub_path, parent=top_file_id,
trashed=trashed)
def _get_directory_child_by_name(self, child_name, directory_file_id,
trashed=False):
all_children = self._list_directory_by_id(directory_file_id,
trashed=trashed)
possible_children = []
for child in all_children:
if child['name'] == child_name:
possible_children.append(child['id'])
if len(possible_children) == 0:
raise FileNotFoundError(
f'Directory {directory_file_id} has no child '
f'named {child_name}')
if len(possible_children) == 1:
return possible_children[0]
else:
raise KeyError(f'Directory {directory_file_id} has more than one '
f'child named {child_name}. Unable to resolve path '
'to file_id.')
def _open(self, path, mode="rb", **kwargs):
return GoogleDriveFile(self, path, mode=mode, **kwargs)
DEFAULT_BLOCK_SIZE = 5 * 2 ** 20
class GoogleDriveFile(AbstractBufferedFile):
def __init__(self, fs, path, mode='rb', block_size=DEFAULT_BLOCK_SIZE,
autocommit=True, **kwargs):
"""
Open a file.
Parameters
----------
fs: instance of GoogleDriveFileSystem
mode: str
            Normal file modes. Currently only 'wb' and 'rb'.
block_size: int
Buffer size for reading or writing (default 5MB)
"""
super().__init__(fs, path, mode, block_size, autocommit=autocommit,
**kwargs)
if mode == 'wb':
self.location = None
else:
self.file_id = fs.path_to_file_id(path)
def _fetch_range(self, start=None, end=None):
""" Get data from Google Drive
start, end : None or integers
if not both None, fetch only given range
"""
if start is not None or end is not None:
start = start or 0
end = end or 0
head = {'Range': 'bytes=%i-%i' % (start, end - 1)}
else:
head = {}
try:
files_service = self.fs.service
media_obj = files_service.get_media(fileId=self.file_id)
media_obj.headers.update(head)
data = media_obj.execute()
return data
except HttpError as e:
# TODO : doc says server might send everything if range is outside
if 'not satisfiable' in str(e):
return b''
raise
def _upload_chunk(self, final=False):
""" Write one part of a multi-block file upload
Parameters
----------
final: bool
Complete and commit upload
"""
self.buffer.seek(0)
data = self.buffer.getvalue()
head = {}
l = len(data)
if final and self.autocommit:
if l:
part = "%i-%i" % (self.offset, self.offset + l - 1)
head['Content-Range'] = 'bytes %s/%i' % (part, self.offset + l)
else:
# closing when buffer is empty
head['Content-Range'] = 'bytes */%i' % self.offset
data = None
        else:
            part = "%i-%i" % (self.offset, self.offset + l - 1)
            head['Content-Range'] = 'bytes %s/*' % part
head.update({'Content-Type': 'application/octet-stream',
'Content-Length': str(l)})
req = self.fs.service._http.request
head, body = req(self.location, method="PUT", body=data,
headers=head)
status = int(head['status'])
assert status < 400, "Init upload failed"
if status in [200, 201]:
# server thinks we are finished, this should happen
# only when closing
self.file_id = json.loads(body.decode())['id']
elif 'Range' in head:
assert status == 308
            assert head['Range'].split("=")[1] == part
else:
raise IOError
return True
def commit(self):
"""If not auto-committing, finalize file"""
self.autocommit = True
self._upload_chunk(final=True)
def _initiate_upload(self):
""" Create multi-upload """
parent_id = self.fs.path_to_file_id(self.fs._parent(self.path))
head = {"Content-Type": "application/json; charset=UTF-8"}
# also allows description, MIME type, version, thumbnail...
body = json.dumps({"name": self.path.rsplit('/', 1)[-1],
"parents": [parent_id]}).encode()
req = self.fs.service._http.request
# TODO : this creates a new file. If the file exists, you should
# update it by getting the ID and using PATCH, else you get two
# identically-named files
r = req("https://www.googleapis.com/upload/drive/v3/files"
"?uploadType=resumable", method='POST',
headers=head, body=body)
head = r[0]
assert int(head['status']) < 400, "Init upload failed"
self.location = r[0]['location']
def discard(self):
"""Cancel in-progress multi-upload
"""
if self.location is None:
return
        uid = re.findall('upload_id=([^&=?]+)', self.location)
        if not uid:
            return
        # reuse the raw http handle, as the other upload helpers in this
        # class do (there is no self.gcsfs attribute on GoogleDriveFile)
        req = self.fs.service._http.request
        head, _ = req(
            'https://www.googleapis.com/upload/drive/v3/files'
            '?uploadType=resumable&upload_id=' + uid[0],
            method='DELETE')
        assert int(head['status']) < 400, "Cancel upload failed"
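# Illustrative usage sketch: assumes valid Google Drive credentials are
# available via the "browser" OAuth flow; the file path in the commented
# read is hypothetical.
if __name__ == "__main__":
    fs = GoogleDriveFileSystem(token="browser", access="read_only")
    print(fs.ls(""))  # names of files and folders in the drive root
    # with fs.open("reports/summary.txt", "rb") as f:
    #     print(f.read(100))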
|
StarcoderdataPython
|
3238312
|
from .board import *
def penalty_action(board: Board) -> Action:
goal_vector = Vector.from_point(
board.ball.position - board.opponent_goal_position
)
ball_vector = Vector.from_point(
board.ball.position - board.controlled_player.position
)
if board.ball.position.x > 0:
return board.set_action(Action.Shot, vector=goal_vector, power=4)
else:
return board.set_action(None, ball_vector, sprint=True)
|
StarcoderdataPython
|
1312
|
<gh_stars>10-100
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
- LICENCE
The MIT License (MIT)
Copyright (c) 2016 <NAME> Ericsson AB (EU FP7 CityPulse Project)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
- DESCRIPTION OF DOCUMENTS
-- MongoDB Database Documents:
address_document: {
'_id', 'name', 'node_id', 'point': {'longitude', 'latitude'}
}
bus_line_document: {
'_id', 'bus_line_id', 'bus_stops': [{'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}]
}
bus_stop_document: {
'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}
}
bus_stop_waypoints_document: {
'_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[edge_object_id]]
}
bus_vehicle_document: {
'_id', 'bus_vehicle_id', 'maximum_capacity',
'routes': [{'starting_datetime', 'ending_datetime', 'timetable_id'}]
}
detailed_bus_stop_waypoints_document: {
'_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[edge_document]]
}
edge_document: {
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}
node_document: {
'_id', 'osm_id', 'tags', 'point': {'longitude', 'latitude'}
}
point_document: {
'_id', 'osm_id', 'point': {'longitude', 'latitude'}
}
timetable_document: {
'_id', 'timetable_id', 'bus_line_id', 'bus_vehicle_id',
'timetable_entries': [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime', 'number_of_onboarding_passengers',
'number_of_deboarding_passengers', 'number_of_current_passengers',
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}],
'travel_requests': [{
'_id', 'client_id', 'bus_line_id',
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime',
'starting_timetable_entry_index', 'ending_timetable_entry_index'
}]
}
traffic_event_document: {
'_id', 'event_id', 'event_type', 'event_level', 'point': {'longitude', 'latitude'}, 'datetime'
}
travel_request_document: {
'_id', 'client_id', 'bus_line_id',
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime',
'starting_timetable_entry_index', 'ending_timetable_entry_index'
}
way_document: {
'_id', 'osm_id', 'tags', 'references'
}
-- Route Generator Responses:
get_route_between_two_bus_stops: {
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}
get_route_between_multiple_bus_stops: [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}]
get_waypoints_between_two_bus_stops: {
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[{
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}]]
}
get_waypoints_between_multiple_bus_stops: [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[{
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}]]
}]
"""
import time
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from src.common.logger import log
from src.common.parameters import testing_bus_stop_names
from src.route_generator.route_generator_client import get_route_between_two_bus_stops, \
get_route_between_multiple_bus_stops, get_waypoints_between_two_bus_stops, get_waypoints_between_multiple_bus_stops
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__credits__ = [
'<NAME> (Senior Researcher at Ericsson AB) - email: <EMAIL>'
'<NAME> (Senior Researcher at Ericsson AB) - email: <EMAIL>'
]
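# Illustrative example of one of the MongoDB documents described in the
# module docstring (hypothetical values; field names follow the
# bus_stop_document schema):
# bus_stop_document = {
#     '_id': 'object-id', 'osm_id': 123456, 'name': 'central_station',
#     'point': {'longitude': 12.97, 'latitude': 57.70}
# }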
def test_get_route_between_two_bus_stops(starting_bus_stop=None, ending_bus_stop=None,
starting_bus_stop_name=None, ending_bus_stop_name=None):
"""
:param starting_bus_stop: bus_stop_document
:param ending_bus_stop: bus_stop_document
:param starting_bus_stop_name: string
:param ending_bus_stop_name: string
"""
log(module_name='route_generator_test', log_type='INFO',
log_message='get_route_between_two_bus_stops: starting')
start_time = time.time()
# response = {
# 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'route': {
# 'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
# 'distances_from_starting_node', 'times_from_starting_node',
# 'distances_from_previous_node', 'times_from_previous_node'
# }
# }
response = get_route_between_two_bus_stops(
starting_bus_stop=starting_bus_stop,
ending_bus_stop=ending_bus_stop,
starting_bus_stop_name=starting_bus_stop_name,
ending_bus_stop_name=ending_bus_stop_name
)
starting_bus_stop = response.get('starting_bus_stop')
ending_bus_stop = response.get('ending_bus_stop')
route = response.get('route')
if route is not None:
total_distance = route.get('total_distance')
total_time = route.get('total_time')
node_osm_ids = route.get('node_osm_ids')
points = route.get('points')
edges = route.get('edges')
distances_from_starting_node = route.get('distances_from_starting_node')
times_from_starting_node = route.get('times_from_starting_node')
distances_from_previous_node = route.get('distances_from_previous_node')
times_from_previous_node = route.get('times_from_previous_node')
output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \
'\nending_bus_stop: ' + str(ending_bus_stop) + \
'\ntotal_distance: ' + str(total_distance) + \
'\ntotal_time: ' + str(total_time) + \
'\nnode_osm_ids: ' + str(node_osm_ids) + \
'\npoints: ' + str(points) + \
'\nedges: ' + str(edges) + \
'\ndistances_from_starting_node: ' + str(distances_from_starting_node) + \
'\ntimes_from_starting_node: ' + str(times_from_starting_node) + \
'\ndistances_from_previous_node: ' + str(distances_from_previous_node) + \
'\ntimes_from_previous_node: ' + str(times_from_previous_node)
else:
output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \
'\nending_bus_stop: ' + str(ending_bus_stop) + \
'\nroute: None'
print output
elapsed_time = time.time() - start_time
time.sleep(0.1)
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_route_between_two_bus_stops: finished - elapsed_time = ' +
str(elapsed_time) + ' sec')
def test_get_route_between_multiple_bus_stops(bus_stops=None, bus_stop_names=None):
"""
:param bus_stops: [bus_stop_document]
:param bus_stop_names: [string]
"""
log(module_name='route_generator_test', log_type='INFO',
log_message='get_route_between_multiple_bus_stops: starting')
start_time = time.time()
route_distance = 0
route_traveling_time = 0
# response = [{
# 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'route': {
# 'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
# 'distances_from_starting_node', 'times_from_starting_node',
# 'distances_from_previous_node', 'times_from_previous_node'
# }
# }]
response = get_route_between_multiple_bus_stops(
bus_stops=bus_stops,
bus_stop_names=bus_stop_names
)
for intermediate_response in response:
starting_bus_stop = intermediate_response.get('starting_bus_stop')
ending_bus_stop = intermediate_response.get('ending_bus_stop')
intermediate_route = intermediate_response.get('route')
if intermediate_route is not None:
total_distance = intermediate_route.get('total_distance')
route_distance += total_distance
total_time = intermediate_route.get('total_time')
route_traveling_time += total_time
node_osm_ids = intermediate_route.get('node_osm_ids')
points = intermediate_route.get('points')
edges = intermediate_route.get('edges')
distances_from_starting_node = intermediate_route.get('distances_from_starting_node')
times_from_starting_node = intermediate_route.get('times_from_starting_node')
distances_from_previous_node = intermediate_route.get('distances_from_previous_node')
times_from_previous_node = intermediate_route.get('times_from_previous_node')
output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \
'\nending_bus_stop: ' + str(ending_bus_stop) + \
'\ntotal_distance: ' + str(total_distance) + \
'\ntotal_time: ' + str(total_time) + \
'\nnode_osm_ids: ' + str(node_osm_ids) + \
'\npoints: ' + str(points) + \
'\nedges: ' + str(edges) + \
'\ndistances_from_starting_node: ' + str(distances_from_starting_node) + \
'\ntimes_from_starting_node: ' + str(times_from_starting_node) + \
'\ndistances_from_previous_node: ' + str(distances_from_previous_node) + \
'\ntimes_from_previous_node: ' + str(times_from_previous_node)
else:
output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \
'\nending_bus_stop: ' + str(ending_bus_stop) + \
'\nroute: None'
print output
route_average_speed = (route_distance / 1000) / (route_traveling_time / 3600)
print '\nroute_distance: ' + str(route_distance / 1000) + \
' - route_traveling_time: ' + str(route_traveling_time / 60) + \
' - route_average_speed: ' + str(route_average_speed)
elapsed_time = time.time() - start_time
time.sleep(0.1)
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_route_between_multiple_bus_stops: finished - elapsed_time = ' +
str(elapsed_time) + ' sec')
def test_get_waypoints_between_two_bus_stops(starting_bus_stop=None, ending_bus_stop=None,
starting_bus_stop_name=None, ending_bus_stop_name=None):
"""
:param starting_bus_stop: bus_stop_document
:param ending_bus_stop: bus_stop_document
:param starting_bus_stop_name: string
:param ending_bus_stop_name: string
"""
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_waypoints_between_two_bus_stops: starting')
start_time = time.time()
# response = {
# 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'waypoints': [[{
# '_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
# 'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
# 'max_speed', 'road_type', 'way_id', 'traffic_density'
# }]]
# }
response = get_waypoints_between_two_bus_stops(
starting_bus_stop=starting_bus_stop,
ending_bus_stop=ending_bus_stop,
starting_bus_stop_name=starting_bus_stop_name,
ending_bus_stop_name=ending_bus_stop_name
)
starting_bus_stop = response.get('starting_bus_stop')
ending_bus_stop = response.get('ending_bus_stop')
waypoints = response.get('waypoints')
output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \
'\nending_bus_stop: ' + str(ending_bus_stop)
print output
for separate_waypoints in waypoints:
print 'waypoints: ' + str(separate_waypoints)
elapsed_time = time.time() - start_time
time.sleep(0.1)
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_waypoints_between_two_bus_stops: finished - elapsed_time = ' +
str(elapsed_time) + ' sec')
def test_get_waypoints_between_multiple_bus_stops(bus_stops=None, bus_stop_names=None):
"""
:param bus_stops: [bus_stop_document]
:param bus_stop_names: [string]
"""
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_waypoints_between_multiple_bus_stops: starting')
start_time = time.time()
# response = [{
# 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'waypoints': [[{
# '_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
# 'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
# 'max_speed', 'road_type', 'way_id', 'traffic_density'
# }]]
# }]
response = get_waypoints_between_multiple_bus_stops(
bus_stops=bus_stops,
bus_stop_names=bus_stop_names
)
for intermediate_response in response:
starting_bus_stop = intermediate_response.get('starting_bus_stop')
ending_bus_stop = intermediate_response.get('ending_bus_stop')
waypoints = intermediate_response.get('waypoints')
output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \
'\nending_bus_stop: ' + str(ending_bus_stop)
print output
for separate_waypoints in waypoints:
print 'waypoints: ' + str(separate_waypoints)
elapsed_time = time.time() - start_time
time.sleep(0.1)
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_waypoints_between_multiple_bus_stops: finished - elapsed_time = ' +
str(elapsed_time) + ' sec')
if __name__ == '__main__':
selection = ''
while True:
selection = raw_input(
'\n0. exit'
'\n1. test_get_route_between_two_bus_stops'
'\n2. test_get_route_between_multiple_bus_stops'
'\n3. test_get_waypoints_between_two_bus_stops'
'\n4. test_get_waypoints_between_multiple_bus_stops'
'\nSelection: '
)
if selection == '0':
break
elif selection == '1':
test_get_route_between_two_bus_stops(
starting_bus_stop_name=testing_bus_stop_names[0],
ending_bus_stop_name=testing_bus_stop_names[1]
)
elif selection == '2':
test_get_route_between_multiple_bus_stops(
bus_stop_names=testing_bus_stop_names
)
elif selection == '3':
test_get_waypoints_between_two_bus_stops(
starting_bus_stop_name=testing_bus_stop_names[0],
ending_bus_stop_name=testing_bus_stop_names[1]
)
elif selection == '4':
test_get_waypoints_between_multiple_bus_stops(
bus_stop_names=testing_bus_stop_names
)
else:
print 'Invalid input'
|
StarcoderdataPython
|
1728940
|
"""
Module to featurize CIFs using JarvisCFID
Author: <NAME>
Email: <EMAIL>
"""
import os
import pandas as pd
import numpy as np
import pymatgen as pmg
import timeout_decorator
import pathlib
import joblib
from matminer.featurizers.structure import JarvisCFID
class use_cfid():
"""
Class to generate JarvisCFID features using CIFs
args:
        (1) name_of_parent_folder (type:str) - must match the name of the directory containing the 'cifs' folder
(2) csv (type:bool) - whether to save data as csv
return:
(1) pandas.Dataframe of CFID features (pkl and/or csv)
"""
def __init__(self, name_of_parent_folder, csv):
self.name_of_parent_folder = name_of_parent_folder
self.csv = csv
self.cur_dir = pathlib.Path().resolve()
self.directory = os.path.join(self.cur_dir, 'retrieved_data', self.name_of_parent_folder, 'cifs')
self.directory_2 = os.path.join('retrieved_data', self.name_of_parent_folder)
self.jarvis = JarvisCFID()
print(self.jarvis)
@timeout_decorator.timeout(100, timeout_exception = TimeoutError) #100 seconds timer
def descriptor(self, cif):
"""
Apply CFID descriptor
args:
            (1) cif - filename of the CIF to featurize
        return:
            (1) CFID features
"""
struc = pmg.Structure.from_file(os.path.join(self.directory, cif))
output = self.jarvis.featurize(struc)
return output
def featurize(self):
"""
Create features using the 'descriptor()' function with a time limit of 100 seconds
"""
# Create a list of cifs
files = [f for f in os.listdir(self.directory) if os.path.isfile(os.path.join(self.directory, f))]
cif_files = [f for f in files if os.path.splitext(f)[1] == '.cif']
print('No. of CIFs: ', len(cif_files))
# Featurise
jarvis_features, cif_success, cif_timedout, cif_error = list(), list(), list(), list()
for cif in cif_files:
try:
cif_features = self.descriptor(cif)
jarvis_features.append(cif_features)
cif_success.append(os.path.splitext(cif)[0])
print('Success with ', cif)
except TimeoutError:
cif_timedout.append(os.path.splitext(cif)[0])
print('Timeout with ', cif)
pass
except:
cif_error.append(os.path.splitext(cif)[0])
print('Error with ', cif)
pass
print('no. of data saved:', len(cif_success))
print('no. of errors:', len(cif_error))
print('no. of time-outs:', len(cif_timedout))
        # Create pandas.DataFrame of the compiled data
if len(cif_success) != 0:
#Features
df_cif = pd.DataFrame(jarvis_features)
#CIF IDs
df_index = pd.DataFrame(cif_success)
df_index = df_index.rename(columns={0: 'task_id'})
#Concat two dataframes
df = pd.concat([df_index, df_cif], axis=1)
            # strip a '.cif' suffix if present (str.rstrip would also eat
            # trailing 'c', 'i' or 'f' characters from the IDs)
            df['task_id'] = df['task_id'].map(lambda x: x[:-4] if x.endswith('.cif') else x)
df = df.set_index('task_id')
        # CIFs with errors
df_error = pd.DataFrame(cif_error)
df_error = df_error.rename(columns={0: 'task_id'})
# CIF that timed out
df_timedout = pd.DataFrame(cif_timedout)
df_timedout = df_timedout.rename(columns={0: 'task_id'})
# Save data
if len(cif_success) != 0:
joblib.dump(df, os.path.join(self.directory_2,r'CFID_features_' + str(self.name_of_parent_folder) + '.pkl'))
print('Successfully saved data as: ', 'CFID_features_' + str(self.name_of_parent_folder) + '.pkl')
if self.csv == True:
df.to_csv(os.path.join(self.directory_2,r'CFID_features_' + str(self.name_of_parent_folder) + '.csv'), index = False)
print('Successfully saved data as: ', 'CFID_features_' + str(self.name_of_parent_folder) + '.csv')
joblib.dump(df_error, os.path.join(self.directory_2,r'CFID_error_' + str(self.name_of_parent_folder) + '.pkl'))
joblib.dump(df_timedout, os.path.join(self.directory_2,r'CFID_timedout_' + str(self.name_of_parent_folder) + '.pkl'))
if self.csv == True:
df_error.to_csv(os.path.join(self.directory_2,r'CFID_error_' + str(self.name_of_parent_folder) + '.csv'), index = False)
df_timedout.to_csv(os.path.join(self.directory_2,r'CFID_timedout_' + str(self.name_of_parent_folder) + '.csv'), index = False)
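# Illustrative usage sketch: 'my_mp_query' is a hypothetical folder name; it
# must exist under 'retrieved_data' and contain a 'cifs' subfolder, as the
# class docstring describes.
if __name__ == '__main__':
    use_cfid(name_of_parent_folder='my_mp_query', csv=True).featurize()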
|
StarcoderdataPython
|
1623837
|
<filename>dnplab/widgets/manual_align.py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
def manual_align(data, dim):
"""Manually align spectra"""
coord = data.coords[dim]
max_index = int(data.size / (coord.size**2.0))
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.25)
init_index = 0
delta_index = 1
l = plt.plot(data.coords["f2"], np.real(data.values))
ax.margins(x=0)
axcolor = "lightgoldenrodyellow"
axindex = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
sindex = Slider(
axindex,
"index",
-1 * max_index,
max_index,
valinit=init_index,
valstep=delta_index,
)
def update(val):
index = sindex.val
ix = 0
for line in l:
line.set_ydata(np.roll(data[dim, ix].values.ravel(), index * ix))
ix += 1
fig.canvas.draw_idle()
sindex.on_changed(update)
reset_ax = plt.axes([0.8, 0.025, 0.1, 0.04])
reset_button = Button(reset_ax, "Reset", color=axcolor, hovercolor="0.975")
inc_ax = plt.axes([0.6, 0.025, 0.1, 0.04])
inc_button = Button(inc_ax, "+", color=axcolor, hovercolor="0.975")
dec_ax = plt.axes([0.4, 0.025, 0.1, 0.04])
dec_button = Button(dec_ax, "-", color=axcolor, hovercolor="0.975")
def reset(event):
sindex.reset()
def inc(event):
sindex.set_val(sindex.val + 1)
def dec(event):
sindex.set_val(sindex.val - 1)
reset_button.on_clicked(reset)
inc_button.on_clicked(inc)
dec_button.on_clicked(dec)
plt.show()
manual_index = sindex.val
for ix, x in enumerate(data.coords[dim]):
data[dim, ix] = np.roll(data[dim, ix].values, manual_index * ix)
ix += 1
proc_parameters = {
"dim": dim,
}
proc_attr_name = "manualalign"
data.add_proc_attrs(proc_attr_name, proc_parameters)
return data
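# Illustrative usage sketch: 'data' is a dnpdata object and 'Average' is a
# hypothetical indirect dimension name; any dim present in data.dims works.
# aligned = manual_align(data, dim='Average')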
|
StarcoderdataPython
|
3241269
|
<filename>Timbuchalka/Section-1/Experementing/English holiday homework.py<gh_stars>1-10
english_homework = [ "\t\t\t\t\t\t\t\t\t\t\t" , "Write a letter to an editor" , "Write a diary entry" ,"The School of the future had no books and no teacher. Write an article based on the lesson - The Fun They Had." , "Write a letter placing an order (page no. 13,13)" ]
h1= input ("Press the number of the holiday homework to get it (Max is 4)\
" )
print (english_homework[int(h1)])
h1= input ("Press the number of the holiday homework to get it (Max is 4)\
" )
print (english_homework[int(h1)])
h1= input ("Press the number of the holiday homework to get it (Max is 4)\
" )
print (english_homework[int(h1)])
h1= input ("Press the number of the holiday homework to get it (Max is 4)\
" )
print (english_homework[int(h1)])
endkey = input ("{0}".format("-----Press ENTER to exit-----"))
print ("exiting program...")
|
StarcoderdataPython
|
3207861
|
<reponame>rokj/django_basketball<filename>basketball/templatetags/replace.py
# -*- coding: utf-8 -*-
from django import template
from datetime import datetime
from django.conf import settings
from common.functions import replace
register = template.Library()
register.filter("replace", replace)
|
StarcoderdataPython
|
104488
|
<filename>oozappa/_structure/_environment/fabfile/__init__.py
# -*- coding:utf8 -*-
from fabric.api import task, local, run, sudo, env
from oozappa.config import get_config, procure_common_functions
_settings = get_config()
procure_common_functions()
# your own task below
|
StarcoderdataPython
|
3275178
|
<reponame>jdmoorman/clapsolver<filename>setup.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import platform
import sys
import setuptools
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
with open("README.md") as readme_file:
readme = readme_file.read()
test_requirements = [
"codecov",
"pytest",
"pytest-cov",
]
docs_requirements = [
"sphinx==1.8.5",
]
setup_requirements = [
"numpy",
"pytest-runner",
"pybind11>=2.5.0",
]
perf_requirements = [
"pyperf",
"matplotlib",
"numpy",
"scipy",
"munkres",
"lap",
"lapsolver",
"lapjv",
]
dev_requirements = [
*test_requirements,
*docs_requirements,
*setup_requirements,
*perf_requirements,
"pre-commit",
"bumpversion>=0.5.3",
"ipython>=7.5.0",
"tox>=3.5.2",
"twine>=1.13.0",
"wheel>=0.33.1",
]
requirements = [
"numpy",
"scipy",
"numba",
]
extra_requirements = {
"test": test_requirements,
"docs": docs_requirements,
"setup": setup_requirements,
"dev": dev_requirements,
"perf": perf_requirements,
"all": [
*requirements,
*test_requirements,
*docs_requirements,
*setup_requirements,
*dev_requirements,
*perf_requirements,
],
}
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __str__(self):
import pybind11
return pybind11.get_include()
class get_numpy_include(object):
"""Same as ``get_pybind_include``, but for ``numpy``"""
def __str__(self):
import numpy
return numpy.get_include()
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import os
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".cpp", delete=False) as f:
f.write("int main (int argc, char **argv) { return 0; }")
fname = f.name
try:
compiler.compile([fname], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
finally:
try:
os.remove(fname)
except OSError:
pass
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14/17] compiler flag.
    The newer version is preferred over c++11 (when it is available).
"""
flags = ["-std=c++17", "-std=c++14", "-std=c++11"]
for flag in flags:
if has_flag(compiler, flag):
return flag
raise RuntimeError("Unsupported compiler -- at least C++11 support " "is needed!")
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
"msvc": ["/EHsc", "/std:c++latest", "/arch:AVX2"],
"unix": ["-march=native", "-ftree-vectorize"],
}
l_opts = {
"msvc": [],
"unix": [],
}
if sys.platform == "darwin":
darwin_opts = ["-stdlib=libc++", "-mmacosx-version-min=10.7"]
c_opts["unix"] += darwin_opts
l_opts["unix"] += darwin_opts
def build_extensions(self):
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
link_opts = self.l_opts.get(ct, [])
if ct == "unix":
opts.append(cpp_flag(self.compiler))
for ext in self.extensions:
ext.define_macros = [
("VERSION_INFO", '"{}"'.format(self.distribution.get_version()))
]
ext.extra_compile_args = opts
ext.extra_link_args = link_opts
build_ext.build_extensions(self)
setup(
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research ",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
],
cmdclass={"build_ext": BuildExt},
description="Fast constrained linear assignment problem (CLAP) solvers",
ext_modules=[
Extension(
"_augment",
sorted(
["src/cpp/_augment.cpp"]
), # Sort input source files to ensure bit-for-bit reproducible builds
include_dirs=[get_pybind_include()], # Path to pybind11 headers
language="c++",
),
Extension(
"py_lapjv",
sources=["src/cpp/py_lapjv.cpp"],
include_dirs=[get_numpy_include(), "src/cpp"],
language="c++",
),
],
extras_require=extra_requirements,
install_requires=requirements,
license="MIT License",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
keywords="laptools",
name="laptools",
packages=find_packages(where="src"),
package_dir={"": "src"},
python_requires=">=3.6",
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/jdmoorman/laptools",
# Do not edit this string manually, always use bumpversion
# Details in CONTRIBUTING.rst
version="0.2.6",
zip_safe=False,
)
|
StarcoderdataPython
|
1685833
|
<filename>software/jetson/ArduCAM/MIPI_Camera/RPI/python/imx230_postProcess/postProcess.py
import sys
import cv2 as cv
import numpy as np
import os
import arducam_mipicamera as arducam
def align_down(size, align):
return (size & ~((align)-1))
def align_up(size, align):
return align_down(size + align - 1, align)
def remove_padding(data, width, height, bit_width):
buff = np.frombuffer(data, np.uint8)
real_width = int(width / 8 * bit_width)
align_width = align_up(real_width, 32)
align_height = align_up(height, 16)
buff = buff.reshape(align_height, align_width)
buff = buff[:height, :real_width]
buff = buff.reshape(height, real_width)
buff = buff.astype(np.uint16) << 2
# now convert to real 10 bit camera signal
for byte in range(4):
buff[:, byte::5] |= ((buff[:, 4::5] >> ((4 - byte) * 2)) & 0b11)
# delete the unused pix
buff = np.delete(buff, np.s_[4::5], 1)
return buff
def choose_lens_table(i):
switcher={
0:'./lens_table/imx230/5344x4012.npy',
1:'./lens_table/imx230/2672x2004.npy',
2:'./lens_table/imx230/1920x1080.npy',
3:'./lens_table/imx230/1336x1000.npy',
4:'./lens_table/imx230/1280x960.npy',
5:'./lens_table/imx230/1280x720.npy',
}
return switcher.get(i,"Invalid lens_table path ")
if __name__ == '__main__':
camera = arducam.mipi_camera()
print("Open camera...")
mode =1
camera.init_camera()
camera.set_mode(mode) # chose a camera mode which yields raw10 pixel format, see output of list_format utility
fmt = camera.get_format()
width = fmt.get("width")
height = fmt.get("height")
# print(choose_lens_table(mode))
print("Current resolution is {w}x{h}".format(w=width, h=height))
mask = np.load(choose_lens_table(mode))
rmask = mask[:, :, 0]
g1mask = mask[:, :, 1]
g2mask = mask[:, :, 2]
bmask = mask[:, :, 3]
rmask = cv.resize(rmask.astype(np.uint8), (width//2, height//2), interpolation=cv.INTER_LINEAR).astype(np.uint8)
g1mask = cv.resize(g1mask.astype(np.uint8),(width//2, height//2), interpolation=cv.INTER_LINEAR).astype(np.uint8)
g2mask = cv.resize(g2mask.astype(np.uint8), (width//2, height//2), interpolation=cv.INTER_LINEAR).astype(np.uint8)
bmask = cv.resize(bmask.astype(np.uint8), (width//2, height//2), interpolation=cv.INTER_LINEAR).astype(np.uint8)
rmask = (rmask[:, :] >> 5) + (rmask[:, :] & 0x1F) / 32
g1mask = (g1mask[:, :] >> 5) + (g1mask[:, :] & 0x1F) / 32
g2mask = (g2mask[:, :] >> 5) + (g2mask[:, :] & 0x1F) / 32
bmask = (bmask[:, :] >> 5) + (bmask[:, :] & 0x1F) / 32
while cv.waitKey(10) != 27:
frame = camera.capture(encoding = 'raw')
#stream = open("./2672x2004.raw", 'rb') #test
#image = stream.read()
image = remove_padding(frame.data,width,height,10 )
image[0::2, 0::2] = image[0::2, 0::2] * rmask
image[0::2, 1::2] = image[0::2, 1::2] * g1mask
image[1::2, 0::2] = image[1::2, 0::2] * g2mask
image[1::2, 1::2] = image[1::2, 1::2] * bmask
image = np.clip(image, 0, 1023)
image = cv.cvtColor(image, 46)
image = image >>2
image = image.astype(np.uint8)
image = cv.resize(image, (640, 480))
cv.imshow("preview image", image)
# Release memory
del frame
print("Close camera...")
camera.close_camera()
|
StarcoderdataPython
|
1768800
|
# Databricks notebook source
# DBTITLE 1,Define path variables
import glow
spark = glow.register(spark)
vcf_path = '/databricks-datasets/genomics/variant-splitting/01_IN_altered_multiallelic.vcf'
# COMMAND ----------
# DBTITLE 1,Load a VCF into a DataFrame
original_variants_df = (spark.read
.format("vcf")
.option("includeSampleIds", False)
.option("flattenInfoFields", True)
.load(vcf_path))
# COMMAND ----------
# DBTITLE 1,Display
display(original_variants_df)
# COMMAND ----------
# DBTITLE 1,Split multi-allelic variants
spark.conf.set("spark.sql.codegen.wholeStage", False) # turn off Spark SQL whole-stage code generation for faster performance.
split_variants_df = glow.transform(
"split_multiallelics",
original_variants_df
)
display(split_variants_df)
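# COMMAND ----------
# DBTITLE 1,Compare row counts (illustrative)
# Multi-allelic sites expand into one row per alternate allele, so the split
# DataFrame should contain at least as many rows as the original.
print(original_variants_df.count(), split_variants_df.count())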
|
StarcoderdataPython
|
3395539
|
<gh_stars>0
""" Renders form for creating new users, and writes new users to database
get(): Renders new user signup form
post(): If user input is valid, creates new user
"""
import handler as handler
import models.user as db_user # facilitates creation and query for users
import helpers.form_data as validate_form # validates user's form data
import helpers.password as pw_hash # creates and validates hashed passwords
class SignUp(handler.Handler):
""" Handles all requests pertaining to signing up new users """
def show_form(self, form_data=None):
""" Display HTML form with any residual user-generated content """
self.render("signup.html", form_data=None)
def get(self):
""" Show form without any user data """
self.show_form()
def post(self):
""" If user input from form is valid, create new user in database """
# Logic inspired by Intro to Backend course materials
error_flag = False
username = self.request.get("username")
password = self.request.get("password")
verify = self.request.get("verify")
email = self.request.get("email")
params = dict(username=username,
email=email)
if not validate_form.username(username):
params["error_username"] = "That is not a valid username"
error_flag = True
if db_user.User.by_username(username):
params["error_username"] = "That user already exists"
error_flag = True
if not validate_form.password(password):
params["error_password"] = "<PASSWORD>"
error_flag = True
elif password != verify:
params["error_verify"] = "Passwords do not match"
error_flag = True
if not validate_form.email(email):
params["error_email"] = "That is not a valid email"
error_flag = True
if error_flag:
self.render("signup.html", **params)
else:
hashed_pw = pw_hash.make(username, password)
usr = db_user.User.register(username, hashed_pw, email)
self.set_secure_cookie("username", usr.username)
self.redirect("/welcome")
|
StarcoderdataPython
|
151605
|
import napalm
driver = napalm.get_network_driver("ios")
conn_details = {
"hostname" : 'sandbox-iosxe-recomm-1.cisco.com',
"username" : 'developer',
"password" : '<PASSWORD>',
"optional_args": {
"port": 22
}
}
device = driver(**conn_details)
device.open()
to_ping = [
"10.0.0.1"
]
for host in to_ping:
res = device.ping(host)
print(f"Results for {host}")
print(res)
device.close()
|
StarcoderdataPython
|
174876
|
<filename>soft_delete_model_mixin/managers.py<gh_stars>0
from django.db import models
from .querysets import SoftDeleteQuerySet
class SoftDeleteModelManager(models.Manager):
def get_queryset(self):
return SoftDeleteQuerySet(self.model, using=self._db).not_deleted_items()
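# Illustrative usage sketch (the model and field below are hypothetical):
# attaching the manager hides soft-deleted rows from the default queryset.
# class Article(models.Model):
#     deleted_at = models.DateTimeField(null=True, blank=True)
#     objects = SoftDeleteModelManager()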
|
StarcoderdataPython
|
34806
|
from bgfactory.components.constants import HALIGN_LEFT, HALIGN_CENTER, HALIGN_RIGHT
import pangocffi as pango
PANGO_SCALE = 1024
def convert_to_pango_align(halign):
if halign == HALIGN_LEFT:
return pango.Alignment.LEFT
elif halign == HALIGN_CENTER:
return pango.Alignment.CENTER
elif halign == HALIGN_RIGHT:
return pango.Alignment.RIGHT
else:
raise ValueError('Horizontal alignment type {} not recognized'.format(halign))
def convert_extents(ext):
return ext.x / PANGO_SCALE, ext.y / PANGO_SCALE, ext.width / PANGO_SCALE, ext.height / PANGO_SCALE
|
StarcoderdataPython
|
3357540
|
# coding: utf-8
import datetime
from pytest import approx
import scipy as sp
from scipy.stats import multivariate_normal
from ..linear import RandomWalk
def test_rwmodel():
""" RandomWalk Transition Model test """
# State related variables
state_vec = sp.array([[3.0]])
old_timestamp = datetime.datetime.now()
timediff = 1 # 1sec
new_timestamp = old_timestamp + datetime.timedelta(seconds=timediff)
time_interval = new_timestamp - old_timestamp
# Model-related components
noise_diff_coeff = 0.001 # m/s^2
F = sp.array([[1]])
Q = sp.array([[timediff]]) * noise_diff_coeff
# Create and a Random Walk model object
rw = RandomWalk(noise_diff_coeff=noise_diff_coeff)
# Ensure ```rw.transfer_function(time_interval)``` returns F
assert sp.array_equal(F, rw.matrix(
timestamp=new_timestamp, time_interval=time_interval))
# Ensure ```rw.covar(time_interval)``` returns Q
assert sp.array_equal(Q, rw.covar(
timestamp=new_timestamp, time_interval=time_interval))
# Propagate a state vector through the model
# (without noise)
new_state_vec_wo_noise = rw.function(
state_vec,
timestamp=new_timestamp,
time_interval=time_interval,
noise=0)
assert sp.array_equal(new_state_vec_wo_noise, F@state_vec)
# Evaluate the likelihood of the predicted state, given the prior
# (without noise)
prob = rw.pdf(new_state_vec_wo_noise,
state_vec,
timestamp=new_timestamp,
time_interval=time_interval)
assert approx(prob) == multivariate_normal.pdf(
new_state_vec_wo_noise.T,
mean=sp.array(F@state_vec).ravel(),
cov=Q)
    # Propagate a state vector through the model
# (with internal noise)
new_state_vec_w_inoise = rw.function(
state_vec,
timestamp=new_timestamp,
time_interval=time_interval)
assert not sp.array_equal(new_state_vec_w_inoise, F@state_vec)
# Evaluate the likelihood of the predicted state, given the prior
# (with noise)
prob = rw.pdf(new_state_vec_w_inoise,
state_vec,
timestamp=new_timestamp,
time_interval=time_interval)
assert approx(prob) == multivariate_normal.pdf(
new_state_vec_w_inoise.T,
mean=sp.array(F@state_vec).ravel(),
cov=Q)
    # Propagate a state vector through the model
# (with external noise)
noise = rw.rvs(timestamp=new_timestamp, time_interval=time_interval)
new_state_vec_w_enoise = rw.function(
state_vec,
timestamp=new_timestamp,
time_interval=time_interval,
noise=noise)
assert sp.array_equal(new_state_vec_w_enoise, F@state_vec+noise)
# Evaluate the likelihood of the predicted state, given the prior
# (with noise)
prob = rw.pdf(new_state_vec_w_enoise, state_vec,
timestamp=new_timestamp, time_interval=time_interval)
assert approx(prob) == multivariate_normal.pdf(
new_state_vec_w_enoise.T,
mean=sp.array(F@state_vec).ravel(),
cov=Q)
|
StarcoderdataPython
|
1727830
|
import scipy as sp
import scipy.linalg as la
import matplotlib.pyplot as plt
def Problem1():
x = sp.linspace(-5, 5, 10)
plt.plot(x, x*3, 'kD')
plt.show()
def Problem2(n):
    x = sp.arange(n)
    return sp.array([x*i for i in xrange(n)])
def Problem3(x):
numbers = sp.arange(x)
    return sp.outer(numbers, numbers)
def Problem4():
    # Need another problem
    pass
def Problem5():
matrix = sp.zeros((10,10))
matrix2 = sp.zeros((12,12))
vector1 = sp.ones(12)
try:
        print "Setting an array row of length {0} with a vector of size {1}".format(matrix[0].shape, vector1.shape)
matrix[0] = vector1
except ValueError, err:
print "ValueError: ", err
try:
        print "Concatenating a {0} size array with a {1} size array".format(matrix.shape, matrix2.shape)
sp.concatenate((matrix, matrix2))
except ValueError, err:
print "ValueError: ", err
def Problem6(h):
x = sp.arange(0, sp.pi, h)
approx = sp.diff(sp.sin(x**2))/h
x = sp.delete(x, 0)
actual = 2 * sp.cos(x**2) * x
print "Error: ", sp.absolute(actual - approx).max()
plt.plot(x, approx, x, actual, x, approx - actual)
plt.show()
def Problem7():
x = sp.rand(10000)
print "Mean: {0} (0.5 - {0} = {1})".format(x.mean(), 0.5 - x.mean())
    print "Standard Deviation: {0} (1/sqrt(12) - {0} = {1})".format(x.std(), 1./sp.math.sqrt(12) - x.std())
def Problem8(n):
"""Verify the numerical accuracy of linalg.lstsq vs la.inv"""
from scipy.linalg import lstsq, inv, norm
from scipy import dot, rand, allclose
A = rand(n, n)
b = rand(n, 1)
inv_method = dot(inv(A), b)
lstsq_method = lstsq(A, b)[0]
#check the accuracy
return norm(inv_method - lstsq_method)
|
StarcoderdataPython
|
1736086
|
<filename>aisutils/daemon.py<gh_stars>10-100
#!/usr/bin/env python
__author__ = '<NAME>'
__version__ = '$Revision: 11839 $'.split()[1]
__revision__ = __version__
__date__ = '$Date: 2009-05-05 17:34:17 -0400 (Tue, 05 May 2009) $'.split()[1]
__copyright__ = '2007, 2008'
__license__ = 'Apache 2.0'
__doc__ = '''
Daemon tool to detach from the terminal
@requires: U{epydoc<http://epydoc.sourceforge.net/>} > 3.0alpha3
@status: under development
@since: 2008-Feb-04
@undocumented: __doc__
@todo: Clean way to shut down
'''
import os
def stdCmdlineOptions(parser,skip_short=False):
'''
Standard command line options
@param parser: OptionParser parser that will get the additional options
'''
if skip_short:
parser.add_option('--daemon'
,dest='daemon_mode'
,default=False,action='store_true'
,help='Detach from the terminal and run as a daemon service.'
+' Returns the pid. [default: %default]')
else:
parser.add_option('-d'
,'--daemon'
,dest='daemon_mode'
,default=False,action='store_true'
,help='Detach from the terminal and run as a daemon service.'
+' Returns the pid. [default: %default]')
# FIX: have an option to make a default pid file location
parser.add_option('--pid-file'
,dest='pid_file'
,default=None
,help='Where to write the process id when in daemon mode')
def start(pid_file=None):
'''
    Jump to daemon mode.
    @param pid_file: optional path to a file where the daemon's pid is written
'''
create()
if pid_file != None:
open(pid_file, 'w').write(str(os.getpid())+'\n')
def create():
"""
nohup like function to detach from the terminal. Best to call start(), not this.
"""
try:
pid = os.fork()
except OSError, except_params:
raise Exception, "%s [%d]" % (except_params.strerror, except_params.errno)
if (pid == 0):
# The first child.
os.setsid()
try:
pid = os.fork() # Fork a second child.
except OSError, except_params:
raise Exception, "%s [%d]" % (except_params.strerror, except_params.errno)
if (pid != 0):
os._exit(0) # Exit parent (the first child) of the second child.
else:
os._exit(0) # Exit parent of the first child.
import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = 1024
# Iterate through and close all file descriptors.
if True:
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
# Send all output to /dev/null - FIX: send it to a log file
os.open('/dev/null', os.O_RDWR)
os.dup2(0, 1)
os.dup2(0, 2)
return (0)
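# Illustrative usage sketch (the pid file path is hypothetical): start() forks
# twice to detach from the terminal and optionally records the daemon's pid.
# from aisutils import daemon
# daemon.start(pid_file='/tmp/ais_service.pid')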
|
StarcoderdataPython
|
1745445
|
import os
from glob import glob
from setuptools import setup
package_name = 'pybullet_ros'
submodules = [os.path.join(package_name, sub) for sub in ['plugins', 'sdf']]
data_files = [
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
]
def glob_recursive(data_files, directory):
files = glob(directory+'*.*')
data_files.append((os.path.join('share', package_name, directory), files))
subdirectories = glob(directory+'*/')
if (subdirectories == []):
return
else:
for dir in subdirectories:
glob_recursive(data_files, dir)
data_directories = ['launch', 'config', 'scripts', 'config', 'common']
for directory in data_directories:
glob_recursive(data_files, directory)
setup(
name=package_name,
version='0.0.1',
packages=[package_name] + submodules,
data_files=data_files,
install_requires=['setuptools'],
zip_safe=True,
maintainer='jasonx',
maintainer_email='59<EMAIL>',
description='ROS2 wrapper for pybullet simulator',
license='MIT',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'pybullet_ros_wrapper = pybullet_ros.pybullet_ros_wrapper:main'
],
},
)
|
StarcoderdataPython
|
1769341
|
<filename>users/urls.py
from django.urls import path
from .api import CheckVerificationCodeView, EmailView
urlpatterns = [
path("user/email/", EmailView.as_view(), name="send-verification-code"),
path(
"user/email/check/",
CheckVerificationCodeView.as_view(),
name="check-verification-code",
),
]
|
StarcoderdataPython
|
3273577
|
<filename>exasol_advanced_analytics_framework/deployment/scripts_deployer.py
import pyexasol
import logging
from jinja2 import Environment, PackageLoader, select_autoescape
from exasol_advanced_analytics_framework.deployment import constants, utils
from exasol_advanced_analytics_framework.deployment.bundle_lua_scripts import \
BundleLuaScripts
logger = logging.getLogger(__name__)
class ScriptsDeployer:
def __init__(self, language_alias: str, schema: str,
pyexasol_conn: pyexasol.ExaConnection):
self._language_alias = language_alias
self._schema = schema
self._pyexasol_conn = pyexasol_conn
logger.debug(f"Init {ScriptsDeployer.__name__}.")
def _open_schema(self) -> None:
queries = ["CREATE SCHEMA IF NOT EXISTS {schema_name}",
"OPEN SCHEMA {schema_name}"]
for query in queries:
self._pyexasol_conn.execute(query.format(schema_name=self._schema))
logger.debug(f"Schema {self._schema} is opened.")
def _deploy_udf_scripts(self) -> None:
for udf_call_src, template_src in constants.UDF_CALL_TEMPLATES.items():
udf_content = constants.SOURCE_DIR.joinpath(
udf_call_src).read_text()
udf_query = utils.load_and_render_statement(
template_src,
script_content=udf_content,
language_alias=self._language_alias)
self._pyexasol_conn.execute(udf_query)
logger.debug(f"UDF statement of the template "
f"{template_src} is executed.")
def _deploy_lua_scripts(self) -> None:
with open(constants.LUA_SCRIPT_OUTPUT, "r") as file:
lua_query = file.read()
self._pyexasol_conn.execute(lua_query)
logger.debug(f"The Lua statement of the template "
f"{constants.LUA_SCRIPT_TEMPLATE} is executed.")
def deploy_scripts(self) -> None:
self._open_schema()
self._deploy_udf_scripts()
self._deploy_lua_scripts()
logger.debug(f"Scripts are deployed.")
@classmethod
def run(cls, dsn: str, user: str, password: str,
schema: str, language_alias: str, develop: bool):
if develop:
BundleLuaScripts.save_statement()
pyexasol_conn = pyexasol.connect(dsn=dsn, user=user, password=password)
scripts_deployer = cls(language_alias, schema, pyexasol_conn)
scripts_deployer.deploy_scripts()
|
StarcoderdataPython
|
7703
|
import tensorflow as tf
import numpy as np
from graphsage.models import FCPartition
from graphsage.partition_train import construct_placeholders
from graphsage.utils import load_graph_data, load_embedded_data, load_embedded_idmap
flags = tf.app.flags
FLAGS = flags.FLAGS
# flags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)')
# DIR = 'trained_models'
# MODEL = 'partition'
# with tf.Session() as sess:
# new_saver = tf.train.import_meta_graph(DIR+'/'+MODEL+'.ckpt.meta')
# new_saver.restore(sess, tf.train.latest_checkpoint(DIR + '/./'))
# new_saver.run()
# print(new_saver)
def predict(train_data, id_map):
num_classes = 3
placeholders = construct_placeholders(num_classes)
placeholders['features'] = train_data
# feed_dict = dict()
# train_data = train_data.astype('float32')
# feed_dict.update({placeholders['features']: train_data})
dim = []
# print("f:{}".format(len(train_data[0])))
dim.append(len(train_data[0]))
dim.append(FLAGS.dim_1)
dim.append(num_classes)
model = FCPartition(placeholders, dim)
sess = tf.Session()
model.load(sess)
results = model.predict()
results_np = results.eval(session=sess)
# print(results.eval(session=sess))
# print(results_np.shape)
id_map = id_map.astype('int')
results_np = np.expand_dims(results_np, axis=1)
results_np = np.insert(results_np, 0, id_map, axis=1)
results_np = results_np[results_np[:,0].argsort()]
print(results_np)
np.save(FLAGS.outDir+'/predict_predict.npy', results_np)
def main():
print("load data ...")
train_data = load_embedded_data(FLAGS.train_prefix)
id_map = load_embedded_idmap(FLAGS.train_prefix)
predict(train_data, id_map)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
176545
|
<reponame>Duke-GCB/bespin-api
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-06-15 15:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data', '0051_auto_20180615_1536'),
]
operations = [
migrations.AlterField(
model_name='jobquestionnairetype',
name='tag',
field=models.SlugField(help_text='Unique tag for specifying a questionnaire for a workflow version', unique=True),
),
migrations.AlterField(
model_name='workflow',
name='tag',
field=models.SlugField(help_text='Unique tag to represent this workflow', unique=True),
),
]
|
StarcoderdataPython
|