id | text | dataset_id
---|---|---
3269316
|
<filename>old/dea/io/qasync.py
""" Tools for working with async queues and tasks.
These are mostly failed experiments, too much complexity. Futures based
techniques compose better and are only slightly more expensive in terms of
overheads. I'm keeping these for now, but probably they will be deleted.
"""
import asyncio
import queue
import logging
from types import SimpleNamespace
from concurrent.futures import ThreadPoolExecutor
from odc.ppt import EOS_MARKER
log = logging.getLogger(__name__)
async def async_q2q_map(func, q_in, q_out,
eos_marker=EOS_MARKER,
eos_passthrough=True,
**kwargs):
"""Like `map` but operating on values from/to queues.
Roughly equivalent to:
> while not end of stream:
> q_out.put(func(q_in.get(), **kwargs))
    Processing stops when the `eos_marker` object is observed on the input; by
    default `eos_marker` is passed through to the output queue, but you can
    disable that.
    Calls the `task_done()` method on the input queue after the result has been
    copied to the output queue.
    The assumption is that the mapping function doesn't raise exceptions;
    instead it should return some sort of error object. If calling `func` does
    result in an exception, it is caught and logged but otherwise ignored.
It is safe to have multiple consumers/producers reading/writing from the
queues, although you might want to disable eos pass-through in those
cases.
func : Callable
q_in: Input asyncio.Queue
q_out: Output asyncio.Queue
eos_marker: Value that indicates end of stream
    eos_passthrough: If True, copy `eos_marker` to the output queue before
                     terminating; if False, don't.
"""
while True:
x = await q_in.get()
if x is eos_marker:
if eos_passthrough:
await q_out.put(x)
q_in.task_done()
return
err, result = (None, None)
try:
result = await func(x, **kwargs)
except Exception as e:
err = str(e)
log.error("Uncaught exception: %s", err)
if err is None:
await q_out.put(result)
q_in.task_done()
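# --- usage sketch (not part of the original module; names below are illustrative) ---
# Minimal example of wiring `async_q2q_map` between two asyncio queues; the
# mapping coroutine `double` and the demo function name are hypothetical.
async def _demo_async_q2q_map():
    async def double(x):
        return x * 2

    q_in, q_out = asyncio.Queue(), asyncio.Queue()
    for i in range(3):
        q_in.put_nowait(i)
    q_in.put_nowait(EOS_MARKER)
    await async_q2q_map(double, q_in, q_out)
    # q_out now holds 0, 2, 4 followed by EOS_MARKER (eos_passthrough=True by default)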
async def gen2q_async(func,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
loop=None):
""" Run upto `nconcurrent` generator functions, pump values from generator function into `q_out`
To indicate that no more data is available func should return special value `eos_marker`
[func(0)] \
[func(1)] >--> q_out
[func(2)] /
- func is expected not to raise exceptions
"""
async def worker(idx):
n = 0
while True:
try:
x = await func(idx)
except Exception as e:
log.error("Uncaught exception: %s", str(e))
return n
if x is eos_marker:
return n
n += 1
await q_out.put(x)
return n
ff = [asyncio.ensure_future(worker(i), loop=loop)
for i in range(nconcurrent)]
n_total = 0
for f in ff:
n_total += (await f)
if eos_passthrough:
await q_out.put(eos_marker)
return n_total
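# --- usage sketch (not part of the original module; names below are illustrative) ---
# Hypothetical illustration of `gen2q_async`: three concurrent "generator"
# coroutines share a counter and stop by returning EOS_MARKER after ten items.
async def _demo_gen2q_async():
    state = SimpleNamespace(count=0)

    async def gen(idx):
        if state.count >= 10:
            return EOS_MARKER
        state.count += 1
        return state.count

    q = asyncio.Queue()
    total = await gen2q_async(gen, q, nconcurrent=3)
    # `total` is the number of items produced (10 here); `q` ends with EOS_MARKER
    return total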
async def aq2sq_pump(src, dst,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01):
""" Pump from async Queue to synchronous queue.
    dt -- how long to sleep (in seconds) when `dst` is full
"""
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
while True:
x = await src.get()
if x is eos_marker:
if eos_passthrough:
await push_to_dst(x, dst, dt)
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
async def q2q_nmap(func,
q_in,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01,
loop=None):
"""Pump data from synchronous queue to another synchronous queue via a worker
pool of async `func`s. Allow upto `nconcurrent` concurrent `func` tasks
at a time.
/ [func] \
q_in -> [func] >--> q_out
\ [func] /
- Order is not preserved.
- func is expected not to raise exceptions
"""
def safe_get(src):
try:
x = src.get_nowait()
return (x, True)
except queue.Empty:
return (None, False)
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
async def intake_loop(src, dst, dt):
while True:
x, ok = safe_get(src)
if not ok:
await asyncio.sleep(dt)
elif x is eos_marker:
src.task_done()
break
else:
await dst.put(x)
src.task_done()
for _ in range(nconcurrent):
await dst.put(eos_marker)
await dst.join()
async def output_loop(src, dst, dt):
while True:
x = await src.get()
if x is eos_marker:
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
aq_in = asyncio.Queue(nconcurrent*2)
aq_out = asyncio.Queue(aq_in.maxsize)
# / [func] \
# q_in -> aq_in -> [func] >--> aq_out -> q_out
# \ [func] /
# Launch async worker pool: aq_in ->[func]-> aq_out
for _ in range(nconcurrent):
asyncio.ensure_future(async_q2q_map(func, aq_in, aq_out,
eos_marker=eos_marker,
eos_passthrough=False),
loop=loop)
# Pump from aq_out -> q_out (async to sync interface)
asyncio.ensure_future(output_loop(aq_out, q_out, dt), loop=loop)
# Pump from q_in -> aq_in (sync to async interface)
await intake_loop(q_in, aq_in, dt)
# by this time all input items have been mapped through func and are in aq_out
# terminate output pump
await aq_out.put(eos_marker) # tell output_loop to stop
await aq_out.join() # wait for ack, all valid data is in `q_out` now
    # finally push through eos_marker unless asked not to
if eos_passthrough:
await push_to_dst(eos_marker, q_out, dt)
################################################################################
# tests below
################################################################################
def test_q2q_map():
async def proc(x):
await asyncio.sleep(0.01)
return (x, x)
loop = asyncio.new_event_loop()
def run(**kwargs):
q1 = asyncio.Queue(10)
q2 = asyncio.Queue(10)
for i in range(4):
q1.put_nowait(i)
q1.put_nowait(EOS_MARKER)
async def run_test(**kwargs):
await async_q2q_map(proc, q1, q2, **kwargs)
await q1.join()
xx = []
while not q2.empty():
xx.append(q2.get_nowait())
return xx
return loop.run_until_complete(run_test(**kwargs))
expect = [(i, i) for i in range(4)]
assert run() == expect + [EOS_MARKER]
assert run(eos_passthrough=False) == expect
loop.close()
def test_q2qnmap():
import random
async def proc(x, state, delay=0.1):
state.active += 1
delay = random.uniform(0, delay)
await asyncio.sleep(delay)
state.max_active = max(state.active, state.max_active)
state.active -= 1
return (x, x)
def run_producer(n, q, eos_marker):
for i in range(n):
q.put(i)
q.put(eos_marker)
q.join()
def run_consumer(q, eos_marker):
xx = []
while True:
x = q.get()
q.task_done()
xx.append(x)
if x is eos_marker:
break
return xx
wk_pool = ThreadPoolExecutor(max_workers=2)
src = queue.Queue(3)
dst = queue.Queue(3)
# first do self test of consumer/producer
N = 100
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, src, EOS_MARKER)
xx = xx.result()
assert len(xx) == N + 1
assert len(set(xx) - set(range(N)) - set([EOS_MARKER])) == 0
assert src.qsize() == 0
loop = asyncio.new_event_loop()
def run(N, nconcurrent, delay, eos_passthrough=True):
async def run_test(func, N, nconcurrent):
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, dst, EOS_MARKER)
await q2q_nmap(func, src, dst, nconcurrent, eos_passthrough=eos_passthrough)
if eos_passthrough is False:
dst.put(EOS_MARKER)
return xx.result()
state = SimpleNamespace(active=0, max_active=0)
func = lambda x: proc(x, delay=delay, state=state)
return state, loop.run_until_complete(run_test(func, N, nconcurrent))
expect = set([(x, x) for x in range(N)] + [EOS_MARKER])
st, xx = run(N, 20, 0.1)
assert len(xx) == N + 1
assert 1 < st.max_active <= 20
assert set(xx) == expect
st, xx = run(N, 4, 0.01)
assert len(xx) == N + 1
assert 1 < st.max_active <= 4
assert set(xx) == expect
st, xx = run(N, 4, 0.01, eos_passthrough=False)
assert len(xx) == N + 1
assert 1 < st.max_active <= 4
assert set(xx) == expect
def test_gen2q():
async def gen_func(idx, state):
if state.count >= state.max_count:
return EOS_MARKER
cc = state.count
state.count += 1
await asyncio.sleep(state.dt)
return cc
async def sink(q):
xx = []
while True:
x = await q.get()
if x is EOS_MARKER:
return xx
xx.append(x)
return xx
async def run_async(nconcurrent, max_count=100, dt=0.1):
state = SimpleNamespace(count=0,
max_count=max_count,
dt=dt)
gen = lambda idx: gen_func(idx, state)
q = asyncio.Queue(maxsize=10)
g2q = asyncio.ensure_future(gen2q_async(gen, q, nconcurrent))
xx = await sink(q)
return g2q.result(), xx
loop = asyncio.new_event_loop()
def run(*args, **kwargs):
return loop.run_until_complete(run_async(*args, **kwargs))
n, xx = run(10, max_count=100, dt=0.1)
assert len(xx) == n
assert len(xx) == 100
assert set(xx) == set(range(100))
|
StarcoderdataPython
|
1636292
|
from psy.irt import grm
|
StarcoderdataPython
|
3209489
|
<reponame>maikelvl/workspace
from collections import defaultdict
import json
from os import environ, getcwd, path
import shutil
import subprocess
import ssh_utils
import utils
WORKSPACE = getcwd()
HOSTS_PATH = path.join(WORKSPACE, 'hosts')
HOSTS_TEMPLATE_PATH = path.join(WORKSPACE, '.hosts-template')
def host_path(host_dir):
return path.join(HOSTS_PATH, host_dir)
def config(host_dir):
_host_path = host_path(host_dir)
config_file = path.join(_host_path, 'config.json')
try:
with open(config_file, 'r') as f:
_config = json.load(f)
except IOError:
if not path.isdir(HOSTS_PATH):
shutil.copytree(HOSTS_TEMPLATE_PATH, HOSTS_PATH)
# Try again
return config(host_dir)
        elif not path.isdir(_host_path):
raise Exception('Host not found: {}'.format(
_host_path.replace(environ.get('HOME'), '~')))
else:
raise HostconfigFileNotFound('Host config file not found: {}'.format(
config_file.replace(environ.get('HOME'), '~')))
except ValueError as e:
raise Exception('There is a syntax error in {}: {}'.format(config_file, e))
return _config
class HostDownException(Exception):
pass
class HostconfigFileNotFound(Exception):
pass
class BaseHost(object):
_data = None
root = None
config = None
def __init__(self, root):
self.root = root
@property
def name(self):
return self.config.get('host-name', path.basename(self.root))
def ping(self):
ip_list = self.ip_list
utils.log('IP-addresses: '+', '.join(ip_list))
for ip in ip_list:
utils.log('Pinging {} ({})'.format(self.name, ip))
if utils.ping(ip):
utils.log('Ping successful')
with open('{}/ip-address.txt'.format(self.root), 'w') as f:
f.write(ip)
return ip
utils.log('Ping unsuccessful')
raise HostDownException
@property
def ip(self):
return self.ping()
def command(self, command, stdout=False):
self.ping()
return self.ssh(command=command, stdout=stdout)
@property
def flat_ssh_config(self):
return ssh_utils.flat_ssh_config(ssh_config=self.ssh_config)
def ssh(self, command=None, stdout=False):
ssh_config = self.ssh_config
try:
return ssh_utils.ssh(ssh_config=ssh_config, command=command, stdout=stdout)
except ssh_utils.SshException as e:
exit()
def ssh_command(self, command=None):
return ssh_utils.ssh_command(ssh_config=self.ssh_config,
command=command)
def scp_from(self, from_file, to_file):
return ssh_utils.scp(ssh_config=self.ssh_config, from_file=from_file, to_file=to_file, from_remote=True)
def scp_to(self, from_file, to_file):
return ssh_utils.scp(ssh_config=self.ssh_config, from_file=from_file, to_file=to_file, to_remote=True)
def get(self, key):
if self.data.has_key(key):
return self.data.get(key)
return None
def set(self, key, value):
self.data[key] = value
return self
def unset(self, key):
        if self.data.has_key(key):
del self.data[key]
return self
def remove_data(self):
self._data = {}
return self
@property
def data(self):
if self._data is None:
self._data = self.state_file_content
return self._data
@property
def state_file(self):
return '{}/.state.json'.format(self.root)
@property
def state_file_content(self):
utils.log('Reading state from file {}'.format(self.state_file))
try:
return json.load(open(self.state_file))
except IOError:
return defaultdict(dict)
except ValueError as e:
utils.log('There is a syntax error in {}: {}'.format(self.state_file, e))
exit(1)
def save(self):
utils.log('Saving state to file {}'.format(self.state_file))
with open(self.state_file, 'w') as f:
f.write(json.dumps(self.data, indent=4))
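# --- usage sketch (not part of the original module) ---
# Hypothetical example: load WORKSPACE/hosts/<host_dir>/config.json via config();
# the host name 'my-host' is illustrative only.
def _example_load_host_config(host_dir='my-host'):
    cfg = config(host_dir)
    print(cfg.get('host-name', host_dir))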
|
StarcoderdataPython
|
8038
|
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import os
import unittest
import re
from . import sysinfo
# Linux/OS X/BSD platforms can implement this by calling out to lsof
if sysinfo.WIN:
def _run_lsof():
raise unittest.SkipTest("lsof not expected on Windows")
else:
def _run_lsof():
import tempfile
pid = os.getpid()
fd, tmpname = tempfile.mkstemp('get_open_files')
os.close(fd)
lsof_command = 'lsof -p %s > %s' % (pid, tmpname)
if os.system(lsof_command):
# XXX: This prints to the console an annoying message: 'lsof is not recognized'
raise unittest.SkipTest("lsof failed")
with open(tmpname) as fobj:
data = fobj.read().strip()
os.remove(tmpname)
return data
def default_get_open_files(pipes=False):
data = _run_lsof()
results = {}
for line in data.split('\n'):
line = line.strip()
if not line or line.startswith("COMMAND"):
# Skip header and blank lines
continue
split = re.split(r'\s+', line)
_command, _pid, _user, fd = split[:4]
# Pipes (on OS X, at least) get an fd like "3" while normal files get an fd like "1u"
if fd[:-1].isdigit() or fd.isdigit():
if not pipes and fd[-1].isdigit():
continue
fd = int(fd[:-1]) if not fd[-1].isdigit() else int(fd)
if fd in results:
params = (fd, line, split, results.get(fd), data)
raise AssertionError('error when parsing lsof output: duplicate fd=%r\nline=%r\nsplit=%r\nprevious=%r\ndata:\n%s' % params)
results[fd] = line
if not results:
raise AssertionError('failed to parse lsof:\n%s' % (data, ))
results['data'] = data
return results
def default_get_number_open_files():
if os.path.exists('/proc/'):
# Linux only
fd_directory = '/proc/%d/fd' % os.getpid()
return len(os.listdir(fd_directory))
try:
return len(get_open_files(pipes=True)) - 1
except (OSError, AssertionError, unittest.SkipTest):
return 0
lsof_get_open_files = default_get_open_files
try:
# psutil import subprocess which on Python 3 imports selectors.
# This can expose issues with monkey-patching.
import psutil
except ImportError:
get_open_files = default_get_open_files
get_number_open_files = default_get_number_open_files
else:
# If psutil is available (it is cross-platform) use that.
# It is *much* faster than shelling out to lsof each time
# (Running 14 tests takes 3.964s with lsof and 0.046 with psutil)
# However, it still doesn't completely solve the issue on Windows: fds are reported
# as -1 there, so we can't fully check those.
def get_open_files():
"""
Return a list of popenfile and pconn objects.
Note that other than `fd`, they have different attributes.
.. important:: If you want to find open sockets, on Windows
and linux, it is important that the socket at least be listening
(socket.listen(1)). Unlike the lsof implementation, this will only
return sockets in a state like that.
"""
results = dict()
process = psutil.Process()
results['data'] = process.open_files() + process.connections('all')
for x in results['data']:
results[x.fd] = x
results['data'] += ['From psutil', process]
return results
def get_number_open_files():
process = psutil.Process()
try:
return process.num_fds()
except AttributeError:
# num_fds is unix only. Is num_handles close enough on Windows?
return 0
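# --- usage sketch (not part of the original module) ---
# Hypothetical helper built on the functions above: compare the number of open
# file descriptors before and after running `action` to spot descriptor leaks.
def _example_fd_leak_check(action):
    before = get_number_open_files()
    action()
    after = get_number_open_files()
    assert after <= before, 'possible fd leak: %d -> %d' % (before, after)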
|
StarcoderdataPython
|
1640429
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
from gensim.summarization.bm25 import get_bm25_weights
from gensim.test.utils import common_texts
class TestBM25(unittest.TestCase):
def test_max_match_with_itself(self):
""" Document should show maximum matching with itself """
weights = get_bm25_weights(common_texts)
for index, doc_weights in enumerate(weights):
expected = max(doc_weights)
predicted = doc_weights[index]
self.assertAlmostEqual(expected, predicted)
def test_with_generator(self):
""" Check above function with input as generator """
text_gen = (i for i in common_texts)
weights = get_bm25_weights(text_gen)
for index, doc_weights in enumerate(weights):
expected = max(doc_weights)
predicted = doc_weights[index]
self.assertAlmostEqual(expected, predicted)
def test_nonnegative_weights(self):
""" All the weights for a partiular document should be non negative """
weights = get_bm25_weights(common_texts)
for doc_weights in weights:
for weight in doc_weights:
self.assertTrue(weight >= 0.)
def test_same_match_with_same_document(self):
""" A document should always get the same weight when matched with a particular document """
corpus = [['cat', 'dog', 'mouse'], ['cat', 'lion'], ['cat', 'lion']]
weights = get_bm25_weights(corpus)
self.assertAlmostEqual(weights[0][1], weights[0][2])
def test_disjoint_docs_if_weight_zero(self):
""" Two disjoint documents should have zero matching"""
corpus = [['cat', 'dog', 'lion'], ['goat', 'fish', 'tiger']]
weights = get_bm25_weights(corpus)
self.assertAlmostEqual(weights[0][1], 0)
self.assertAlmostEqual(weights[1][0], 0)
def test_multiprocessing(self):
""" Result should be the same using different processes """
weights1 = get_bm25_weights(common_texts)
weights2 = get_bm25_weights(common_texts, n_jobs=2)
weights3 = get_bm25_weights(common_texts, n_jobs=-1)
self.assertAlmostEqual(weights1, weights2)
self.assertAlmostEqual(weights1, weights3)
self.assertAlmostEqual(weights2, weights3)
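# --- usage sketch (not part of this test module; the corpus below is illustrative) ---
# get_bm25_weights() returns, for every document, its BM25 similarity against
# every document in the corpus; the diagonal entry is the largest value in each
# row, which is exactly what test_max_match_with_itself asserts.
def _example_bm25_weights():
    corpus = [['hello', 'world'], ['hello', 'there'], ['completely', 'different']]
    weights = get_bm25_weights(corpus)
    return weights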
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
StarcoderdataPython
|
142743
|
from configs import EXP_CONFIGS
import xml.etree.cElementTree as ET
from xml.etree.ElementTree import dump
from lxml import etree as ET
import os
E = ET.Element
def indent(elem, level=0):
i = "\n " + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + ""
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class Network():
def __init__(self, configs):
self.configs = configs
self.sim_start = self.configs['sim_start']
self.max_steps = self.configs['max_steps']
self.current_path = os.path.dirname(os.path.abspath(__file__))
gen_training_data_path = os.path.join(
self.current_path, 'training_data')
if os.path.exists(gen_training_data_path) == False:
os.mkdir(gen_training_data_path)
if self.configs['mode'] == 'train' or self.configs['mode'] == 'train_old':
self.file_name = self.configs['file_name']
os.mkdir(os.path.join(self.current_path, 'training_data',
self.configs['time_data']))
os.mkdir(os.path.join(self.current_path, 'training_data',
self.configs['time_data'], 'net_data'))
self.current_Env_path = os.path.join(
self.current_path, 'training_data', self.configs['time_data'], 'net_data')
elif self.configs['mode'] == 'test':
self.file_name = self.configs['file_name']
os.mkdir(os.path.join(self.current_path, 'training_data',
self.configs['time_data']))
os.mkdir(os.path.join(self.current_path, 'training_data',
self.configs['time_data'], 'net_data'))
self.current_Env_path = os.path.join(
self.current_path, 'training_data', self.configs['time_data'], 'net_data')
else: # simulate
self.file_name = self.configs['file_name']
self.current_Env_path = os.path.join(
self.current_path, 'Net_data')
if os.path.exists(self.current_Env_path) == False:
os.mkdir(self.current_Env_path)
# data directory generate
gen_data_path = os.path.join(self.current_path, 'data')
if os.path.exists(gen_data_path) == False:
os.mkdir(gen_data_path)
self.num_cars = str(self.configs['num_cars'])
self.num_lanes = str(self.configs['num_lanes'])
self.flow_start = str(self.configs['flow_start'])
self.flow_end = str(self.configs['flow_end'])
self.laneLength = self.configs['laneLength']
self.nodes = list()
self.flows = list()
self.vehicles = list()
self.edges = list()
self.connections = list()
self.outputData = list()
self.traffic_light = list()
if self.configs['mode'] == 'test':
self.generate_cfg(True, 'test')
if self.configs['mode'] == 'train':
self.generate_cfg(True, 'train')
def specify_edge(self):
edges = list()
'''
        Function intended to be overridden by subclasses.
'''
return edges
def specify_node(self):
nodes = list()
'''
        Function intended to be overridden by subclasses.
'''
return nodes
def specify_flow(self):
flows = list()
'''
        Function intended to be overridden by subclasses.
'''
return flows
def specify_connection(self):
connections = list()
'''
        Function intended to be overridden by subclasses.
'''
return connections
def specify_outdata(self):
outputData = list()
'''
        Function intended to be overridden by subclasses.
'''
return outputData
def specify_traffic_light(self):
traffic_light = list()
'''
        Function intended to be overridden by subclasses.
'''
return traffic_light
def _generate_nod_xml(self):
self.nodes = self.specify_node()
nod_xml = ET.Element('nodes')
for node_dict in self.nodes:
# node_dict['x']=format(node_dict['x'],'.1f')
nod_xml.append(E('node', attrib=node_dict))
indent(nod_xml, 1)
dump(nod_xml)
tree = ET.ElementTree(nod_xml)
# tree.write(self.file_name+'.xml',encoding='utf-8',xml_declaration=True)
tree.write(os.path.join(self.current_Env_path, self.file_name+'.nod.xml'), pretty_print=True,
encoding='UTF-8', xml_declaration=True)
def _generate_edg_xml(self):
self.edges = self.specify_edge()
edg_xml = ET.Element('edges')
for _, edge_dict in enumerate(self.edges):
edg_xml.append(E('edge', attrib=edge_dict))
indent(edg_xml, 1)
dump(edg_xml)
tree = ET.ElementTree(edg_xml)
# tree.write(self.xml_edg_name+'.xml',encoding='utf-8',xml_declaration=True)
tree.write(os.path.join(self.current_Env_path, self.file_name+'.edg.xml'), pretty_print=True,
encoding='UTF-8', xml_declaration=True)
def _generate_net_xml(self):
# file_name_str=os.path.join(self.current_Env_path,self.file_name)
file_name_str = os.path.join(self.current_Env_path, self.file_name)
if len(self.traffic_light) != 0:
os.system('netconvert -n {0}.nod.xml -e {0}.edg.xml -i {0}_tl.add.xml -o {0}.net.xml --no-turnarounds True'.format(
file_name_str))
elif len(self.connections) == 0:
os.system('netconvert -n {}.nod.xml -e {}.edg.xml -o {}.net.xml --no-turnarounds True'.format(
file_name_str, file_name_str, file_name_str))
        else:  # when connections exist, pass the .con.xml to netconvert via -x
os.system('netconvert -n {}.nod.xml -e {}.edg.xml -x {}.con.xml -o {}.net.xml --no-turnarounds True'.format(
file_name_str, file_name_str, file_name_str, file_name_str))
def _generate_rou_xml(self):
self.flows = self.specify_flow()
route_xml = ET.Element('routes')
if len(self.vehicles) != 0: # empty
for _, vehicle_dict in enumerate(self.vehicles):
route_xml.append(E('veh', attrib=vehicle_dict))
indent(route_xml, 1)
if len(self.flows) != 0:
for _, flow_dict in enumerate(self.flows):
route_xml.append(E('flow', attrib=flow_dict))
indent(route_xml, 1)
dump(route_xml)
tree = ET.ElementTree(route_xml)
tree.write(os.path.join(self.current_Env_path, self.file_name+'.rou.xml'), pretty_print=True,
encoding='UTF-8', xml_declaration=True)
def _generate_con_xml(self):
self.cons = self.specify_connection()
con_xml = ET.Element('connections')
if len(self.connections) != 0: # empty
for _, connection_dict in enumerate(self.connections):
con_xml.append(E('connection', attrib=connection_dict))
indent(con_xml, 1)
dump(con_xml)
tree = ET.ElementTree(con_xml)
tree.write(os.path.join(self.current_Env_path, self.file_name+'.con.xml'), pretty_print=True,
encoding='UTF-8', xml_declaration=True)
def _generate_add_xml(self):
traffic_light_set = self.specify_traffic_light()
self.traffic_light = traffic_light_set
data_additional = ET.Element('additional')
        # edgeData and laneData files are generated under the data directory
data_additional.append(E('edgeData', attrib={'id': 'edgeData_00', 'file': '{}_edge.xml'.format(self.current_path+'\\data\\'+self.configs['mode']+'\\'+self.file_name), 'begin': '0', 'end': str(
self.configs['max_steps']), 'freq': '900'}))
indent(data_additional, 1)
data_additional.append(E('laneData', attrib={'id': 'laneData_00', 'file': '{}_lane.xml'.format(self.current_path+'\\data\\'+self.configs['mode']+'\\'+self.file_name), 'begin': '0', 'end': str(
self.configs['max_steps']), 'freq': '900'}))
indent(data_additional, 1)
dump(data_additional)
tree = ET.ElementTree(data_additional)
tree.write(os.path.join(self.current_Env_path, self.file_name+'_data.add.xml'),
pretty_print=True, encoding='UTF-8', xml_declaration=True)
tl_additional = ET.Element('additional')
if len(self.traffic_light) != 0 or self.configs['mode'] == 'simulate':
for _, tl in enumerate(traffic_light_set):
phase_set = tl.pop('phase')
tlLogic = ET.SubElement(tl_additional, 'tlLogic', attrib=tl)
indent(tl_additional, 1)
for _, phase in enumerate(phase_set):
tlLogic.append(E('phase', attrib=phase))
indent(tl_additional, 2)
dump(tl_additional)
tree = ET.ElementTree(tl_additional)
tree.write(os.path.join(self.current_Env_path, self.file_name+'_tl.add.xml'),
pretty_print=True, encoding='UTF-8', xml_declaration=True)
def generate_cfg(self, route_exist, mode='simulate'):
'''
        Once all the generation is done, subclasses should extend this function via `super`.
'''
sumocfg = ET.Element('configuration')
inputXML = ET.SubElement(sumocfg, 'input')
inputXML.append(
E('net-file', attrib={'value': os.path.join(self.current_Env_path, self.file_name+'.net.xml')}))
indent(sumocfg)
if route_exist == True:
            if self.configs['network'] == 'grid':  # only generated for the grid network
self._generate_rou_xml()
if os.path.exists(os.path.join(self.current_Env_path, self.file_name+'.rou.xml')):
inputXML.append(
E('route-files', attrib={'value': os.path.join(self.current_Env_path, self.file_name+'.rou.xml')}))
indent(sumocfg)
# if os.path.exists(os.path.join(self.current_Env_path, self.file_name+'_data.add.xml')):
# inputXML.append(
# E('additional-files', attrib={'value': os.path.join(self.current_Env_path, self.file_name+'_data.add.xml')}))
# indent(sumocfg)
inputXML.append(E('additional-files', attrib={'value': os.path.join(self.current_Env_path, self.file_name+'_data.add.xml')}))
indent(sumocfg)
time = ET.SubElement(sumocfg, 'time')
time.append(E('begin', attrib={'value': str(self.sim_start)}))
indent(sumocfg)
time.append(E('end', attrib={'value': str(self.max_steps)}))
indent(sumocfg)
outputXML = ET.SubElement(sumocfg, 'output')
indent(sumocfg)
dump(sumocfg)
tree = ET.ElementTree(sumocfg)
if mode == 'simulate':
tree.write(os.path.join(self.current_Env_path, self.file_name+'_simulate.sumocfg'),
pretty_print=True, encoding='UTF-8', xml_declaration=True)
elif mode == 'test':
tree.write(os.path.join(self.current_Env_path, self.file_name+'_test.sumocfg'),
pretty_print=True, encoding='UTF-8', xml_declaration=True)
elif mode == 'train' or mode == 'train_old':
tree.write(os.path.join(self.current_Env_path, self.file_name+'_train.sumocfg'),
pretty_print=True, encoding='UTF-8', xml_declaration=True)
def test_net(self):
self.generate_cfg(False)
os.system('sumo-gui -c {}.sumocfg'.format(os.path.join(self.current_Env_path,
self.file_name+'_simulate')))
def sumo_gui(self):
self.generate_cfg(True)
os.system('sumo-gui -c {}.sumocfg'.format(
os.path.join(self.current_Env_path, self.file_name+'_simulate')))
def generate_all_xml(self):
self._generate_nod_xml()
self._generate_edg_xml()
self._generate_add_xml()
self._generate_net_xml()
self._generate_rou_xml()
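# --- usage sketch (not part of the original module) ---
# The specify_* methods above are meant to be overridden. A hypothetical subclass
# could return lists of attribute dicts; the keys shown ('id', 'x', 'y', 'from',
# 'to', 'numLanes', 'speed') follow the standard SUMO .nod.xml / .edg.xml attributes.
class _ExampleStraightRoad(Network):
    def specify_node(self):
        return [
            {'id': 'n0', 'x': '0.0', 'y': '0.0'},
            {'id': 'n1', 'x': str(self.laneLength), 'y': '0.0'},
        ]

    def specify_edge(self):
        return [
            {'id': 'e0', 'from': 'n0', 'to': 'n1',
             'numLanes': self.num_lanes, 'speed': '13.89'},
        ]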
if __name__ == '__main__':
network = Network(EXP_CONFIGS)
network.sumo_gui()
|
StarcoderdataPython
|
182112
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.views import generic
from django.utils import timezone
from .models import Choice, Question
# Create your views here.
# All Django wants returned is an HttpResponse. Or an exception.
'''
file views
'''
# REPLACE BY class IndexView(generic.ListView)
# def index(request):
# '''
# function index(request)
# '''
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# # The code below loads the template called polls/index.html
# # and passes it a context.
# # The context is a dictionary mapping template variable names to Python objects.
# context = {'latest_question_list': latest_question_list}
# # The render() function takes the request object as its first argument,
# # a template name as its second argument
# # and a dictionary as its optional third argument.
# # It returns an HttpResponse object of the given template rendered with the given context.
# return render(request, 'polls/index.html', context)
# We're using a generic view here: ListView.
# This view abstracts the concepts of "display a list of objects."
# Each generic view needs to know what model it will be acting upon.
# This is provided using the model attribute.
# The ListView generic view uses a default template called <app name>/<model name>_list.html;
# we use template_name to tell ListView to use our existing "polls/index.html" template.
# For ListView, the automatically generated context variable is question_list.
# To override this we provide the context_object_name attribute,
# specifying that we want to use latest_question_list instead.
# As an alternative approach, you could change your templates to match the new default context variables -
# but it's a lot easier to just tell Django to use the variable you want.
class IndexView(generic.ListView):
'''
class IndexView
'''
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""Return the last five published questions (not including those set
to be published in the future)."""
# Question.objects.filter(pub_date__lte = timezone.now()) returns a queryset
# containing Questions whose pub_date is less than or equal to - that is,
# earlier than or equal to - timezone.now.
return Question.objects.filter(
pub_date__lte = timezone.now()
).order_by('-pub_date')[:5]
# REPLACE BY class DetailView(generic.DetailView)
# def detail(request, question_id):
# '''
# function detail(request, question_id)
# '''
# # The get_object_or_404() function takes a Django model as its first argument
# # and an arbitrary number of keyword arguments,
# # which it passes to the get() function of the model's manager.
# # It raises Http404 if the object doesn't exist.
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/detail.html', {'question': question})
# We're using a generic view here: DetailView.
# This view abstracts the concepts of "display a detail page for a particular type of object."
# Each generic view needs to know what model it will be acting upon.
# This is provided using the model attribute.
# The DetailView generic view expects the primary key value captured from the URL to be called "pk",
# so we've changed question_id to pk for the generic views.
# By default, the DetailView generic view uses a template called <app name>/<model name>_detail.html.
# In our case, it would use the template "polls/question_detail.html".
# The template_name attribute is used to tell Django to use a specific template name
# instead of the autogenerated default template name.
# Previously, the templates have been provided with a context that contains the question
# and latest_question_list context variables.
# For DetailView the question variable is provided automatically -
# since we're using a Django model (Question),
# Django is able to determine an appropriate name for the context variable.
class DetailView(generic.DetailView):
'''
class DetailView
'''
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet (e.g. with publish dates in the future).
"""
return Question.objects.filter(pub_date__lte=timezone.now())
# REPLACE BY class ResultsView(generic.DetailView)
# def results(request, question_id):
# '''
# function results(request, question_id)
# '''
# # response = "You're looking at the results of question %s."
# # return HttpResponse(response % question_id)
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/results.html', {'question': question})
# We're using a generic view here: DetailView.
# This view abstracts the concepts of "display a detail page for a particular type of object."
# Each generic view needs to know what model it will be acting upon.
# This is provided using the model attribute.
# The DetailView generic view expects the primary key value captured from the URL to be called "pk",
# so we've changed question_id to pk for the generic views.
# By default, the DetailView generic view uses a template called <app name>/<model name>_detail.html.
# In our case, it would use the template "polls/question_detail.html".
# The template_name attribute is used to tell Django to use a specific template name
# instead of the autogenerated default template name.
# We also specify the template_name for the results list view -
# this ensures that the results view and the detail view have a different appearance when rendered,
# even though they're both a DetailView behind the scenes.
# Previously, the templates have been provided with a context that contains the question
# and latest_question_list context variables.
# For DetailView the question variable is provided automatically -
# since we're using a Django model (Question),
# Django is able to determine an appropriate name for the context variable.
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
'''
function vote(request, question_id)
'''
#return HttpResponse("You're voting on question %s." % question_id)
p = get_object_or_404(Question, pk=question_id)
# request.POST is a dictionary-like object that lets you access submitted data by key name.
# In this case, request.POST['choice'] returns the ID of the selected choice, as a string.
# request.POST values are always strings.
#
# Note that Django also provides request.GET for accessing GET data in the same way - but
# we're explicitly using request.POST in our code, to ensure that data is only altered via a POST call.
try:
selected_choice = p.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# request.POST['choice'] will raise KeyError if choice wasn't provided in POST data.
# This code checks for KeyError and redisplays the question form with an error message if choice isn't given.
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': p,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# After incrementing the choice count, the code returns an HttpResponseRedirect rather than a normal HttpResponse.
# HttpResponseRedirect takes a single argument: the URL to which the user will be redirected
# (see the following point for how we construct the URL in this case).
#
# As the Python comment above points out, you should always return an HttpResponseRedirect
# after successfully dealing with POST data.
# This tip isn't specific to Django; it's just good Web development practice.
#
# We are using the reverse() function in the HttpResponseRedirect constructor in this example.
# This function helps avoid having to hardcode a URL in the view function.
# It is given the name of the view that we want to pass control to
# and the variable portion of the URL pattern that points to that view.
# In this case, using the URLconf we set up in urls.py,
# this reverse() call will return a string like '/polls/3/results/'
# where the 3 is the value of p.id.
# This redirected URL will then call the 'results' view to display the final page.
#
# Always return an HttpResponseRedirect after successfully dealing with POST data.
# This prevents data from being posted twice if a user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))
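# --- urls.py sketch (not part of this views module) ---
# Hypothetical URLconf these views assume, in the Django 1.x style matching the
# django.core.urlresolvers import above; the 'polls' namespace and the 'pk' /
# 'question_id' capture groups are what reverse('polls:results', args=(p.id,))
# and the generic views rely on.
#
#   from django.conf.urls import url
#   from . import views
#
#   app_name = 'polls'
#   urlpatterns = [
#       url(r'^$', views.IndexView.as_view(), name='index'),
#       url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
#       url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
#       url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
#   ]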
|
StarcoderdataPython
|
1754239
|
import openai # pip install openai
from config import API_KEY_GPT3 as config
from train import CONTEXT as train
openai.api_key = config.API_KEY_GPT3
start_sequence = "\nSAM:"
restart_sequence = "\nHuman:"
def ask(question, chat_log=None):
prompt_text = f"{chat_log}{restart_sequence}: {question}{start_sequence}:"
response = openai.Completion.create(
engine="davinci",
prompt=prompt_text,
temperature=0.8,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.3,
stop=["\n"]
)
story = response['choices'][0]['text']
return str(story)
def append_interaction_to_chat_log(question, answer, chat_log):
if chat_log is None:
chat_log = train.CONTEXT
return f"{chat_log}{restart_sequence} {question}{start_sequence}{answer}:"
def getAnswer(question, chat_log):
answer = ask(question, chat_log)
new_log = append_interaction_to_chat_log(question, answer, chat_log)
return answer, new_log
if __name__=="__main__":
texto_usuario = "hola, como estas, quiero conocerte, como te llamas?"
chat_log = train.CONTEXT
answer, _ = getAnswer(texto_usuario, chat_log)
print(answer)
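# --- usage sketch (not part of the original module) ---
# Hypothetical interactive loop on top of getAnswer(): the chat log returned by
# each call is fed back in so the model keeps the conversation context.
def _example_chat_loop():
    chat_log = train.CONTEXT
    while True:
        question = input("Human: ")
        if not question:
            break
        answer, chat_log = getAnswer(question, chat_log)
        print("SAM:", answer)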
|
StarcoderdataPython
|
3290555
|
<reponame>bbargstaedt/conflowgen<filename>conflowgen/application_models/container_flow_statistics_report.py<gh_stars>1-10
import logging
import statistics
from typing import List
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.repositories.large_scheduled_vehicle_repository import LargeScheduledVehicleRepository
from conflowgen.domain_models.vehicle import AbstractLargeScheduledVehicle, LargeScheduledVehicle
class ContainerFlowStatisticsReport:
def __init__(self, transportation_buffer=None):
self.large_scheduled_vehicle_repository = LargeScheduledVehicleRepository()
self.logger = logging.getLogger("conflowgen")
self.free_capacity_inbound_statistics = {}
self.free_capacity_outbound_statistics = {}
if transportation_buffer:
self.set_transportation_buffer(transportation_buffer=transportation_buffer)
def set_transportation_buffer(self, transportation_buffer: float):
self.large_scheduled_vehicle_repository.set_transportation_buffer(transportation_buffer)
self.logger.info(f"Use transportation buffer of {transportation_buffer} for reporting statistics.")
def generate(self):
vehicles_of_types = self.large_scheduled_vehicle_repository.load_all_vehicles()
self._generate_free_capacity_statistics(vehicles_of_types)
def _generate_free_capacity_statistics(self, vehicles_of_types):
buffer_factor = 1 + self.large_scheduled_vehicle_repository.transportation_buffer
free_capacities_inbound = {}
free_capacities_outbound = {}
vehicle_type: ModeOfTransport
vehicles: List[AbstractLargeScheduledVehicle]
for vehicle_type, vehicles in vehicles_of_types.items():
for vehicle in vehicles:
large_scheduled_vehicle: LargeScheduledVehicle = vehicle.large_scheduled_vehicle
free_capacity_inbound = self.large_scheduled_vehicle_repository.get_free_capacity_for_inbound_journey(
vehicle
)
free_capacity_outbound = self.large_scheduled_vehicle_repository.get_free_capacity_for_outbound_journey(
vehicle
)
assert free_capacity_inbound <= large_scheduled_vehicle.capacity_in_teu, \
f"A vehicle can only load at maximum its capacity, but for vehicle {vehicle} the free capacity " \
f"of {free_capacity_inbound} for inbound does not match with the capacity of the vehicle of " \
f"{large_scheduled_vehicle.capacity_in_teu} TEU"
assert free_capacity_outbound <= large_scheduled_vehicle.capacity_in_teu, \
f"A vehicle can only load at maximum its capacity, but for vehicle {vehicle} the free capacity " \
f"of {free_capacity_outbound} for outbound does not match with the capacity of the vehicle of " \
f"{large_scheduled_vehicle.capacity_in_teu} TEU"
assert (free_capacity_inbound <= large_scheduled_vehicle.moved_capacity), \
f"A vehicle must not exceed its moved capacity, but for vehicle {vehicle} the free " \
f"capacity of {free_capacity_inbound} TEU for inbound does not match with the moved capacity " \
f"of {large_scheduled_vehicle.moved_capacity}"
moved_capacity_with_outbound_buffer = (large_scheduled_vehicle.moved_capacity * buffer_factor)
assert (free_capacity_outbound <= moved_capacity_with_outbound_buffer), \
f"A vehicle must not exceed its transportation buffer, but for vehicle {vehicle} the free " \
f"capacity of {free_capacity_outbound} for outbound does not match with the moved capacity " \
f"(including the outbound buffer) of {moved_capacity_with_outbound_buffer}"
free_capacities_inbound[vehicle] = free_capacity_inbound
free_capacities_outbound[vehicle] = free_capacity_outbound
self.free_capacity_inbound_statistics = self.descriptive_statistics(free_capacities_inbound.values())
self.free_capacity_outbound_statistics = self.descriptive_statistics(free_capacities_outbound.values())
@staticmethod
def descriptive_statistics(list_of_values):
mean = statistics.mean(list_of_values)
minimum = min(list_of_values)
maximum = max(list_of_values)
stddev = statistics.stdev(list_of_values) if len(list_of_values) > 1 else 0
return {
"mean": mean,
"minimum": minimum,
"maximum": maximum,
"stddev": stddev
}
def get_text_representation(self):
report = f"""
Free Inbound Capacity Statistics
Minimum: {self.free_capacity_inbound_statistics["minimum"]:.2f}
Maximum: {self.free_capacity_inbound_statistics["maximum"]:.2f}
Mean: {self.free_capacity_inbound_statistics["mean"]:.2f}
Stddev: {self.free_capacity_inbound_statistics["stddev"]:.2f}
(rounding errors might exist)
Free Outbound Capacity Statistics
Minimum: {self.free_capacity_outbound_statistics["minimum"]:.2f}
Maximum: {self.free_capacity_outbound_statistics["maximum"]:.2f}
Mean: {self.free_capacity_outbound_statistics["mean"]:.2f}
Stddev: {self.free_capacity_outbound_statistics["stddev"]:.2f}
(rounding errors might exist)
"""
return report
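# --- usage sketch (not part of the original module) ---
# Hypothetical reporting flow, assuming the conflowgen database is already
# populated with scheduled vehicles: build the report with a 20% transportation
# buffer and print the free-capacity statistics.
def _example_print_statistics():
    report = ContainerFlowStatisticsReport(transportation_buffer=0.2)
    report.generate()
    print(report.get_text_representation())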
|
StarcoderdataPython
|
1666180
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
CSS SCRIPT
"""
from collections import OrderedDict
import random
import math
class CssScript:
def __init__(self):
#
# HTML TEMPLATES PARTS
#
self.header = """
<!DOCTYPE html>
<html>
<head>
<title></title>
<style type="">
</style>
</head>
<body>
<div style="position:relative;">
"""
self.footer = """
</div>
</body>
</html>
"""
        # TODO: add parameters to funcs; verify '3'.isdigit()
self.output = open('C:/Users/Dell/Desktop/entry/git/css-script-candy/css_script/output.html', 'w+')
#
# SHAPE GLOBAL PROPERTIES
#
self.bg_col = 'black'
self.rotation = 0
self.translation = 0
self.X = ''
self.Y = ''
#
# REGISTRIES
#
self.funcs = {0:1}
self.vars = {0:1}
self.colors = {}
#
# FLAGS
#
self.funcpass = False
self.looppass = False
#
# ONGOING DATA
#
self.ongoing_func = ''
self.ongoing_loop_times = ''
#
# ONGOING BODIES
#
self.glass = ''
self.loopbody = ''
#
# COUNTS
#
self.line = 1
#
# OUTPUT
#
def out(self, x):
self.output.write(x)
#
# BASE ELEMENT
#
def elem(self, x, y, styles, classes='', content=''):
return ' <div style="position:absolute;left:{}px;top:{}px;transform:rotate({}deg);\
{}" class=" {}">{}</div>\n'.format(x, y, self.rotation, styles, classes, content)
#
# DERIVED ELEMENTS
#
#
# BASICS
#
def circle(self, x, y, width, height):
self.out(
self.elem(x, y, 'width:{}px;height:{}px;background-color:{};border-radius:50%;'.format(width,
height, self.bg_col))
)
def rect(self, x, y, width, height):
self.out(
self.elem(x, y, 'width:{}px;height:{}px;background-color:{};'.format(width,
height, self.bg_col))
)
def roundRect(self, x, y, width, height, br, bl, tr, tl):
self.out(
self.elem(x, y, 'width:{}px;height:{}px;background-color:{};\
border-bottom-right-radius:{}px;border-bottom-left-radius:{}px;\
border-top-right-radius:{}px;border-top-left-radius:{}px;\
'.format(width,
height, self.bg_col, br, bl, tr, tl))
)
#
# SPECIFIC SHAPES - ARROWS
#
def arrowUp(self, x, y, width, height):
self.out(
self.elem(x, y, 'width: 0;height: 0;border-left: {}px solid \
transparent;border-right: {}px solid transparent;border-bottom: {}px \
solid {};'.format(int(width)/2, int(width)/2,height, self.bg_col))
)
def arrowDown(self, x, y, width, height):
self.out(
self.elem(x, y, 'width: 0;height: 0;border-left: {}px solid \
transparent;border-right: {}px solid transparent;border-top: {}px \
solid {};'.format(int(width)/2, int(width)/2,height, self.bg_col))
)
def arrowRight(self, x, y, width, height):
self.out(
self.elem(x, y, 'width: 0;height: 0;border-top: {}px solid \
transparent;border-bottom: {}px solid transparent;border-left: {}px \
solid {};'.format(int(width)/2, int(width)/2,height, self.bg_col))
)
def arrowLeft(self, x, y, width, height):
self.out(
self.elem(x, y, 'width: 0;height: 0;border-top: {}px solid \
transparent;border-bottom: {}px solid transparent;border-right: {}px \
solid {};'.format(int(width)/2, int(width)/2,height, self.bg_col))
)
#
# SPECIFIC SHAPES - OTHERS
#
#
# TEXT
#
def text(self, x, y, text):
self.out(
self.elem(x, y, 'color:{};'.format(self.bg_col), content=text)
)
#
# GLOBAL PROPERTIES
#
def fill(self, x):
self.bg_col = x
def rotate(self, x):
self.rotation = x
#
# RESOLVE DIGIT VALUE
#
def resolve_digit(self, registry, value):
'''
        Checks if value is a digit or, if it is a variable, fetches its value;
        otherwise returns None.
'''
if value.isdigit():
return value
elif '|' in value:
wds = value.split('|')
if wds[0] == 'rand' and len(wds) == 3:
l = int(self.resolve_digit(registry, wds[1]))
h = int(self.resolve_digit(registry, wds[2]))
return random.randint(l, h)
elif wds[0] == 'sine' and len(wds) == 2:
val = int(self.resolve_digit(registry, wds[1]))
return math.sin(val)
elif wds[0] == 'cos' and len(wds) == 2:
val = int(self.resolve_digit(registry, wds[1]))
return math.cos(val)
elif wds[0] == 'abssine' and len(wds) == 2:
val = int(self.resolve_digit(registry, wds[1]))
return abs(math.sin(val))
else:
try:
return registry[value]
except KeyError:
return None
def resolve_col(self, registry, value):
if '|' in value:
wds = list(filter((lambda x:x!=''), value.split('|')))
if wds[0] == 'randCol' and len(wds) == 1:
return 'rgb({},{},{})'.format(random.randint(0,255),
random.randint(0,255), random.randint(0,255)
)
else:
return value
#
# INDEPENDENT KEYWORD PARSE
#
def parse(self, registry, command, params):
# TODO : add registry parameter to use in both local and func context
params = params.strip()
if command == 'circle':
params = params.split(' ')
if len(params) == 4:
x = self.resolve_digit(registry, params[0])
y = self.resolve_digit(registry, params[1])
sizex = self.resolve_digit(registry, params[2])
sizey = self.resolve_digit(registry, params[3])
self.circle(x, y, sizex, sizey)
elif command == 'rect':
params = list(filter((lambda x:x!=''), params.split(' ')))
if len(params) == 4:
x = self.resolve_digit(registry, params[0])
y = self.resolve_digit(registry, params[1])
sizex = self.resolve_digit(registry, params[2])
sizey = self.resolve_digit(registry, params[3])
self.rect(x, y, sizex, sizey)
elif command == 'roundRect':
params = list(filter((lambda x:x!=''), params.split(' ')))
if len(params) == 8:
x = self.resolve_digit(registry, params[0])
y = self.resolve_digit(registry, params[1])
sizex = self.resolve_digit(registry, params[2])
sizey = self.resolve_digit(registry, params[3])
br = self.resolve_digit(registry, params[4])
bl = self.resolve_digit(registry, params[5])
tr = self.resolve_digit(registry, params[6])
tl = self.resolve_digit(registry, params[7])
self.roundRect(x, y, sizex, sizey, br, bl, tr, tl)
elif command == 'arrowUp':
params = list(filter((lambda x:x!=''), params.split(' ')))
if len(params) == 4:
x = self.resolve_digit(registry, params[0])
y = self.resolve_digit(registry, params[1])
sizex = self.resolve_digit(registry, params[2])
sizey = self.resolve_digit(registry, params[3])
self.arrowUp(x, y, sizex, sizey)
elif command == 'arrowDown':
params = list(filter((lambda x:x!=''), params.split(' ')))
if len(params) == 4:
x = self.resolve_digit(registry, params[0])
y = self.resolve_digit(registry, params[1])
sizex = self.resolve_digit(registry, params[2])
sizey = self.resolve_digit(registry, params[3])
self.arrowDown(x, y, sizex, sizey)
elif command == 'arrowRight':
params = list(filter((lambda x:x!=''), params.split(' ')))
if len(params) == 4:
x = self.resolve_digit(registry, params[0])
y = self.resolve_digit(registry, params[1])
sizex = self.resolve_digit(registry, params[2])
sizey = self.resolve_digit(registry, params[3])
self.arrowRight(x, y, sizex, sizey)
elif command == 'arrowLeft':
params = list(filter((lambda x:x!=''), params.split(' ')))
if len(params) == 4:
x = self.resolve_digit(registry, params[0])
y = self.resolve_digit(registry, params[1])
sizex = self.resolve_digit(registry, params[2])
sizey = self.resolve_digit(registry, params[3])
self.arrowLeft(x, y, sizex, sizey)
elif command == 'fill':
color = self.resolve_col(registry, params)
self.fill(color)
elif command == 'rotate':
self.rotate(self.resolve_digit(registry, params))
elif command == 'text':
params = params.split(' ')
x = self.resolve_digit(registry, params[0])
y = self.resolve_digit(registry, params[1])
self.text(x, y, ' '.join(params[2:]))
#
# VAL MODIFS
#
elif command == 'set':
params = list(filter((lambda x:x!=''), params.split(' ')))
if len(params) == 2 and params[0].isdigit() == False:
var_value = self.resolve_digit(registry, params[1])
var_name = params[0]
self.vars[var_name] = var_value
# print(self.vars)
else:
print('wrong assignment format')
# TODO : ADD COLOUR VARS
elif command == 'do':
params = list(filter((lambda x:x!=''), params.split(' ')))
op = params[0]
val = self.resolve_digit(registry, params[1])
var = params[3]
if params[2] == 'to':
if op == '+':
registry[var] = float(registry[var]) + float(val)
if op == '-':
registry[var] = float(registry[var]) - float(val)
if op == '*':
registry[var] = float(registry[var]) * float(val)
if op == '/':
registry[var] = float(registry[var]) // float(val)
#
# DEBUG / SHOW
#
# SHOW shows on screen
#
elif command == 'show':
params = list(filter((lambda x:x!=''), params.split(' ')))
try:
x = self.resolve_digit(registry, params[0])
y = self.resolve_digit(registry, params[1])
self.text(x, y, registry[params[2]])
except KeyError:
print('key undefined')
#
# DEBUG shows in console
#
elif command == 'debug':
params = list(filter((lambda x:x!=''), params.split(' ')))
try:
value = self.resolve_digit(registry, params[0])
print(value)
except KeyError:
print('key undefined')
#
# NON-WORD KEYWORD PARSE AND FLAGS
#
def passover(self, source):
for l in source.readlines():
# print(self.vars)
if self.funcpass == True: # before to not include + line
self.glass += l
if self.looppass == True: # before to not include + line
self.loopbody += l
if l == '\n':
if self.funcpass == True:
self.funcs[self.ongoing_func]['body'] = self.glass
# self.funcs[---] = {'params':{}, 'body':{}}
self.glass = ''
self.funcpass = False
self.ongoing_func = ''
print(self.glass)
if self.looppass == True:
for i in range(int(self.ongoing_loop_times)):
for line in self.loopbody.strip('\n').split('\n'):
x = line.strip('\n').split(' ', 1)
self.parse( self.vars, x[0], x[1])
self.ongoing_loop_times = ''
self.looppass = False
self.loopbody = ''
continue
elif l[0] == '#':
continue
elif l[0] == '+':
wds = l.strip('\n').split(' ')
self.funcpass = True
fname = wds[1].strip()
self.ongoing_func = fname
if len(wds) == 2:
self.funcs[fname] = {'params':None, 'body':''}
# print(fname.replace('\n','#'))
elif len(wds) > 2:
param_names = wds[2:]
params = OrderedDict()
for p in param_names:
params[p] = None
self.funcs[fname] = {'params':params, 'body':''}
elif l.split(' ', 1)[0] == 'call':
# TODO add resolve-digits to parameters of funcs
wds = l.strip('\n').split(' ')
fname = wds[1]
# TODO : ADD A READ STR FUNC INSTEAD OF READLINE
try:
if self.funcs[fname]['params'] == None:
for line in self.funcs[fname]['body'].strip('\n').split('\n'):
x = line.strip('\n').split(' ', 1)
self.parse( self.vars, x[0], x[1])
else: # TODO resolve digit for parameters
params = wds[2:]
i = 0
for key in self.funcs[fname]['params']:
self.funcs[fname]['params'][key] = params[i]
i += 1
for line in self.funcs[fname]['body'].strip('\n').split('\n'):
x = line.strip('\n').split(' ', 1)
self.parse( self.funcs[fname]['params'], x[0], x[1])
except KeyError:
print('no such func exists')
elif l[:4] == 'loop':
wds = l.strip('\n').split(' ')
self.looppass = True
self.ongoing_loop_times = wds[1]
if self.funcpass == False:
line = l.strip('\n').split(' ', 1)
command = line[0]
params = line[1]
self.parse(self.vars, command, params)
#
# ABSTRACTED METHODS
#
def init(self):
self.output.write(self.header)
print('begun')
def end(self):
self.output.write(self.footer)
self.output.flush()
self.output.close()
print('ended')
#
# CALLABLE METHOD
#
def exec(self, file_path):
self.source = open(file_path, 'r')
self.init()
self.passover(self.source)
self.end()
if __name__ == '__main__':
script = CssScript()
script.exec('file.candy')
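# --- example script (not part of the original module) ---
# A hypothetical 'file.candy' input using only commands handled by parse():
# each line is '<command> <params>', '#' starts a comment, and 'set'/'do'
# operate on named variables.
#
#   fill red
#   rect 10 10 100 50
#   set x 150
#   circle x 10 60 60
#   do + 100 to x
#   text x 10 hello world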
|
StarcoderdataPython
|
3378322
|
# Copyright (c) 2014 <NAME>.
# See LICENSE for details.
"""
Filesystem code used by all operating systems, including Windows as
Windows has its layer of POSIX compatibility.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from contextlib import contextmanager
from datetime import date
import errno
import os
import posixpath
import re
import shutil
import stat
import struct
import sys
import time
import unicodedata
try:
# On some systems (AIX/Windows) the public scandir module will fail to
# load the C based scandir function. We force it here by direct import.
from _scandir import scandir
except ImportError:
from scandir import scandir_python as scandir
from six import text_type
from zope.interface import implements
from chevah.compat.exceptions import (
ChangeUserException,
CompatError,
CompatException,
)
from chevah.compat.interfaces import IFileAttributes
from chevah.compat.helpers import _, NoOpContext
_DEFAULT_FOLDER_MODE = 0o777
_DEFAULT_FILE_MODE = 0o600
class PosixFilesystemBase(object):
"""
Base implementation of ILocalFilesystem for
local Posix filesystems.
It handles `raw` access to the filesystem.
Classed using this base should implement path and segment handling
"""
OPEN_READ_ONLY = os.O_RDONLY
OPEN_WRITE_ONLY = os.O_WRONLY
OPEN_READ_WRITE = os.O_RDWR
OPEN_CREATE = os.O_CREAT
OPEN_APPEND = os.O_APPEND
OPEN_EXCLUSIVE = os.O_EXCL
OPEN_TRUNCATE = os.O_TRUNC
INTERNAL_ENCODING = u'utf-8'
# Windows specific constants, placed here to help with unit testing
# of Windows specific data.
#
# Not defined in winnt.h
# http://msdn.microsoft.com/en-us/library/windows/
# desktop/aa365511(v=vs.85).aspx
IO_REPARSE_TAG_SYMLINK = 0xA000000C
def __init__(self, avatar):
self._avatar = avatar
self._root_path = self._getRootPath()
self._validateVirtualFolders()
@property
def avatar(self):
return self._avatar
@property
def installation_segments(self):
"""
See `ILocalFilesystem`.
We use 'os' module to find where the python is installed, and from
there we find the base folder.
* Windows - INSTALL_FOLDER/ lib/ Lib/ os.py
* Unix - INSTALL_FOLDER/ lib/ python2.X/ os.py
"""
path = os.path.dirname(os.__file__).decode('utf-8')
segments = self.getSegmentsFromRealPath(path)
return segments[:-2]
def _impersonateUser(self):
"""
Returns an impersonation context for current user.
"""
if not self._avatar:
return NoOpContext()
try:
return self._avatar.getImpersonationContext()
except ChangeUserException:
raise CompatError(
1006,
_(u'Could not switch process to local account "%s".' % (
self._avatar.name)),
)
def _pathSplitRecursive(self, path):
"""
Recursive split of a path.
"""
if os.path.sep == '\\':
# We are on Windows.
# Also handle Unix separators and escape the regex.
separators = r'[\\/]'
else:
separators = '[/]'
segments = re.split(separators, path)
if len(segments) > 0:
segments[0] = segments[0].strip(':')
return [segment for segment in segments if segment != '']
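    # Illustrative behaviour (a sketch, not from the original source):
    #   self._pathSplitRecursive(u'/home/user/file.txt') -> [u'home', u'user', u'file.txt']
    #   on Windows, self._pathSplitRecursive(u'C:\\Users\\foo') -> [u'C', u'Users', u'foo']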
@classmethod
def getEncodedPath(cls, path):
        '''Return the encoded representation of the path, used in the lower
        level API for accessing the filesystem.'''
return path.encode(u'utf-8')
@property
def home_segments(self):
'''See `ILocalFilesystem`.'''
if not self._avatar:
return self._pathSplitRecursive(text_type(os.path.expanduser('~')))
if self._avatar.root_folder_path is None:
return self._pathSplitRecursive(self._avatar.home_folder_path)
home_lower = self._avatar.home_folder_path.lower()
root_lower = self._avatar.root_folder_path.rstrip('/\\').lower()
# Check that we have a valid home folder.
if not home_lower.startswith(root_lower):
raise CompatError(
20019,
_(
                    'User home folder "%s" is not within the root folder '
'"%s".' % (
self._avatar.home_folder_path,
self._avatar.root_folder_path),
),
)
path = self._avatar.home_folder_path[len(root_lower):]
return self._pathSplitRecursive(path)
def getPath(self, segments):
"""
See `ILocalFilesystem`.
"""
if segments == []:
return u'/'
normalized_path = posixpath.normpath(u'/'.join(segments))
return u'/' + u'/'.join(self._pathSplitRecursive(normalized_path))
def getSegments(self, path):
"""
See `ILocalFilesystem`.
Get segment is the place where segments are created and we make sure
they are in the internal encoding.
"""
if path is None or path == '' or path == '.':
return self.home_segments
if not isinstance(path, text_type):
path = path.decode(self.INTERNAL_ENCODING)
if not path.startswith('/'):
# Resolve relative path.
home_path = u'/' + u'/'.join(self.home_segments) + u'/'
path = home_path + path
normalize_path = posixpath.normpath(path)
return self._pathSplitRecursive(normalize_path)
@property
def temp_segments(self):
'''See `ILocalFilesystem`.'''
import tempfile
temporary_folder = tempfile.gettempdir()
return self.getSegmentsFromRealPath(temporary_folder)
def getRealPathFromSegments(self, segments, include_virtual=True):
'''See `ILocalFilesystem`.'''
raise NotImplementedError('You must implement this method.')
def _areEqual(self, first, second):
"""
Return true if first and second segments are for the same path.
"""
if first == second:
return True
from chevah.compat import process_capabilities
if process_capabilities.os_name not in ['windows', 'osx']:
# On Linux and Unix we do strict case.
return False
# On Windows paths are case insensitive, so we compare based on
# lowercase.
        # But first try with the same case, in case we have strange characters.
first = [s.lower() for s in first]
second = [s.lower() for s in second]
return first == second
def _validateVirtualFolders(self):
"""
Check that virtual folders don't overlap with existing real folders.
"""
for virtual_segments, real_path in self._avatar.virtual_folders:
target_segments = virtual_segments[:]
# Check for the virtual segments, but also for any ancestor.
while target_segments:
inside_path = os.path.join(self._root_path, *target_segments)
if not os.path.lexists(self.getEncodedPath(inside_path)):
target_segments.pop()
continue
virtual_path = '/' + '/'.join(virtual_segments)
raise CompatError(
1005,
'Virtual path "%s" overlaps an existing file or '
'folder at "%s".' % (virtual_path, inside_path,))
def _getVirtualPathFromSegments(self, segments, include_virtual):
"""
Return the virtual path associated with `segments`
Return None if not found.
Raise CompatError when `include_virtual` is False and the segments
are for a virtual path (root or part of it).
"""
segments_length = len(segments)
for virtual_segments, real_path in self._avatar.virtual_folders:
if segments_length < len(virtual_segments):
                # Not the virtual folder or a descendant of it.
if (
not include_virtual and
self._areEqual(
segments, virtual_segments[:segments_length])
):
# But this is a parent of a virtual segment and we
# don't allow that.
raise CompatError(
1007, 'Modifying a virtual path is not allowed.')
continue
if (
not include_virtual and
self._areEqual(segments, virtual_segments)
):
# This is a virtual root, but we don't allow it.
raise CompatError(
1007, 'Modifying a virtual path is not allowed.')
base_segments = segments[:len(virtual_segments)]
if not self._areEqual(base_segments, virtual_segments):
# Base does not match
continue
tail_segments = segments[len(virtual_segments):]
return os.path.join(real_path, *tail_segments)
# At this point we don't have a match for a virtual folder, but
# we should check that ancestors are not virtual as we don't
# want to create files in the middle of a virtual path.
parent = segments[:-1]
if not include_virtual and parent:
# Make sure parent is not a virtual path.
self._getVirtualPathFromSegments(parent, include_virtual=False)
# No virtual path found for segments.
return None
def _isVirtualPath(self, segments):
"""
Return True if segments are a part or a full virtual folder.
Return False when they are a descendant of a virtual folder.
"""
if not segments:
return False
partial_virtual = False
segments_length = len(segments)
# Part of virtual paths, virtually exists.
for virtual_segments, real_path in self._avatar.virtual_folders:
            # Any segments which do not start the same way as a virtual path
            # are a normal path.
if not self._areEqual(segments[0:1], virtual_segments[0:1]):
# No match
continue
if self._areEqual(segments, virtual_segments[:segments_length]):
# This is the root of a virtual path or a sub-part of it.
return True
# If it looks like a virtual path, but is not a full match, then
# this is a broken path.
partial_virtual = True
if not self._areEqual(
virtual_segments, segments[:len(virtual_segments)]):
# This is not a mapping for this virtual path.
continue
            # Segments are a direct match for this virtual path mapping.
partial_virtual = False
if segments_length > len(virtual_segments):
# Is longer than the virtual path so it can't be part of the
# full virtual path.
return False
# This is a virtual path which has a mapping.
return True
if partial_virtual:
raise CompatError(1004, 'Broken virtual path.')
return False
def getSegmentsFromRealPath(self, path):
'''See `ILocalFilesystem`.'''
raise NotImplementedError('You must implement this method.')
def getAbsoluteRealPath(self, path):
'''See `ILocalFilesystem`.'''
absolute_path = os.path.abspath(self.getEncodedPath(path))
if not isinstance(absolute_path, text_type):
absolute_path = absolute_path.decode(self.INTERNAL_ENCODING)
return absolute_path
def isFolder(self, segments):
'''See `ILocalFilesystem`.'''
try:
return self.getAttributes(segments).is_folder
except OSError:
return False
def isFile(self, segments):
'''See `ILocalFilesystem`.'''
try:
return self.getAttributes(segments).is_file
except OSError:
return False
def isLink(self, segments):
"""
See `ILocalFilesystem`.
"""
raise NotImplementedError()
def exists(self, segments):
'''See `ILocalFilesystem`.'''
try:
if self._isVirtualPath(segments):
return True
            else:
                # Let the normal code below check the existence.
                pass
except CompatError:
            # A broken virtual path does not exist.
return False
path = self.getRealPathFromSegments(segments)
path_encoded = self.getEncodedPath(path)
with self._impersonateUser():
return os.path.lexists(path_encoded)
def createFolder(self, segments, recursive=False):
'''See `ILocalFilesystem`.'''
path = self.getRealPathFromSegments(segments, include_virtual=False)
path_encoded = self.getEncodedPath(path)
with self._impersonateUser():
if recursive:
return os.makedirs(path_encoded, _DEFAULT_FOLDER_MODE)
else:
return os.mkdir(path_encoded, _DEFAULT_FOLDER_MODE)
def deleteFolder(self, segments, recursive=True):
"""
See `ILocalFilesystem`.
"""
raise NotImplementedError('deleteFolder not implemented.')
def _rmtree(self, path):
"""
Remove whole directory tree.
"""
def on_error(func, path, exception_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error on Windows (ex,
read only file) it attempts to add write permission and then
retries.
If the error is for another reason it re-raises the error.
"""
if os.name != 'nt':
raise
if (
func in (os.rmdir, os.remove) and
exception_info[1].errno == errno.EACCES
):
os.chmod(
path,
stat.S_IWUSR | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO,
)
func(path)
else:
raise
shutil.rmtree(path, ignore_errors=False, onerror=on_error)
def deleteFile(self, segments, ignore_errors=False):
"""
See: `ILocalFilesystem`.
"""
path = self.getRealPathFromSegments(segments, include_virtual=False)
path_encoded = self.getEncodedPath(path)
with self._impersonateUser():
try:
try:
return os.unlink(path_encoded)
except OSError as error:
# This is done to allow lazy initialization of this module.
from chevah.compat import process_capabilities
# On Unix (AIX, Solaris) when segments is a folder,
# we get EPERM, so we force a EISDIR.
# For now, Unix is everything else, other than Linux.
if process_capabilities.os_name != 'linux':
self._requireFile(segments)
                    # On Windows we might get a permissions error when the
                    # file is read-only.
if (
process_capabilities.os_name == 'windows' and
error.errno == errno.EACCES
):
os.chmod(path_encoded, stat.S_IWRITE)
return os.unlink(path_encoded)
raise error
except Exception:
if ignore_errors:
return
raise
def rename(self, from_segments, to_segments):
'''See `ILocalFilesystem`.'''
from_path = self.getRealPathFromSegments(
from_segments, include_virtual=False)
to_path = self.getRealPathFromSegments(
to_segments, include_virtual=False)
from_path_encoded = self.getEncodedPath(from_path)
to_path_encoded = self.getEncodedPath(to_path)
with self._impersonateUser():
return os.rename(from_path_encoded, to_path_encoded)
@contextmanager
def _convertToOSError(self, path):
"""
Convert the errors raised to OSError... if possible.
"""
try:
yield
except EnvironmentError as error:
if not error.filename:
error.filename = self.getEncodedPath(path)
raise OSError(
error.errno,
error.strerror,
error.filename,
)
def _requireFile(self, segments):
"""
Raise an OSError when segments is not a file.
"""
path = self.getRealPathFromSegments(segments)
path_encoded = path.encode('utf-8')
if self.isFolder(segments):
raise OSError(
errno.EISDIR,
'Is a directory: %s' % path_encoded,
path_encoded,
)
def openFile(self, segments, flags, mode):
'''See `ILocalFilesystem`.'''
path = self.getRealPathFromSegments(segments, include_virtual=False)
path_encoded = self.getEncodedPath(path)
self._requireFile(segments)
with self._convertToOSError(path), self._impersonateUser():
return os.open(path_encoded, flags, mode)
def openFileForReading(self, segments):
'''See `ILocalFilesystem`.'''
path = self.getRealPathFromSegments(segments, include_virtual=False)
path_encoded = self.getEncodedPath(path)
self._requireFile(segments)
with self._convertToOSError(path), self._impersonateUser():
fd = os.open(
path_encoded,
self.OPEN_READ_ONLY,
)
return os.fdopen(fd, 'rb')
def openFileForWriting(self, segments, mode=_DEFAULT_FILE_MODE):
"""
See `ILocalFilesystem`.
For security reasons, the file is only opened with read/write for
owner.
"""
path = self.getRealPathFromSegments(segments, include_virtual=False)
path_encoded = self.getEncodedPath(path)
self._requireFile(segments)
with self._convertToOSError(path), self._impersonateUser():
fd = os.open(
path_encoded,
(self.OPEN_WRITE_ONLY | self.OPEN_CREATE |
self.OPEN_TRUNCATE),
mode)
return os.fdopen(fd, 'wb')
def openFileForAppending(self, segments, mode=_DEFAULT_FILE_MODE):
'''See `ILocalFilesystem`.'''
def fail_on_read():
raise AssertionError(
'File opened for appending. Read is not allowed.')
path = self.getRealPathFromSegments(segments, include_virtual=False)
path_encoded = self.getEncodedPath(path)
self._requireFile(segments)
with self._convertToOSError(path), self._impersonateUser():
fd = os.open(
path_encoded,
(self.OPEN_APPEND | self.OPEN_CREATE |
self.OPEN_WRITE_ONLY),
mode)
new_file = os.fdopen(fd, 'ab')
return new_file
def getFileSize(self, segments):
'''See `ILocalFilesystem`.'''
if self._isVirtualPath(segments):
            # Virtual paths are non-existent in the real filesystem but we return
# a value instead of file not found.
return 0
path = self.getRealPathFromSegments(segments)
path_encoded = self.getEncodedPath(path)
with self._impersonateUser():
return os.path.getsize(path_encoded)
def _getVirtualMembers(self, segments):
"""
Return a list with virtual folders which are children of `segments`.
"""
result = []
segments_length = len(segments)
for virtual_segments, real_path in self._avatar.virtual_folders:
if segments_length >= len(virtual_segments):
# Not something that might look like the parent of a
# virtual folder.
continue
if not self._areEqual(
virtual_segments[:segments_length], segments):
continue
child_segments = virtual_segments[segments_length:]
result.append(child_segments[0])
        # Reduce duplicates and convert to attributes.
return [
self._getPlaceholderAttributes(segments + [m])
for m in set(result)
]
def getFolderContent(self, segments):
"""
See `ILocalFilesystem`.
"""
result = [m.name for m in self._getVirtualMembers(segments)]
if segments and result:
# We only support mixing virtual folder names with real names
# for the root folder.
# For all the other paths, we ignore the real folders if they
# overlay a virtual path.
return result
path = self.getRealPathFromSegments(segments)
path_encoded = self.getEncodedPath(path)
try:
with self._impersonateUser():
for entry in os.listdir(path_encoded):
name = self._decodeFilename(entry)
if name in result:
continue
result.append(name)
except Exception as error:
if not result:
raise error
return result
def iterateFolderContent(self, segments):
"""
See `ILocalFilesystem`.
"""
path = self.getRealPathFromSegments(segments)
path_encoded = self.getEncodedPath(path)
virtual_members = self._getVirtualMembers(segments)
if segments and virtual_members:
# We only support mixing virtual folder names with real names
# for the root folder.
# For all the other paths, we ignore the real folders if they
# overlay a virtual path.
return iter(virtual_members)
# We start with possible virtual folders as they should shadow the
# real folders.
firsts = virtual_members
try:
with self._impersonateUser():
folder_iterator = scandir(path_encoded)
# On Windows we need to iterate over the first element to get the
# errors.
# Otherwise just by opening the directory, we don't get any errors.
# This is why we try to extract the first element, and yield it
# later.
try:
first_member = next(folder_iterator)
except StopIteration:
# The folder is empty so just return an iterator with possible
# virtual members.
return iter(virtual_members)
real_first_attributes = self._dirEntryToFileAttributes(
first_member)
first_names = [m.name for m in firsts]
if real_first_attributes.name not in first_names:
firsts.append(real_first_attributes)
except Exception as error:
# We fail to list the actual folder.
if not virtual_members:
                # Since there are no virtual folders, we just raise the error.
raise error
# We have virtual folders.
# No direct listing.
folder_iterator = iter([])
return self._iterateScandir(set(firsts), folder_iterator)
def _iterateScandir(self, firsts, folder_iterator):
"""
This generator wrapper needs to be delegated to this method as
otherwise we get a GeneratorExit error.
`firsts` is a list of FileAttributes.
`folder_iterators` is the iterator resulted from scandir.
"""
first_names = []
for member in firsts:
first_names.append(member.name)
yield member
for entry in folder_iterator:
attributes = self._dirEntryToFileAttributes(entry)
if attributes.name in first_names:
# Make sure we don't add duplicate from previous
# virtual folders.
continue
yield attributes
def _dirEntryToFileAttributes(self, entry):
"""
Convert the result from scandir to FileAttributes.
"""
name = self._decodeFilename(entry.name)
path = self._decodeFilename(entry.path)
with self._impersonateUser():
stats = entry.stat(follow_symlinks=False)
is_link = entry.is_symlink()
mode = stats.st_mode
is_directory = bool(stat.S_ISDIR(mode))
if is_directory and sys.platform.startswith('aix'):
# On AIX mode contains an extra most significant bit
# which we don't use.
mode = mode & 0o077777
# We use the INODE from stats, as on Windows getting INODE from
# scandir result is slow.
inode = stats.st_ino
modified = stats.st_mtime
if os.name == 'nt':
# On Windows, scandir gets float precision while
# getAttributes only integer.
modified = int(modified)
return FileAttributes(
name=name,
path=path,
size=stats.st_size,
is_file=bool(stat.S_ISREG(mode)),
is_folder=is_directory,
is_link=is_link,
modified=modified,
mode=mode,
hardlinks=stats.st_nlink,
uid=stats.st_uid,
gid=stats.st_gid,
node_id=inode,
)
def _decodeFilename(self, name):
"""
Return the Unicode representation of file from `name`.
`name` is in the encoded format stored on the filesystem.
"""
# This is done to allow lazy initialization of process_capabilities.
from chevah.compat import process_capabilities
if not isinstance(name, text_type):
name = name.decode(self.INTERNAL_ENCODING)
        # OSX HFS+ stores file names as Unicode, but in normalized format.
# On OSX we might also read files from other filesystems, not only
# HFS+, but we are lucky here as normalize will not raise errors
# if input is already normalized.
if process_capabilities.os_name == 'osx':
name = unicodedata.normalize('NFC', name)
return name
def getAttributes(self, segments):
"""
See `ILocalFilesystem`.
"""
if self._isVirtualPath(segments):
return self._getPlaceholderAttributes(segments)
stats = self.getStatus(segments)
mode = stats.st_mode
is_directory = bool(stat.S_ISDIR(mode))
if is_directory and sys.platform.startswith('aix'):
# On AIX mode contains an extra most significant bit
# which we don't use.
mode = mode & 0o077777
try:
name = segments[-1]
except Exception:
name = None
path = self.getRealPathFromSegments(segments)
return FileAttributes(
name=name,
path=path,
size=stats.st_size,
is_file=bool(stat.S_ISREG(mode)),
is_folder=is_directory,
is_link=self.isLink(segments),
modified=stats.st_mtime,
mode=mode,
hardlinks=stats.st_nlink,
uid=stats.st_uid,
gid=stats.st_gid,
node_id=stats.st_ino,
)
def _getPlaceholderAttributes(self, segments):
"""
Return the attributes which can be used for the case when a real
        attribute doesn't exist for `segments`.
"""
modified = time.mktime((
date.today().year,
1,
1,
0,
0,
0,
0,
0,
-1,
))
return FileAttributes(
name=segments[-1],
path=self.getRealPathFromSegments(segments),
size=0,
is_file=False,
is_folder=True,
is_link=False,
modified=modified,
mode=0o40555,
hardlinks=1,
uid=1,
gid=1,
node_id=None,
)
def _getPlaceholderStatus(self):
"""
Return a placeholder status result.
"""
modified = time.mktime((
date.today().year,
1,
1,
0,
0,
0,
0,
0,
-1,
))
return os.stat_result([
0o40555, 0, 0, 0, 1, 1, 0, 1, modified, 0])
def setAttributes(self, segments, attributes):
'''See `ILocalFilesystem`.'''
path = self.getRealPathFromSegments(segments, include_virtual=False)
path_encoded = self.getEncodedPath(path)
with self._impersonateUser():
if 'uid' in attributes and 'gid' in attributes:
os.chown(path_encoded, attributes['uid'], attributes['gid'])
if 'mode' in attributes:
os.chmod(path_encoded, attributes['mode'])
if 'atime' in attributes and 'mtime' in attributes:
os.utime(
path_encoded, (attributes['atime'], attributes['mtime']))
def touch(self, segments):
"""
See: ILocalFilesystem.
"""
path = self.getRealPathFromSegments(segments, include_virtual=False)
path_encoded = self.getEncodedPath(path)
with self._impersonateUser():
with open(path_encoded, 'a'):
os.utime(path_encoded, None)
def copyFile(self, source_segments, destination_segments, overwrite=False):
"""
See: ILocalFilesystem.
"""
if self.isFolder(destination_segments):
destination_segments = destination_segments[:]
destination_segments.append(source_segments[-1])
destination_path = self.getRealPathFromSegments(
destination_segments, include_virtual=False)
destination_path_encoded = self.getEncodedPath(destination_path)
if not overwrite and self.exists(destination_segments):
raise OSError(
errno.EEXIST, 'Destination exists', destination_path_encoded)
source_path = self.getRealPathFromSegments(
source_segments, include_virtual=False)
source_path_encoded = self.getEncodedPath(source_path)
with self._impersonateUser():
shutil.copyfile(
source_path_encoded, destination_path_encoded)
def setGroup(self, segments, group, permissions=None):
'''Informational method for not using setGroup.'''
raise AssertionError(u'Use addGroup for setting a group.')
def raiseFailedToAddGroup(self, group, path, message=u''):
"""
Helper for raising the exception from a single place.
"""
raise CompatError(
1017,
_(u'Failed to add group "%s" for "%s". %s' % (
group, path, message)),
)
def raiseFailedToSetOwner(self, owner, path, message=u''):
"""
Helper for raising the exception from a single place.
"""
raise CompatError(
1016,
_(u'Failed to set owner to "%s" for "%s". %s' % (
owner, path, message)),
)
def _checkChildPath(self, root, child):
"""
Check that child path is inside root path.
"""
child_strip = self.getAbsoluteRealPath(child)
root_strip = self.getAbsoluteRealPath(root)
if not child_strip.startswith(root_strip):
raise CompatError(
1018, u'Path "%s" is outside of locked folder "%s"' % (
child, root))
def _parseReparseData(self, raw_reparse_data):
"""
Parse reparse buffer.
Return a dict in format:
{
'tag': TAG,
'length': LENGTH,
'data': actual_payload_as_byte_string,
...
'optional_struct_member_1': VALUE_FOR_STRUCT_MEMBER,
...
}
When reparse data contains an unknown tag, it will parse the tag
and length headers and put everything else in data.
"""
# Size of our types.
SIZE_ULONG = 4 # sizeof(ULONG)
SIZE_USHORT = 2 # sizeof(USHORT)
HEADER_SIZE = 20
# buffer structure:
#
# typedef struct _REPARSE_DATA_BUFFER {
# ULONG ReparseTag;
# USHORT ReparseDataLength;
# USHORT Reserved;
# union {
# struct {
# USHORT SubstituteNameOffset;
# USHORT SubstituteNameLength;
# USHORT PrintNameOffset;
# USHORT PrintNameLength;
# ULONG Flags;
# WCHAR PathBuffer[1];
# } SymbolicLinkReparseBuffer;
# struct {
# USHORT SubstituteNameOffset;
# USHORT SubstituteNameLength;
# USHORT PrintNameOffset;
# USHORT PrintNameLength;
# WCHAR PathBuffer[1];
# } MountPointReparseBuffer;
# struct {
# UCHAR DataBuffer[1];
# } GenericReparseBuffer;
# } DUMMYUNIONNAME;
# } REPARSE_DATA_BUFFER, *PREPARSE_DATA_BUFFER;
# Supported formats for reparse data.
# For now only SymbolicLinkReparseBuffer is supported.
formats = {
# http://msdn.microsoft.com/en-us/library/cc232006.aspx
self.IO_REPARSE_TAG_SYMLINK: [
('substitute_name_offset', SIZE_USHORT),
('substitute_name_length', SIZE_USHORT),
('print_name_offset', SIZE_USHORT),
('print_name_length', SIZE_USHORT),
('flags', SIZE_ULONG),
],
}
if len(raw_reparse_data) < HEADER_SIZE:
            raise CompatException('Reparse buffer too small.')
result = {}
# Parse header.
result['tag'] = struct.unpack('<L', raw_reparse_data[:4])[0]
result['length'] = struct.unpack(
'<H', raw_reparse_data[4:6])[0]
# Reserved header member is ignored.
tail = raw_reparse_data[8:]
try:
structure = formats[result['tag']]
except KeyError:
structure = []
for member_name, member_size in structure:
member_data = tail[:member_size]
tail = tail[member_size:]
if member_size == SIZE_USHORT:
result[member_name] = struct.unpack('<H', member_data)[0]
else:
result[member_name] = struct.unpack('<L', member_data)[0]
# result[member_name] = 0
# for byte in member_data:
# result[member_name] += ord(byte)
# Remaining tail is set as data.
result['data'] = tail
return result
def _parseSymbolicLinkReparse(self, symbolic_link_data):
"""
        Return a dict with 'name' and 'target' for `symbolic_link_data` as
Unicode strings.
"""
result = {
'name': None,
'target': None,
}
offset = symbolic_link_data['print_name_offset']
ending = offset + symbolic_link_data['print_name_length']
result['name'] = (
symbolic_link_data['data'][offset:ending].decode('utf-16'))
offset = symbolic_link_data['substitute_name_offset']
ending = offset + symbolic_link_data['substitute_name_length']
target_path = (
symbolic_link_data['data'][offset:ending].decode('utf-16'))
# Have no idea why we get this marker, but we convert it to
# long UNC.
if target_path.startswith('\\??\\'):
target_path = '\\\\?' + target_path[3:]
result['target'] = target_path
return result
class FileAttributes(object):
"""
See: IFileAttributes.
"""
implements(IFileAttributes)
def __init__(
self, name, path, size=0,
is_file=False, is_folder=False, is_link=False,
modified=0,
mode=0, hardlinks=1,
uid=None, gid=None,
owner=None, group=None,
node_id=None,
):
self.name = name
self.path = path
self.size = size
self.is_folder = is_folder
self.is_file = is_file
self.is_link = is_link
self.modified = modified
self.mode = mode
self.hardlinks = hardlinks
self.uid = uid
self.gid = gid
self.node_id = node_id
self.owner = owner
self.group = group
def __hash__(self):
return hash((
self.name,
self.path,
self.size,
self.is_folder,
self.is_file,
self.is_link,
self.modified,
self.mode,
self.hardlinks,
self.uid,
self.gid,
self.node_id,
self.owner,
self.group,
))
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.__dict__ == other.__dict__
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return u"%s:%s:%s" % (self.__class__, id(self), self.__dict__)
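

# A minimal sketch, not part of chevah.compat itself: it only illustrates
# which members a concrete subclass of PosixFilesystemBase is expected to
# provide. The avatar object and the path translation below are assumptions
# made up for this example; the real implementations live in the OS-specific
# modules of the package and are more involved.
class _ExampleAvatar(object):
    """Bare-bones avatar with no impersonation and no virtual folders."""

    name = u'example'
    root_folder_path = None
    home_folder_path = u'/tmp'
    virtual_folders = []

    def getImpersonationContext(self):
        return NoOpContext()


class _ExamplePosixFilesystem(PosixFilesystemBase):
    """Illustrative subclass filling in the abstract pieces."""

    def _getRootPath(self):
        return u'/'

    def getSegmentsFromRealPath(self, path):
        return self._pathSplitRecursive(path)

    def getRealPathFromSegments(self, segments, include_virtual=True):
        return u'/' + u'/'.join(segments)

    def isLink(self, segments):
        return False


# Usage sketch (commented out, as it touches the real filesystem):
# filesystem = _ExamplePosixFilesystem(avatar=_ExampleAvatar())
# print(filesystem.getPath([u'tmp', u'demo.txt']))
# print(filesystem.exists([u'tmp']))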
|
StarcoderdataPython
|
25707
|
<filename>ind3.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import math
if __name__ == '__main__':
    a = input('Enter the time in hours: ')
t = 0
A = 1
V = int(int(a)/3)
if V == 0:
        print('Error')
else:
while t < int(a):
t = t + 3
A *= 2
        print('After ' + a + ' hours there will be ' + str(A) + ' amoebas')
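

# A minimal closed-form sketch, added for illustration and not part of the
# original exercise: the loop above doubles the count once per started
# 3-hour period, which equals 2 raised to the rounded-up number of periods.
def amoebas_after(hours, period=3):
    """Population after `hours`, doubling once per started `period` hours."""
    return 2 ** math.ceil(hours / period)

# Example: amoebas_after(9) == 8, matching the loop for a = 9.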
|
StarcoderdataPython
|
4829638
|
from pprint import pprint
from configparser import ConfigParser
from azure.mgmt.resource import ResourceManagementClient
from azure.common.credentials import ServicePrincipalCredentials
# These are only imported for Type Hinting and Intellisense.
from azure.mgmt.resource.resources.models import ResourceGroup
from azure.mgmt.resource.resources.models import GenericResourceExpanded
# Initialize the Parser.
config = ConfigParser()
# Read the file.
try:
config.read('config/config.ini')
except:
config.read('configs/config.ini')
# Grab the Azure Credentials needed.
tenant_id = config.get('azure_credentials', 'azure_tenant_id')
client_id = config.get('azure_credentials', 'azure_client_id')
client_secret = config.get('azure_credentials', 'azure_client_secret')
subscription_id = config.get('azure_credentials', 'azure_subscription_id')
# Define the Credentials.
credential = ServicePrincipalCredentials(
tenant=tenant_id,
client_id=client_id,
secret=client_secret
)
# Pass through the credential.
resource_management_client = ResourceManagementClient(
credentials=credential,
subscription_id=subscription_id
)
# Loop through each resource group that falls under the subscription.
for resource_group in resource_management_client.resource_groups.list():
    # Redefine this for Type Hinting.
resource_group: ResourceGroup = resource_group
print('')
print(resource_group.id)
print(resource_group.name)
print(resource_group.managed_by)
print('')
pprint(resource_group.as_dict())
|
StarcoderdataPython
|
28720
|
<gh_stars>0
import datetime
import re
from app import db
from bson.objectid import ObjectId
from pymongo import InsertOne, UpdateOne
from pymongo.errors import BulkWriteError
from app.error.factoryInvalid import FactoryInvalid
class Model(object):
def __init__(self, id=None, name=None):
if name is None:
name = self.__class__.__name__.lower()
self.col = db[name]
self.__id = id
def getAll(self, filter={}, limit=10, skip=0):
result = self.col.find(filter) \
.limit(limit) \
.skip(skip)
return list(result)
def count(self, filter={}):
return self.col.count(filter)
def get(self):
return self.col.find_one(Model.makeObjectId(self.__id))
def update(self, data):
if not self.__id:
return FactoryInvalid.responseInvalid(
                {'msg': 'Id not set'},
422)
setUpdatedData = {'$set': data}
result = self.col.update_one(Model.makeObjectId(self.__id), setUpdatedData)
return result.raw_result
def updateMany(self, filters, data):
setUpdatedData = {'$set': data}
result = self.col.update_many(filters, setUpdatedData)
return result.raw_result
def batch_process(self, data):
requests = []
for item in data:
obj = {**Model.makeDateAt(key='updated_at'), **item['data']}
if item['filter']:
args = Model.reservedWordMongo(obj)
cal = UpdateOne(item['filter'], args, upsert=True)
else:
obj = {**Model.makeDateAt(key='created_at'), **obj}
cal = InsertOne(obj)
requests.append(cal)
try:
result = self.col.bulk_write(requests, ordered=False)
except BulkWriteError as bwe:
print(bwe.details)
raise
return result.bulk_api_result
@staticmethod
def makeDateAt(key):
return {key: datetime.datetime.utcnow()}
@staticmethod
def reservedWordMongo(obj):
filter = {'$set': {}}
for key, item in obj.items():
if item is not None:
if re.match(r"\$", key):
filter[key] = item
else:
filter['$set'][key] = item
return filter
@staticmethod
def makeObjectId(id):
if id:
return {'_id': Model.castObjectId(id)}
@staticmethod
def castObjectId(id):
return ObjectId(id)
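

# A minimal usage sketch, not part of the original application: the
# 'products' collection name and the documents are made up for illustration,
# and running it requires the Mongo connection configured behind `app.db`.
if __name__ == '__main__':
    products = Model(name='products')
    result = products.batch_process([
        # Non-empty filter: turned into an upsert (UpdateOne with $set).
        {'filter': {'sku': 'A-1'}, 'data': {'sku': 'A-1', 'price': 10}},
        # Empty filter: turned into a plain InsertOne with created_at added.
        {'filter': {}, 'data': {'sku': 'B-2', 'price': 20}},
    ])
    print(result.get('nUpserted'), result.get('nInserted'))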
|
StarcoderdataPython
|
58336
|
<filename>odoo-13.0/addons/sale_quotation_builder/models/res_config_settings.py
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, api
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
@api.model
def _set_default_sale_order_template_id_if_empty(self):
IrDefault = self.env['ir.default']
if not IrDefault.get('sale.order', 'sale_order_template_id'):
template = self.env.ref('sale_quotation_builder.sale_order_template_default', raise_if_not_found=False)
if template and template.active:
IrDefault.set('sale.order', 'sale_order_template_id', template.id)
|
StarcoderdataPython
|
3330734
|
<gh_stars>10-100
"""
A Simple program to detect the human faces using Haarcascade Classifier
Date : 21 March 2019
Author : <NAME>
"""
import cv2
haar_cascade_face = cv2.CascadeClassifier('/home/shiyaztech/Documents/Applications/OpenCV/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
class VideoCamera(object):
def __init__(self):
# Using OpenCV to capture from device 0. If you have trouble capturing
# from a webcam, comment the line below out and use a video file
# instead.
self.video = cv2.VideoCapture(0)
self.frameWidth = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))
self.frameHeight = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))
# If you decide to use video.mp4, you must have this file in the folder
# as the main.py.
# self.video = cv2.VideoCapture('video.mp4')
def __del__(self):
self.video.release()
def get_frame(self):
success, image = self.video.read()
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces_rects = haar_cascade_face.detectMultiScale(image_gray, scaleFactor=1.2, minNeighbors=5)
# Let us print the no. of faces found
#print('Faces found: ', len(faces_rects))
for (x, y, w, h) in faces_rects:
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
if len(faces_rects) > 0:
cv2.putText(img = image, text = 'Faces found:' + str(len(faces_rects)), org=(50,50), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, color=(0, 0, 255))
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
# video stream.
ret, jpeg = cv2.imencode('.jpg', image)
return jpeg.tobytes()
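

# A minimal streaming sketch, not part of the original file: it assumes Flask
# is installed and shows how get_frame() can feed a multipart MJPEG response.
# In the tutorial layout mentioned above this would normally live in main.py.
if __name__ == '__main__':
    from flask import Flask, Response

    app = Flask(__name__)

    def gen(camera):
        # Yield one JPEG frame per multipart chunk.
        while True:
            frame = camera.get_frame()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(VideoCamera()),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    app.run(host='0.0.0.0')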
|
StarcoderdataPython
|
197688
|
from uuid import uuid4
from unittest import TestCase
from servicelayer.cache import get_redis, make_key
class CacheTest(TestCase):
def test_redis(self):
key = make_key('test', uuid4())
conn = get_redis()
assert not conn.exists(key)
conn.set(key, 'banana')
assert conn.get(key) == 'banana', conn.get(key)
assert conn.exists(key)
|
StarcoderdataPython
|
159158
|
<filename>schemas/product_type.py
from marshmallow.fields import Nested
from marshmallow_mongoengine import ModelSchema
from models.product_type import ProductTypeModel
class ProductTypeSchema(ModelSchema):
# __nested__ = False
class Meta:
model = ProductTypeModel
exclude = ('translations',)
attributes = Nested('AttributeSchema', many=True, exclude=('translations',))
|
StarcoderdataPython
|
3351149
|
from invoke import task
@task
def test(c):
c.run("pytest -v --doctest-modules --import-mode=append", pty=True)
@task
def lint(c):
c.run("python -m flake8 src/.", pty=True)
c.run("python -m flake8 tests/.", pty=True)
c.run("python -m black --check .", pty=True)
|
StarcoderdataPython
|
61637
|
<gh_stars>0
from django.contrib import admin
from .models import Airport, Flight, Passenger
admin.site.register(Airport)
admin.site.register(Flight)
admin.site.register(Passenger)
|
StarcoderdataPython
|
1620879
|
import numpy as np
# initialize parameters
# layer_dims = list holding the number of neurons in each layer (input features included)
def initilaize_parameters(layer_dims):
np.random.seed(1)
parameters = {}
L = len(layer_dims)
for l in range(1,L):
        # Dividing by np.sqrt(layer_dims[l-1]) scales W down to smaller values and
        # improves learning. We could also multiply by a constant such as 0.01.
parameters['W' + str(l)] = np.random.randn(layer_dims[l],layer_dims[l-1]) / np.sqrt(layer_dims[l-1])# W(l,l-1)
parameters['b' + str(l)] = np.zeros((layer_dims[l],1)) # b(l,1)
return parameters
def linear_forward(A_prev,W,b):
Z = np.dot(W,A_prev) + b # Z = WA + b (vectorized)
assert(Z.shape == (W.shape[0],A_prev.shape[1]))
cache = (A_prev,W,b)
return Z, cache
def sigmoid(Z): #activation function
A = 1 / (1 + np.exp(-Z))
cache = Z
    return A, cache  # If you are not using relu, cache A here instead of Z.
def relu(Z): #activation function
A = np.maximum(0,Z)
cache = Z
return A, cache
def linear_activation_forward(A_prev,W,b,activation):
Z, linear_cache = linear_forward(A_prev,W,b)
if activation == "sigmoid":
A, activation_cache = sigmoid(Z)
elif activation == "relu":
A, activation_cache = relu(Z)
    cache = (linear_cache, activation_cache)  # values needed for backpropagation
return A,cache
def nn_forward_propagation(X, parameters):  # Designed for classification problems.
caches = []
A = X
L = len(parameters) // 2
for l in range(1,L):
A_prev = A
A, cache = linear_activation_forward(A_prev,parameters['W' + str(l)],parameters['b' + str(l)], activation="relu")
caches.append(cache)
AL, cache = linear_activation_forward(A,parameters['W' + str(L)],parameters['b' + str(L)], activation="sigmoid")
caches.append(cache)
assert(AL.shape == (1,X.shape[1]))
return AL, caches
def cost_function(AL, Y):  # measures the error of the prediction
m = Y.shape[1]
cost = (1./m) * (-np.dot(Y,np.log(AL).T) - np.dot(1-Y,np.log(1-AL).T))
cost = np.squeeze(cost)
assert(cost.shape == ())
return cost
def linear_backward(dZ,cache):
A_prev, W, b = cache
m = A_prev.shape[1]
dW = (1./m) * np.dot(dZ,A_prev.T)
db = (1./m) * np.sum(dZ,axis=1,keepdims=True)
dA_prev = np.dot(W.T,dZ)
return dA_prev, dW, db
def sigmoid_backward(dA,cache):
Z = cache
s = 1/(1 + np.exp(-Z))
dZ = dA * s * (1-s)
return dZ
def relu_backward(dA,cache):
Z = cache
dZ = np.array(dA, copy=True)
dZ[Z <= 0] = 0
assert(dZ.shape == Z.shape)
return dZ
def linear_activation_backward(dA,cache,activation):
linear_cache, activation_cache = cache
if activation == "relu":
dZ = relu_backward(dA,activation_cache)
dA_prew, dW, db = linear_backward(dZ,linear_cache)
elif activation == "sigmoid":
dZ = sigmoid_backward(dA,activation_cache)
dA_prew, dW, db = linear_backward(dZ,linear_cache)
return dA_prew, dW, db
def nn_backward_propagation(AL,Y,caches):
grads = {}
L = len(caches)
m = AL.shape[1]
Y = Y.reshape(AL.shape)
    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))  # derivative of the cost function
current_cache = caches[L - 1]
grads['dA' + str(L-1)], grads['dW' + str(L)], grads['db' + str(L)] = linear_activation_backward(dAL,current_cache,activation="sigmoid")
for l in reversed(range(L-1)):
current_cache = caches[l]
grads['dA' + str(l)], grads['dW' + str(l+1)], grads['db' + str(l+1)] = linear_activation_backward(grads['dA'+str(l+1)],current_cache,activation="relu")
return grads
def update_parameters(parameters,grads,learning_rate):
L = len(parameters) // 2
for l in range(L):
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate*grads["dW" + str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate*grads["db" + str(l+1)]
return parameters
def predict(X,parameters):
AL, cache = nn_forward_propagation(X,parameters)
predictions = (AL>0.5)
return predictions
def accuracy(predict,Y):
accury = np.squeeze(((np.dot(Y,predict.T) + np.dot(1-Y,1-predict.T))/float(Y.size)) * 100)
return accury
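

# A minimal end-to-end sketch on synthetic data; the data, layer sizes and
# hyper-parameters below are made up for illustration, the original module
# only defines the building blocks. X has shape (features, samples) and Y
# has shape (1, samples), matching the vectorized layout used above.
if __name__ == "__main__":
    np.random.seed(1)
    X = np.random.randn(4, 500)                     # 4 features, 500 samples
    Y = (X[0:1, :] + X[1:2, :] > 0).astype(int)     # simple separable labels

    layer_dims = [4, 5, 3, 1]                       # input, two hidden, output
    parameters = initilaize_parameters(layer_dims)

    for i in range(1200):
        AL, caches = nn_forward_propagation(X, parameters)
        cost = cost_function(AL, Y)
        grads = nn_backward_propagation(AL, Y, caches)
        parameters = update_parameters(parameters, grads, learning_rate=0.05)
        if i % 300 == 0:
            print("iteration %d, cost %.4f" % (i, cost))

    predictions = predict(X, parameters)
    print("train accuracy: %.2f%%" % accuracy(predictions, Y))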
|
StarcoderdataPython
|
3331337
|
class Solution:
"""
@param nums: A list of integers
@return: The majority number occurs more than 1/3
"""
def majorityNumber(self, nums):
# write your code here
count1 = 0
count2 = 0
candidate = [0,0]
for i in xrange(len(nums)):
if count1 == 0:
candidate[0] = nums[i]
if count2 == 0 and nums[i] != candidate[0]:
candidate[1] = nums[i]
if nums[i] != candidate[0] and nums[i] != candidate[1] \
and count2 != 0 and count1 != 0:
count1 -= 1
count2 -= 1
if nums[i] == candidate[0]:
count1 += 1
if nums[i] == candidate[1]:
count2 += 1
count1 = 0
count2 = 0
for i in xrange(len(nums)):
if nums[i] == candidate[0]:
count1 += 1
elif nums[i] == candidate[1]:
count2 += 1
if count1 > count2:
return candidate[0]
else:
return candidate[1]
a = Solution()
print a.majorityNumber([1,1,1,1,2,2,3,3,4,4,4])
|
StarcoderdataPython
|
4815270
|
<reponame>texpomru13/espnet
#!/usr/bin/env python3
# Copyright 2021 Carnegie Mellon University (<NAME>)
import os
import os.path
import argparse
parser = argparse.ArgumentParser(description="Calculate classification accuracy.")
parser.add_argument("--wer_dir", type=str, help="folder containing hyp.trn and ref.trn")
args = parser.parse_args()
with open(os.path.join(args.wer_dir, "hyp.trn"), "r") as f:
hyp_dict = {ln.split()[1]: ln.split()[0] for ln in f.readlines()}
with open(os.path.join(args.wer_dir, "ref.trn"), "r") as f:
ref_dict = {ln.split()[1]: ln.split()[0] for ln in f.readlines()}
n_correct = 0
n_samples = 0
for sample_id in ref_dict:
n_samples += 1
if ref_dict[sample_id] == hyp_dict[sample_id]:
n_correct += 1
with open(os.path.join(args.wer_dir, "..", "accuracy.csv"), "w") as f:
f.write("total,correct,accuracy\n")
f.write(f"{n_samples},{n_correct},{n_correct/n_samples}\n")
|
StarcoderdataPython
|
1686488
|
<filename>readimc/data/_panorama.py
from dataclasses import dataclass
from typing import Dict, Optional, TYPE_CHECKING
if TYPE_CHECKING:
import readimc.data
@dataclass(frozen=True)
class Panorama:
"""Panorama metadata (only for panoramas with panorama image data)"""
slide: "readimc.data.Slide"
"""Parent slide"""
id: int
"""Panorama ID"""
metadata: Dict[str, str]
"""Full panorama metadata"""
@property
def description(self) -> Optional[str]:
"""User-provided panorama description"""
return self.metadata.get("Description")
@property
def x1_um(self) -> Optional[float]:
"""Panorama coordinate 1 (x-axis), in micrometers"""
val = self.metadata.get("SlideX1PosUm")
if val is not None:
return float(val)
return None
@property
def y1_um(self) -> Optional[float]:
"""Panorama coordinate 1 (y-axis), in micrometers"""
val = self.metadata.get("SlideY1PosUm")
if val is not None:
return float(val)
return None
@property
def x2_um(self) -> Optional[float]:
"""Panorama coordinate 2 (x-axis), in micrometers"""
val = self.metadata.get("SlideX2PosUm")
if val is not None:
return float(val)
return None
@property
def y2_um(self) -> Optional[float]:
"""Panorama coordinate 2 (y-axis), in micrometers"""
val = self.metadata.get("SlideY2PosUm")
if val is not None:
return float(val)
return None
@property
def x3_um(self) -> Optional[float]:
"""Panorama coordinate 3 (x-axis), in micrometers"""
val = self.metadata.get("SlideX3PosUm")
if val is not None:
return float(val)
return None
@property
def y3_um(self) -> Optional[float]:
"""Panorama coordinate 3 (y-axis), in micrometers"""
val = self.metadata.get("SlideY3PosUm")
if val is not None:
return float(val)
return None
@property
def x4_um(self) -> Optional[float]:
"""Panorama coordinate 4 (x-axis), in micrometers"""
val = self.metadata.get("SlideX4PosUm")
if val is not None:
return float(val)
return None
@property
def y4_um(self) -> Optional[float]:
"""Panorama coordinate 4 (y-axis), in micrometers"""
val = self.metadata.get("SlideY4PosUm")
if val is not None:
return float(val)
return None
@property
def width_um(self) -> Optional[float]:
"""Panorama width, in micrometers"""
if None not in (
self.x1_um,
self.y1_um,
self.x2_um,
self.y2_um,
self.x3_um,
self.y3_um,
self.x4_um,
self.y4_um,
):
a = (self.x1_um - self.x2_um) ** 2 + (self.y1_um - self.y2_um) ** 2
b = (self.x3_um - self.x4_um) ** 2 + (self.y3_um - self.y4_um) ** 2
if abs(a - b) > 0.001:
raise ValueError(
f"Panorama {self.id}: inconsistent image widths"
)
return (a ** 0.5 + b ** 0.5) / 2.0
return None
@property
def height_um(self) -> Optional[float]:
"""Panorama height, in micrometers"""
if None not in (
self.x1_um,
self.y1_um,
self.x2_um,
self.y2_um,
self.x3_um,
self.y3_um,
self.x4_um,
self.y4_um,
):
a = (self.x1_um - self.x4_um) ** 2 + (self.y1_um - self.y4_um) ** 2
b = (self.x2_um - self.x3_um) ** 2 + (self.y2_um - self.y3_um) ** 2
if abs(a - b) > 0.001:
raise ValueError(
f"Panorama {self.id}: inconsistent image heights"
)
return (a ** 0.5 + b ** 0.5) / 2.0
return None
def __str__(self) -> str:
return (
f"Panorama {self.id}: {self.description or 'unnamed'} ("
f"x = {self.x1_um or '?'}um, "
f"y = {self.y1_um or '?'}um, "
f"width = {self.width_um or '?'}um, "
f"height = {self.height_um or '?'}um)"
)
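

# A minimal usage sketch with made-up metadata; the slide reference is left
# as None here, which the frozen dataclass accepts since type hints are not
# enforced at runtime. The four corners describe a 100um x 50um panorama.
if __name__ == "__main__":
    panorama = Panorama(
        slide=None,
        id=1,
        metadata={
            "Description": "demo",
            "SlideX1PosUm": "10", "SlideY1PosUm": "20",
            "SlideX2PosUm": "110", "SlideY2PosUm": "20",
            "SlideX3PosUm": "110", "SlideY3PosUm": "70",
            "SlideX4PosUm": "10", "SlideY4PosUm": "70",
        },
    )
    # Prints: Panorama 1: demo (x = 10.0um, y = 20.0um,
    # width = 100.0um, height = 50.0um)
    print(panorama)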
|
StarcoderdataPython
|
149603
|
import random
import pandas as pd
shots = 90
ot_shots = 10
##########
# Team 1 #
##########
team1 = {
'2pt rate': .80,
'3pt rate': .20,
'2pt%': .50,
'3pt%': .33333,
'orbd': .225,
'foul3': .015,
'foul2': .057,
'ft%': .77
}
##########
# Team 2 #
##########
team2 = {
'2pt rate': .50,
'3pt rate': .50,
'2pt%': .50,
'3pt%': .33333,
'orbd': .225,
'foul3': .015,
'foul2': .057,
'ft%': .77
}
def shoot_ft(team, ft_attempts):
ft_points = 0
i = 0
    while i < ft_attempts:
make = random.random()
if make < team['ft%']:
ft_points += 1
i += 1
return ft_points
def points(team):
roll_shot_type = random.random()
roll_make = random.random()
roll_foul = random.random()
if roll_shot_type <= team['2pt rate']:
if roll_foul <= team['foul2']:
if roll_make <= team['2pt%']:
return 2 + shoot_ft(team, 1)
else:
return shoot_ft(team, 2)
elif roll_make <= team['2pt%']:
return 2
else:
if roll_foul <= team['foul3']:
if roll_make <= team['3pt%']:
return 3 + shoot_ft(team, 1)
else:
return shoot_ft(team, 3)
elif roll_make <= team['3pt%']:
return 3
roll_orbd = random.random()
if roll_orbd <= team['orbd']:
return points(team)
return 0
def play_game(shots_to_take):
t1_points_in_game = 0
t2_points_in_game = 0
for shot in range(shots_to_take):
t1_points_in_game += points(team1)
t2_points_in_game += points(team2)
return t1_points_in_game, t2_points_in_game
results = []
for game in range(1000000):
t1_points, t2_points = play_game(shots)
while t1_points == t2_points:
t1_new, t2_new = play_game(ot_shots)
t1_points += t1_new
t2_points += t2_new
result = {
'team1': t1_points,
'team2': t2_points,
'game': game,
'team1_win': t1_points > t2_points,
'team2_win': t2_points > t1_points,
}
results.append(result)
frame = pd.DataFrame(results)
team1_wins = frame['team1_win'].sum() / frame.shape[0]
team2_wins = frame['team2_win'].sum() / frame.shape[0]
print('Team 1 wins {0:.2f}% of the time'.format(team1_wins * 100))
print('Team 2 wins {0:.2f}% of the time'.format(team2_wins * 100))
|
StarcoderdataPython
|
1791749
|
from collections import defaultdict
def brainfuck_interpreter(code, input_):
cells = defaultdict(int)
cell = 0
pos = 0
output = ""
stack = []
jump_to = {}
for i, c in enumerate(code):
if c == '[':
stack.append(i)
if c == ']':
jump_to[i] = stack[-1]
jump_to[stack.pop()] = i
while pos < len(code):
curr = code[pos]
if curr == '>':
cell += 1
elif curr == '<':
cell -= 1
elif curr == '+':
cells[cell] += 1
elif curr == '-':
cells[cell] -= 1
elif curr == '.':
output += chr(cells[cell])
elif curr == ',':
in_char = input_[0]
            input_ = input_[1:]
cells[cell] = ord(in_char)
elif curr == '[' and cells[cell] == 0:
pos = jump_to[pos]
elif curr == ']' and cells[cell] != 0:
pos = jump_to[pos]
pos += 1
return output
print(brainfuck_interpreter("++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.", ""))
|
StarcoderdataPython
|
29303
|
# importing libraries
import numpy as np
import pandas as pd
import random
import torch
def set_seeds(seed=1234):
"""[Set seeds for reproducibility.]
Keyword Arguments:
seed {int} -- [The seed value] (default: {1234})
"""
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
print("[INFO] THE SEED IS ", seed)
def set_device(cuda=True):
"""[To set the type of machine CPU or GPU]
Keyword Arguments:
cuda {bool} -- [To use GPU or not] (default: {True})
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("[INFO] THE DEVICE IS ", device)
return device
paths = {
"train_path1": "data/atis/train/seq.in",
"train_path2": "data/atis/train/seq.out",
"train_path3": "data/atis/train/label",
"valid_path1": "data/atis/dev/seq.in",
"valid_path2": "data/atis/dev/seq.out",
"valid_path3": "data/atis/dev/label",
"test_path1": "data/atis/test/seq.in",
"test_path2": "data/atis/test/seq.out",
"test_path3":"data/atis/test/label"
}
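

# A small usage sketch, not part of the original module: it only wires the
# helpers together the way a training script might, and assumes torch is
# installed. The ATIS file paths above are consumed by downstream code.
if __name__ == "__main__":
    set_seeds(1234)
    device = set_device()
    print("[INFO] TRAIN FILES:", paths["train_path1"], paths["train_path3"])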
|
StarcoderdataPython
|
1798651
|
<filename>dataCollection.py<gh_stars>10-100
from alpha_vantage.timeseries import TimeSeries
from datetime import datetime
import csv
import pandas as pd
import requests
import os
import glob
SYMBOL_URL = "http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange={}&render=download"
STOCK_EXCHANGES = ["nasdaq", "nyse"]
# Get last 7 days worth of data
def downloadHistory_stocks(symbol, interval='1min'):
try:
ts = TimeSeries(key='055UMQXJRDY71RG3', output_format='pandas')
data, meta_data = ts.get_intraday(
symbol=symbol, interval=interval, outputsize='full')
pd.set_option('display.max_rows', 5000)
dataCovert = str(pd.DataFrame(data))
f = open('data/output.txt', "w")
f.write(dataCovert)
f.close()
DataTemp = ["timestamp,open,high,low,close,volume,vwap\n"]
Data1 = []
f1 = open('data/output.txt')
line = f1.readline()
line = f1.readline()
while 1:
line = f1.readline()
if not line:
break
else:
Data1.append(line.split())
f1.close()
cumulative_total = 0
cumulative_volume = 0
for line in Data1:
# 2017-10-30,09:30:00
date = line.pop(0)
date += ' ' + line.pop(0)
typical_price = (float(line[0]) +
float(line[1]) + float(line[2])) / 3
cumulative_total += (typical_price * float(line[3]))
cumulative_volume += float(line[3])
DataTemp.append(
",".join([date] + line + [str(cumulative_total / cumulative_volume)]) + "\n")
write_csv(file_name="data/" + symbol + ".csv", data=DataTemp)
except ValueError:
pass
# get list of symbols automatically
def get_symbols(directory_name):
for se in STOCK_EXCHANGES:
with requests.Session() as s:
download = s.get(SYMBOL_URL.format(se))
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
data_list = []
for d in list(cr):
# print(d)
data_list.append(';'.join(d[:8]) + '\n')
write_csv(os.path.join(directory_name, se + ".csv"), data_list)
# Get data for all stocks below some price
def get_data():
get_symbols("data/symbols/")
for filename in glob.glob(os.path.join("data/symbols/", '*.csv')):
df = read_csv(file_name=filename, names=[
"Symbol", "Name", "LastSale", "MarketCap", "IPOyear", "Sector", "industry", "Summary Quote"], sep=";")
for chunk in df:
symbols = chunk["Symbol"].values.tolist()
for s in symbols:
print("Downloading data for ", s)
downloadHistory_stocks(s)
return
def read_csv(file_name, names=["timestamp", "open", "high", "low", "close", "volume", "vwap"], sep=',', chunksize=29):
df = pd.read_csv(file_name, names=names, sep=sep,
header=0, chunksize=chunksize)
return df
def write_csv(file_name="result.csv", data=[]):
file = open(file_name, "w")
file.writelines(data)
file.close()
if __name__ == '__main__':
apple_data = downloadHistory_stocks('SLV')
#mbi_data = downloadHistory_stocks('MBI')
#google_data = downloadHistory_stocks('GOOGL')
|
StarcoderdataPython
|
3336611
|
<reponame>LukasL97/shop-4-me
shop_owners = [
{
"id":"bg9hpzkhHcSIMm2C9IoniTSf",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Tyson","lastName":"Morar"}
},
{
"id":"usqsUKiOvpo0ao87oSIEKiqM",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Bruce","lastName":"Dibbert"}
},
{
"id":"cQ62zWADj6FlTVY5eF6JAEw7",
"login":{"<PASSWORD>Password":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Francisco","lastName":"Batz"}
},
{
"id":"2FeSVdmn6DWBn55cnEPIEWop",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Joel","lastName":"Tremblay"}
},
{
"id":"co9On75G0nnVXSOIKHQdrLJf",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Kaela","lastName":"Kutch"}
},
{
"id":"vnMfvDu35DIXtNKt9dZCuBR0",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Roxanne","lastName":"Corwin"}
},
{
"id":"WejnAARdEZS4L89CVnKIPiys",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Claudie","lastName":"Monahan"}
},
{
"id":"gRBVhu4vJmYLDKzZvsUmIqqh",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Jennifer","lastName":"Moore"}
},
{
"id":"U1Q1imHV3zqmzs46L8MWbABb",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Jonathon","lastName":"Rowe"}
},
{
"id":"rOSXbLgMs078y2RL2SYNQv1S",
"login":{"<PASSWORD>Password":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Alessia","lastName":"Langworth"}
},
{
"id":"CgSSKvFtV7b4FWe07KCZcuBD",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Bailee","lastName":"Conn"}
},
{
"id":"bpdGQkPVoSSS3uHZQadbTpNU",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Hal","lastName":"Rodriguez"}
},
{
"id":"DTsl0JNwZjpIArflmzzujT8w",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Marshall","lastName":"Blanda"}
},
{
"id":"VjVtF6Ujj0EfwdZM5X5w4Srm",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Everardo","lastName":"Erdman"}
},
{
"id":"WzFavjhD7onnIFjT18UFUzOX",
"login":{"<PASSWORD>Password":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Belle","lastName":"Jaskolski"}
},
{
"id":"FJtp1SmnzD2bAtgHa0EQzijt",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Yasmin","lastName":"Yost"}
},
{
"id":"3ym4XiofecjSz2Y8J2989LNm",
"login":{"<PASSWORD>Password":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Sherman","lastName":"Kuvalis"}
},
{
"id":"DRblckOBKg3gG08lht433dTF",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Dianna","lastName":"Schoen"}
},
{
"id":"PzLl5srAXwdzoP2khvU64IYI",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Juliet","lastName":"Hettinger"}
},
{
"id":"2NsQ5e8J2QLM6HmUqqG8E2Fa",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"May","lastName":"Ullrich"}
},
{
"id":"WwTVubusXEyBhRhILfCFD6Pb",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Sophia","lastName":"Abbott"}
},
{
"id":"HLHX2oYKPCDyb4d4d0qLsHuv",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Ahmad","lastName":"Witting"}
},
{
"id":"uSzThHtuzQYDQmEaPU3cCBqD",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Lon","lastName":"Waters"}
},
{
"id":"Zm08QQuWHIjq3JbFTOpygKdc",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Margie","lastName":"Abshire"}
},
{
"id":"AQaqYR2Z6Ua7Mt4GH92DXrEz",
"login":{"<PASSWORD>Password":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Vivienne","lastName":"Torp"}
},
{
"id":"N4oTSWiS7od39PnAqdkM7L8L",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Lavada","lastName":"Nienow"}
},
{
"id":"XoREJvuL7C141WR2QHyu5NY9",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Demetrius","lastName":"Olson"}
},
{
"id":"wLdsEz5sQNycN2QwP4HBrzMO",
"login":{"hashPassword":"_<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Jermey","lastName":"Hirthe"}
},
{
"id":"GntEGAiPaJXpeKrnIYzmCtpw",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Kaylie","lastName":"Jaskolski"}
},
{
"id":"LFD8yuyRz1IEPS7P7swrZ3cB",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Hollie","lastName":"Russel"}
},
{
"id":"v22jSZB6EHwB2cNETdLDt72P",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Anabelle","lastName":"Marks"}
},
{
"id":"7kf2hWpaXFyZHgMMsXW20ZO0",
"login":{"<PASSWORD>Password":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Dandre","lastName":"Schowalter"}
},
{
"id":"2YM6GZn2TrXEbLkTIP9vTVmz",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Leon","lastName":"Beatty"}
},
{
"id":"oU5drcb3Clblyuju4gkAp3n0",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Mack","lastName":"Crooks"}
},
{
"id":"oCDkTP133inNYd5w6RY2L7id",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Lisette","lastName":"Ullrich"}
},
{
"id":"C5zCus63Bsh74cvDBkrSV5I5",
"login":{"hashPassword":"d<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Dandre","lastName":"Wintheiser"}
},
{
"id":"whnQ3Suc27iXSNRnHsqbmApE",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Raven","lastName":"Towne"}
},
{
"id":"z5onqSm0WssRyP2c6jtEEV8U",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Lorena","lastName":"O'Keefe"}
},
{
"id":"ab17c9JnUkA86VJdIduYKFYO",
"login":{"<PASSWORD>Password":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Naomi","lastName":"Hyatt"}
},
{
"id":"WgDZj5OrErvSiQk0ZyeecUQi",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Dock","lastName":"Little"}
},
{
"id":"0DUcIW6WWMHBrvmPYh2vRdTN",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Frederique","lastName":"Lakin"}
},
{
"id":"PKg2oClUv1P0iOJqo4sJwF1R",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Kaitlin","lastName":"Barton"}
},
{
"id":"MXV9uGzANzQzjpjy3wEvKeME",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Esperanza","lastName":"Kertzmann"}
},
{
"id":"rgerp82WOP9cSmQOOrlXezJ6",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Maryam","lastName":"Greenholt"}
},
{
"id":"EgmFK37TpcIW3RiWdtOEJwfO",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Quinton","lastName":"Greenfelder"}
},
{
"id":"824F1KNTPM3Upzn7cTDOF49t",
"login":{"<PASSWORD>Password":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Jennings","lastName":"Anderson"}
},
{
"id":"awNy6i1BlflJs3Cfe1oA3Z7g",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Jacklyn","lastName":"Predovic"}
},
{
"id":"tZTfsD0C2iSfR77ZHcpB6ai6",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Laila","lastName":"Huel"}
},
{
"id":"ntMXizqkGBjl8qtToWu0v8s2",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Kellen","lastName":"Wuckert"}
},
{
"id":"CMyJJZfpnvltkiKpVns3cGpi",
"login":{"hashPassword":"<PASSWORD>","email":"<EMAIL>"},
"name":{"firstName":"Jettie","lastName":"Schulist"}
}
]
|
StarcoderdataPython
|
1675310
|
"""
Modified from django.utils.translation.trans_real
"""
from __future__ import unicode_literals
import re
from django.conf import settings
from django.urls import resolve
from django.urls import Resolver404
from django.urls.resolvers import RegexURLResolver
from django.utils.translation import get_language
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation.trans_real import check_for_language
from django.utils.translation.trans_real import get_language_from_path
from django.utils.translation.trans_real import get_languages
from django.utils.translation.trans_real import get_supported_language_variant
from django.utils.translation.trans_real import language_code_re
from django.utils.translation.trans_real import parse_accept_lang_header
from kolibri.core.device.utils import get_device_setting
def get_device_language():
language_id = get_device_setting("language_id", None)
try:
return get_supported_language_variant(language_id)
except LookupError:
return None
def get_accept_headers_language(request):
accept = request.META.get("HTTP_ACCEPT_LANGUAGE", "")
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == "*":
break
if not language_code_re.search(accept_lang):
continue
try:
return get_supported_language_variant(accept_lang)
except LookupError:
continue
def get_settings_language():
try:
return get_supported_language_variant(settings.LANGUAGE_CODE)
except LookupError:
return settings.LANGUAGE_CODE
def get_language_from_request_and_is_from_path(request): # noqa complexity-16
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language. It also returns a value to determine if the language code
was derived from a language code in the URL, or inferred from some other source.
:returns: tuple of language code, boolean. The former can be None if the url being
requested does not require translation, otherwise it should be a language code
from the values in settings.LANGUAGES. The boolean should indicate whether the
language code was calculated by reading a language code from the requested URL.
    If it was, True is returned; if the URL language code was not used or not
    present, False is returned.
"""
try:
# If this is not a view that needs to be translated, return None, and be done with it!
if not getattr(resolve(request.path_info).func, "translated", False):
return None, False
except Resolver404:
# If this is an unrecognized URL, it may be redirectable to a language prefixed
# URL, so let the language code setting carry on from here.
pass
supported_lang_codes = get_languages()
lang_code = get_language_from_path(request.path_info)
if lang_code in supported_lang_codes and lang_code is not None:
return lang_code, True
if hasattr(request, "session"):
lang_code = request.session.get(LANGUAGE_SESSION_KEY)
if (
lang_code in supported_lang_codes
and lang_code is not None
and check_for_language(lang_code)
):
return lang_code, False
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
try:
return get_supported_language_variant(lang_code), False
except LookupError:
pass
device_language = get_device_language()
if device_language is not None:
return device_language, False
headers_language = get_accept_headers_language(request)
if headers_language is not None:
return headers_language, False
return get_settings_language(), False
def i18n_patterns(urls, prefix=None):
"""
Add the language code prefix to every URL pattern within this function.
Vendored from https://github.com/django/django/blob/stable/1.11.x/django/conf/urls/i18n.py
to allow use of this outside of the root URL conf to prefix plugin non-api urls.
"""
if not settings.USE_I18N:
return list(urls)
def recurse_urls_and_set(urls_to_set):
for url in urls_to_set:
if hasattr(url, "urlpatterns") and url.urlpatterns:
recurse_urls_and_set(url.urlpatterns)
elif hasattr(url, "callback") and url.callback:
setattr(url.callback, "translated", True)
recurse_urls_and_set(urls)
return [LocaleRegexURLResolver(list(urls), prefix=prefix)]
class LocaleRegexURLResolver(RegexURLResolver):
"""
A URL resolver that always matches the active language code as URL prefix.
Rather than taking a regex argument, we just override the ``regex``
function to always return the active language-code as regex.
Vendored from https://github.com/django/django/blob/stable/1.11.x/django/urls/resolvers.py
As using the Django internal version inside included URL configs is disallowed.
Rather than monkey patch Django to allow this for our use case, make a copy of this here
and use this instead.
"""
def __init__(
self,
urlconf_name,
default_kwargs=None,
app_name=None,
namespace=None,
prefix_default_language=True,
prefix=None,
):
super(LocaleRegexURLResolver, self).__init__(
None, urlconf_name, default_kwargs, app_name, namespace
)
self.prefix_default_language = prefix_default_language
self._prefix = prefix
@property
def regex(self):
device_language = get_device_language() or get_settings_language()
language_code = get_language() or device_language
if language_code not in self._regex_dict:
if language_code == device_language and not self.prefix_default_language:
regex_string = self._prefix or ""
else:
regex_string = ("^%s/" % language_code) + (self._prefix or "")
self._regex_dict[language_code] = re.compile(regex_string, re.UNICODE)
return self._regex_dict[language_code]
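# A hedged usage sketch (names below are illustrative, not from the original
# codebase): i18n_patterns is meant to wrap a plugin's non-API urlpatterns so
# they gain a language-code prefix, e.g.
#
#     from django.conf.urls import url
#     urlpatterns = i18n_patterns(
#         [url(r'^about/$', about_view, name='about')],
#         prefix='my_plugin/',
#     )
#
# With an active language of 'en' (and default-language prefixing enabled),
# the resolver's regex becomes '^en/my_plugin/', so these URLs resolve under
# that prefix.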
|
StarcoderdataPython
|
4840315
|
import json
import numpy as np
import matplotlib.pyplot as plt
from scipy.io.wavfile import write
# Synthesize the tone based on the input parameters
def tone_synthesizer(freq, duration, amplitude=1.0, sampling_freq=44100):
# Construct the time axis
    time_axis = np.linspace(0, duration, int(duration * sampling_freq))
# Construct the audio signal
signal = amplitude * np.sin(2 * np.pi * freq * time_axis)
return signal.astype(np.int16)
if __name__=='__main__':
# Names of output files
file_tone_single = 'generated_tone_single.wav'
file_tone_sequence = 'generated_tone_sequence.wav'
# Source: http://www.phy.mtu.edu/~suits/notefreqs.html
mapping_file = 'tone_mapping.json'
# Load the tone to frequency map from the mapping file
with open(mapping_file, 'r') as f:
tone_map = json.loads(f.read())
# Set input parameters to generate 'F' tone
tone_name = 'F'
duration = 3 # seconds
amplitude = 12000
sampling_freq = 44100 # Hz
# Extract the tone frequency
tone_freq = tone_map[tone_name]
# Generate the tone using the above parameters
synthesized_tone = tone_synthesizer(tone_freq, duration, amplitude, sampling_freq)
# Write the audio signal to the output file
write(file_tone_single, sampling_freq, synthesized_tone)
# Define the tone sequence along with corresponding durations in seconds
tone_sequence = [('G', 0.4), ('D', 0.5), ('F', 0.3), ('C', 0.6), ('A', 0.4)]
# Construct the audio signal based on the above sequence
signal = np.array([])
for item in tone_sequence:
# Get the name of the tone
tone_name = item[0]
# Extract the corresponding frequency of the tone
freq = tone_map[tone_name]
# Extract the duration
duration = item[1]
# Synthesize the tone
synthesized_tone = tone_synthesizer(freq, duration, amplitude, sampling_freq)
# Append the output signal
signal = np.append(signal, synthesized_tone, axis=0)
# Save the audio in the output file
    write(file_tone_sequence, sampling_freq, signal.astype(np.int16))
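    # A minimal sketch of the structure 'tone_mapping.json' is assumed to have:
    # a flat JSON object mapping note names to frequencies in Hz. The values
    # below are standard equal-tempered frequencies; the mapping file used by
    # the original author may differ.
    #
    #     {"A": 440.00, "C": 261.63, "D": 293.66, "F": 349.23, "G": 392.00}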
|
StarcoderdataPython
|
32704
|
from pydantic import BaseModel
class ReadyResponse(BaseModel):
status: str
|
StarcoderdataPython
|
3210356
|
def get_attr_by_type(node, type):
for attr in node.attributes:
if attr.type == type:
return attr
return None
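# A small illustrative usage; the concrete node type is not shown in the
# original snippet, so SimpleNamespace stands in for an object whose
# `attributes` entries expose a `.type` field.
if __name__ == "__main__":
    from types import SimpleNamespace

    node = SimpleNamespace(attributes=[
        SimpleNamespace(type="id", value=1),
        SimpleNamespace(type="name", value="example"),
    ])
    attr = get_attr_by_type(node, "name")
    print(attr.value if attr else None)  # -> example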
|
StarcoderdataPython
|
80074
|
"""Project Euler problem 3"""
def sqrt(number):
"""Returns the square root of the specified number as an int, rounded down"""
assert number >= 0
offset = 1
while offset ** 2 <= number:
offset *= 2
count = 0
while offset > 0:
if (count + offset) ** 2 <= number:
count += offset
offset //= 2
return count
def smallest_prime_factor(number):
"""Returns the smallest prime factor of the specified number"""
assert number >= 2
for potential in range(2, sqrt(number) + 1):
if number % potential == 0:
return potential
return number
def calculate(number):
"""Returns the largest prime factor of the specified number"""
while True:
smallest = smallest_prime_factor(number)
if number > smallest:
number //= smallest
else:
answer = number
return answer
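# A few quick self-checks of the helpers above (values verified by hand;
# 13195 is the worked example from the problem statement, whose largest
# prime factor is 29).
assert sqrt(10) == 3
assert smallest_prime_factor(15) == 3
assert calculate(13195) == 29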
if __name__ == "__main__":
print(calculate(600851475143))
|
StarcoderdataPython
|
155327
|
<reponame>John-ye666/Python-for-Finance-Second-Edition
"""
Name : c8_02_get_date_variable.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import pandas as pd
import scipy as sp
sp.random.seed(1257)
mean=0.10
std=0.2
ddate = pd.date_range('1/1/2016', periods=252)
n=len(ddate)
rets=sp.random.normal(mean,std,n)
data = pd.DataFrame(rets, index=ddate,columns=['RET'])
print(data.head())
|
StarcoderdataPython
|
1641425
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
from tqdm import tnrange, tqdm_notebook
import collections
import math
import numpy as np
from collections import defaultdict
import threading
import networkx as nx
import pandas as pd
import operator
def graph_dict():
'''
    We first chose to use a dictionary to collect all the nodes that have out-edges.
    Because each row in the txt file has the form "node\tadjacentnode", we parse every row
    and split the two fields, adding the first node as the key and the second as the endpoint of one of its edges.
    Of course each node can have more than one edge, so we associate a list with each node key.
'''
graph = defaultdict(list)
list1= list()
with open("wiki-topcats-reduced.txt") as f:
for line in f:
list1 = line.strip().split("\t")
if list1[0] in graph.keys():
graph[list1[0]].append(list1[1])
else:
graph[list1[0]]=[list1[1]]
return(graph)
def create_graph_and_dict():
'''
    The next method, distance_graph, needs the overall structure of the graph.
    Thus, instead of using the dictionary as before, we employ the networkx library.
    We set a 'Category' attribute on each node of the graph for the categories it belongs to; at the same time we build a dictionary with every category name as key and the list of nodes associated with that category as value.
    Parsing through this last dictionary, we can associate the right list of categories with each node.
'''
G=nx.DiGraph()
with open("wiki-topcats-reduced.txt") as f:
for line in f:
list1 = line.strip().split("\t")
G.add_node(list1[0])
G.add_edge(list1[0],list1[1])
### adding attribute 'Category' to each node of the graph
for i in G:
G.node[i]['Category']=[]
category_dict = defaultdict()
with open("wiki-topcats-categories.txt", "r") as f:
lst2=G.nodes()
for line in f:
cat_list = line.strip().split(";")
category = cat_list[0][9:]
lst = cat_list[1].strip().split(" ")
if len(lst) > 3500:
lst1=[el for el in lst if el in lst2]
category_dict[category] = lst1
#Assign attributes to each node
for cat in category_dict:
lst=category_dict[cat]
for e in lst:
if e in G:
G.node[e]['Category'].append(cat)
return G, category_dict
# In our algorithm, for each root in the input category we traverse the graph, and at each step we check the node's
# attributes to see whether it belongs to one of the categories we are looking for while not belonging to the input category.
# Therefore, each time the function is called, the nodes reachable from the roots are checked against 4 categories at once.
def distance_graph(G, C0, C1,category_dict):
'''
    This method performs all the steps needed to return the median distance to each category given as input in the 3rd parameter.
    Given the graph and the category_dict created above, C0 is always the same for each call, because we want the distance from this category to the others; C1 is a list of 4 categories.
    For each node of the input category C0 we need to compute all possible paths to the nodes of the other categories.
    We take only a slice of them (2000) to avoid getting stuck, because there are a lot of nodes.
    Starting from this idea,
    we decided to handle 4 categories instead of just one because the graph is huge: rather than iterating over it once per category, each pass checks four categories at a time.
    We wrote something similar to the breadth-first search algorithm, with the difference that the shortest-path list of a category is updated whenever the algorithm finds a node that belongs to one of the categories in the list C1.
    In this way we don't have a single destination (or a loop over every node of the category we want to reach).
    We agreed that keeping four long lists (shortest_paths) was cheaper than parsing the whole graph looking for a specific node.
    Every time we visit a node, whether it comes from C0 or from one of the categories in C1,
    we add it to the SEEN set, to avoid loops and to avoid re-checking a node that has already been discovered.
    Once we have the lists with all possible path lengths, we add the infinite values to them.
    Because we start from 2000 nodes of c0 but try to reach all nodes of the categories in C1, some nodes are not reachable.
    We add the infinities "manually": given the length of each shortest-path list, the number of unreached pairs is obtained by subtracting it from the number of all possible pairs (length of the i-th category of C1 multiplied by the length of c0).
    This value is how many nodes of the i-th category of C1 have not been reached, and we append that many infinities to the shortest-path list.
    Finally we return the median for each category of C1.
'''
c0 = category_dict[C0][0:2000]
shortest_paths_1 = list()
shortest_paths_2 = list()
shortest_paths_3 = list()
shortest_paths_4 = list()
for i in tnrange(len(c0)):
root=c0[i]
step = 0
seen=set([root])
queue=collections.deque([(root,step)])
while queue:
vertex=queue.popleft()
if C1[0] in G.node[vertex[0]]['Category'] and C0 not in G.node[vertex[0]]['Category']:
shortest_paths_1.append(step)
elif C1[1] in G.node[vertex[0]]['Category'] and C0 not in G.node[vertex[0]]['Category']:
shortest_paths_2.append(step)
elif C1[2] in G.node[vertex[0]]['Category'] and C0 not in G.node[vertex[0]]['Category']:
shortest_paths_3.append(step)
elif C1[3] in G.node[vertex[0]]['Category'] and C0 not in G.node[vertex[0]]['Category']:
shortest_paths_4.append(step)
neighbors1 = list(G[vertex[0]])
step=vertex[1]+1
if neighbors1 == []:
continue
for i in neighbors1:
if i not in seen:
queue.append((i,step))
seen.add(i)
for i in range(len(C1)):
lc = len(category_dict[C1[i]])
if len(eval('shortest_paths_%d'% (i+1))) != lc:
diff = abs(len(eval('shortest_paths_%d'% (i+1))) - lc*len(c0))
aux_l = [math.inf for i in range(diff)]
eval("shortest_paths_{}".format(i+1)).extend(aux_l)
return [(C1[0], np.median(np.array(sorted(shortest_paths_1)))), (C1[1], np.median(np.array(sorted(shortest_paths_2)))),
(C1[2], np.median(np.array(sorted(shortest_paths_3)))), (C1[3], np.median(np.array(sorted(shortest_paths_4))))]
#@autojit
def distance_graph2(G, C0, C1,category_dict):
'''
    This does the same as before, but it is run for the last 2 categories plus the input category.
'''
c0 = category_dict[C0][0:2000]
#with tqdm(total=value) as pbar:
shortest_paths_1 = list()
shortest_paths_2 = list()
shortest_paths_3 = list()
#shortest_paths_4 = list()
for i in tnrange(len(c0)):
root=c0[i]
#pbar.write("proccesed: %d" %c0)
#pbar.update(1)
step = 0
seen=set([root])
queue=collections.deque([(root,step)])
while queue:
vertex=queue.popleft()
if C1[0] in G.node[vertex[0]]['Category'] and C0 not in G.node[vertex[0]]['Category']:
shortest_paths_1.append(step)
elif C1[1] in G.node[vertex[0]]['Category'] and C0 not in G.node[vertex[0]]['Category']:
shortest_paths_2.append(step)
elif C1[2] in G.node[vertex[0]]['Category'] and C0 not in G.node[vertex[0]]['Category']:
shortest_paths_3.append(step)
neighbors1 = list(G[vertex[0]])
step=vertex[1]+1
if neighbors1 == []:
continue
for i in neighbors1:
if i not in seen:
queue.append((i,step))
seen.add(i)
for i in range(len(C1)):
lc = len(category_dict[C1[i]])
if len(eval('shortest_paths_%d'% (i+1))) != lc:
diff = abs(len(eval('shortest_paths_%d'% (i+1))) - lc*len(c0))
aux_l = [math.inf for i in range(diff)]
eval("shortest_paths_{}".format(i+1)).extend(aux_l)
    return [(C1[0], np.median(np.array(sorted(shortest_paths_1)))), (C1[1], np.median(np.array(sorted(shortest_paths_2)))),
            (C1[2], np.median(np.array(sorted(shortest_paths_3))))]  # (C1[3], np.median(...)) is unused here: only 3 categories
def steps(G,category_dict):
'''
    This method was created to compute the subgraph.
    First, we re-assign a single category to each node: the category it belongs to that is closest to the input category (C0).
    After this, we initialize every node of the original graph with a new attribute: 'Score'.
    Then we compute the subgraph for the list of nodes of the input category, which are scored first and separately from the others.
    We then iterate through each following category that appears in the distance ranking.
    For each category we build the subgraph from all the nodes seen so far (including the previous ones),
    but the scoring iteration only runs over the nodes of the category being considered.
    For each of these nodes we check its in_edges and the related source nodes to see whether:
    - the source node has not been scored yet,
    - the source node belongs to the same category,
    - or the source node has already been assigned a score.
    In this way we can assign a score to every node of every category.
'''
dfg=pd.read_csv('ranking_table.csv')
for e in G:
Distance={}
if len(G.node[e]['Category'])>1:
for i in G.node[e]['Category']:
Distance[i]=(dfg.loc[dfg.Category==i]['Distance'].values)[0]
sorted_d = sorted(Distance.items(), key=operator.itemgetter(1))
G.node[e]['Category']=sorted_d[0][0]
else:
G.node[e]['Category']=G.node[e]['Category'][0]
category_dict1={}
for k in category_dict:
m=category_dict[k]
l=[]
for n in m:
if G.node[n]['Category']==k:
l.append(n)
category_dict1[k]=l
nodes_G = G.nodes()
for n in nodes_G:
G.node[n]['score'] = -1
input_category='Indian_films'
c0 = category_dict[input_category][0:2000]
sub = G.subgraph(c0)
for s in sub:
G.node[s]['score'] = len(sub.in_edges(s))
categories = list(dfg['Category'].values) #taking all categories from the ranking dataframe
categories.remove('Indian_films')
from collections import defaultdict
l = c0 #list of nodes in the subgraph (now only input_category nodes)
for i in tnrange(len(categories)):
c = categories[i]
l1 = category_dict1[c]
l += l1
sub1 = G.subgraph(l)
d1=dict.fromkeys(l1, -2)
for e1 in l1:
if G.node[e1]['score']!=-1:
continue
else:
ie1 = sub1.in_edges(e1)
ie1 = [el[0] for el in ie1]
score = 0
for f in ie1:
try:
if d1[f]== -2:
score+=1
continue
elif G.node[f]['score'] == -1:
score+=1
continue
else:
score += G.node[f]['score']
except:
score += G.node[f]['score']
#elif G.node[f]['score'] == -1:
#score +=1
G.node[e1]['score'] = score
del d1
return G
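# A tiny worked example of the padding-plus-median step described in the
# distance_graph docstring: BFS step counts are collected for reachable
# targets, every unreachable target contributes math.inf, and the median of
# the combined list is the reported distance. (Toy numbers, not taken from
# the real wiki dataset.)
if __name__ == "__main__":
    reached = [1, 2, 2, 3]      # BFS steps for targets that were reached
    n_unreachable = 2           # targets that were never reached
    padded = sorted(reached + [math.inf] * n_unreachable)
    print(np.median(np.array(padded)))  # -> 2.5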
|
StarcoderdataPython
|
4838967
|
<reponame>dcompane/controlm_py<gh_stars>1-10
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.220
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import controlm_py
from controlm_py.models.add_server_params import AddServerParams # noqa: E501
from controlm_py.rest import ApiException
class TestAddServerParams(unittest.TestCase):
"""AddServerParams unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAddServerParams(self):
"""Test AddServerParams"""
# FIXME: construct object with mandatory attributes with example values
# model = controlm_py.models.add_server_params.AddServerParams() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4823672
|
<gh_stars>0
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.views import generic
from . import views_help
from .forms import *
def index(request):
return render(request, 'tracers/index.html')
# TODO: add a warning before leaving page (here & for contact)
def poscase(request):
test = views_help.next_test()
if test is not None:
context = {'success': True, 'date': test.test_date,
'phone': test.person.phone_num, 'name': test.person.name,
'form_addcontact': Form({'case_id': test.id, 'case_name': test.person.name}),
'form_confirm': TestContactedForm({'case_id': test.id})
}
print("test id is")
print(test.id)
else:
context = {'success': False, 'error': "No new positive case available"}
return render(request, 'tracers/poscase.html', context)
def contact(request):
cont = views_help.next_contact()
if cont is not None:
context = {'success': True,
'phone': cont.case_contact.phone_num, 'name': cont.case_contact.name,
'form_confirm': ContactContactedForm({'contact_id': cont.id})
}
print("contact id is")
print(cont.id)
else:
context = {'success': False, 'error': "No new contact available"}
return render(request, 'tracers/contact.html', context)
# TODO: make the addcontact form appear in new smaller window
def add_contact(request):
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
form.add_contact()
return HttpResponse('<script type="text/javascript">window.close()</script>')
else:
return render(request, 'tracers/contactForm.html', context={'form': form})
else:
return HttpResponseRedirect('/tracers')
def add_testcontacted(request):
if request.method == 'POST':
form = TestContactedForm(request.POST)
if form.is_valid():
form.confirm_call()
return HttpResponseRedirect('/tracers')
else:
return HttpResponseRedirect('/tracers/error')
else:
return HttpResponseRedirect('/tracers/error')
def add_contactcontacted(request):
if request.method == 'POST':
form = ContactContactedForm(request.POST)
if form.is_valid():
form.confirm_call()
return HttpResponseRedirect('/tracers')
else:
return HttpResponseRedirect('/tracers/error')
else:
return HttpResponseRedirect('/tracers/error')
def error(request):
return HttpResponse('There was an error')
|
StarcoderdataPython
|
1711003
|
<filename>tests/test_event.py
from aiomessaging.event import Event
def test_simple():
e1 = Event('echo')
e2 = Event.from_dict({'type': 'echo'})
assert e1.type == e2.type
def test_to_dict():
e = Event('echo')
e.to_dict()
|
StarcoderdataPython
|
155947
|
<reponame>codingJWilliams/Assignment_55_Ciphers
# <NAME> 2017
# This module is included as part of https://github.com/codingJWilliams/jw_utils
# Liscenced according to ../../LISCENCE
# Contact codingJWilliams on github with any queries
import jw_utils, os, json
class Bucket:
def __init__(self, uid, location=("." + os.sep + "buckets.json")):
print(location)
        if not os.path.exists(location):
print("MAKING")
with open(location, "w") as f:
f.write(json.dumps( {uid: ""} ))
else:
with open(location, "r") as f:
bc = json.loads(f.read())
try:
bc[uid]
except:
bc[uid] = ""
with open(location, "w") as f:
f.write(json.dumps( bc ))
self.location = location
self.uid = uid
def __repr__(self):
return "< JW_Bucket('" + self.location + "', '" + self.uid + "') >"
def get(self):
with open(self.location, "r") as f:
BucketCollection = json.loads(f.read())
bucket = BucketCollection[self.uid]
return bucket
def set(self, value):
with open(self.location, "r") as f:
BucketCollection = json.loads(f.read())
BucketCollection[self.uid] = value
with open(self.location, "w") as f:
f.write(json.dumps(BucketCollection))
|
StarcoderdataPython
|
1689787
|
<filename>onnx2torch/node_converters/reduce.py
__all__ = [
'OnnxReduceSumDynamicAxes',
'OnnxReduceSumStaticAxes',
'OnnxReduceStaticAxes',
]
from functools import partial
from typing import Any
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import torch
import torch._C as torch_C
from torch import nn
from onnx2torch.node_converters.registry import add_converter
from onnx2torch.onnx_graph import OnnxGraph
from onnx2torch.onnx_node import OnnxNode
from onnx2torch.utils.common import OnnxMapping
from onnx2torch.utils.common import OnnxToTorchModule
from onnx2torch.utils.common import OperationConverterResult
from onnx2torch.utils.common import get_const_value
from onnx2torch.utils.common import onnx_mapping_from_node
from onnx2torch.utils.custom_export_to_onnx import CustomExportToOnnx
from onnx2torch.utils.custom_export_to_onnx import OnnxToTorchModuleWithCustomExport
@torch.fx.wrap
def _get_element(x: Union[List, Tuple], index: int = 0) -> Any:
if isinstance(x, (tuple, list)):
return x[index]
return x
def _initialize_none_dim(dim: Optional[Union[int, Tuple[int, ...]]], input_dim: int):
if dim is None:
return list(range(input_dim))
return dim
def _log_sum(
input_tensor: torch.Tensor,
dim: Optional[Union[int, Tuple[int, ...]]] = None,
keepdim: bool = False,
):
dim = _initialize_none_dim(dim, input_tensor.dim())
return torch.log(torch.sum(input_tensor, dim=dim, keepdim=keepdim))
def _log_sum_exp(
input_tensor: torch.Tensor,
dim: Optional[Union[int, Tuple[int, ...]]] = None,
keepdim: bool = False,
):
dim = _initialize_none_dim(dim, input_tensor.dim())
return torch.logsumexp(input_tensor, dim=dim, keepdim=keepdim)
def _sum_square(
input_tensor: torch.Tensor,
dim: Optional[Union[int, Tuple[int, ...]]] = None,
keepdim: bool = False,
):
dim = _initialize_none_dim(dim, input_tensor.dim())
return torch.sum(torch.square(input_tensor), dim=dim, keepdim=keepdim)
_TORCH_FUNCTION_FROM_ONNX_TYPE = {
'ReduceL1': partial(torch.norm, p=1),
'ReduceL2': partial(torch.norm, p=2),
'ReduceLogSum': _log_sum,
'ReduceLogSumExp': _log_sum_exp,
'ReduceMax': torch.max,
'ReduceMean': torch.mean,
'ReduceMin': torch.min,
'ReduceProd': torch.prod,
'ReduceSum': torch.sum,
'ReduceSumSquare': _sum_square,
}
class OnnxReduceSumDynamicAxes(nn.Module, OnnxToTorchModuleWithCustomExport):
def __init__(self, keepdims: int = 1, noop_with_empty_axes: int = 0):
super().__init__()
self.keepdims = keepdims == 1
self.noop_with_empty_axes = noop_with_empty_axes == 1
def _do_forward(self, input_tensor: torch.Tensor, axes: torch.Tensor) -> torch.Tensor:
if axes is None or axes.nelement() == 0:
if self.noop_with_empty_axes:
return input_tensor
if not self.keepdims:
return torch.sum(input_tensor)
axes = list(range(input_tensor.dim()))
else:
axes = torch.sort(axes).values.tolist()
return torch.sum(input_tensor, dim=axes, keepdim=self.keepdims)
def forward(self, input_tensor: torch.Tensor, axes: Optional[torch.Tensor] = None) -> torch.Tensor:
output = self._do_forward(input_tensor, axes)
if torch.onnx.is_in_onnx_export():
args = [input_tensor]
if axes is not None:
args.append(axes)
return _ReduceSumExportToOnnx.set_output_and_apply(
output,
*args,
int(self.keepdims),
int(self.noop_with_empty_axes),
)
return output
class _ReduceSumExportToOnnx(CustomExportToOnnx): # pylint: disable=abstract-method
@staticmethod
def symbolic(graph: torch_C.Graph, *args) -> torch_C.Value:
*args, keepdims, noop_with_empty_axes = args
return graph.op(
'ReduceSum',
*args,
noop_with_empty_axes_i=noop_with_empty_axes,
keepdims_i=keepdims,
outputs=1,
)
class OnnxReduceSumStaticAxes(nn.Module, OnnxToTorchModule):
def __init__(
self,
axes: List[int],
keepdims: int = 1,
noop_with_empty_axes: int = 0,
):
super().__init__()
if axes is not None:
axes = sorted(axes)
self.keepdims = keepdims == 1
self.noop_with_empty_axes = noop_with_empty_axes == 1
self.axes = axes
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
if self.axes is None or len(self.axes) == 0:
if self.noop_with_empty_axes:
return input_tensor
if not self.keepdims:
                return torch.sum(input_tensor)
self.axes = list(range(input_tensor.dim()))
return torch.sum(input_tensor, dim=self.axes, keepdim=self.keepdims)
class OnnxReduceStaticAxes(nn.Module, OnnxToTorchModule):
def __init__(
self,
operation_type: str,
axes: List[int],
keepdims: int = 1,
):
super().__init__()
self.operation_type = operation_type
self.math_op_function = _TORCH_FUNCTION_FROM_ONNX_TYPE[operation_type]
if axes is not None:
axes = sorted(axes)
self.keepdims = keepdims == 1
self.axes = axes
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
if self.axes is None or len(self.axes) == 0:
if not self.keepdims:
return self.math_op_function(input_tensor)
self.axes = list(range(input_tensor.dim()))
if self.operation_type not in ['ReduceMax', 'ReduceMin', 'ReduceProd']:
return self.math_op_function(input_tensor, dim=self.axes, keepdim=self.keepdims)
result = input_tensor
for passed_dims, axis in enumerate(self.axes):
result = self.math_op_function(
result,
dim=axis if self.keepdims else axis - passed_dims,
keepdim=self.keepdims,
)
result = _get_element(result, 0)
return result
@add_converter(operation_type='ReduceL1', version=1)
@add_converter(operation_type='ReduceL1', version=11)
@add_converter(operation_type='ReduceL1', version=13)
@add_converter(operation_type='ReduceL2', version=1)
@add_converter(operation_type='ReduceL2', version=11)
@add_converter(operation_type='ReduceL2', version=13)
@add_converter(operation_type='ReduceLogSum', version=1)
@add_converter(operation_type='ReduceLogSum', version=11)
@add_converter(operation_type='ReduceLogSum', version=13)
@add_converter(operation_type='ReduceLogSumExp', version=1)
@add_converter(operation_type='ReduceLogSumExp', version=11)
@add_converter(operation_type='ReduceLogSumExp', version=13)
@add_converter(operation_type='ReduceMax', version=1)
@add_converter(operation_type='ReduceMax', version=11)
@add_converter(operation_type='ReduceMax', version=12)
@add_converter(operation_type='ReduceMax', version=13)
@add_converter(operation_type='ReduceMean', version=1)
@add_converter(operation_type='ReduceMean', version=11)
@add_converter(operation_type='ReduceMean', version=13)
@add_converter(operation_type='ReduceMin', version=1)
@add_converter(operation_type='ReduceMin', version=11)
@add_converter(operation_type='ReduceMin', version=12)
@add_converter(operation_type='ReduceMin', version=13)
@add_converter(operation_type='ReduceProd', version=1)
@add_converter(operation_type='ReduceProd', version=11)
@add_converter(operation_type='ReduceProd', version=13)
@add_converter(operation_type='ReduceSum', version=1)
@add_converter(operation_type='ReduceSum', version=11)
@add_converter(operation_type='ReduceSumSquare', version=1)
@add_converter(operation_type='ReduceSumSquare', version=11)
@add_converter(operation_type='ReduceSumSquare', version=13)
def _(node: OnnxNode, graph: OnnxGraph) -> OperationConverterResult: # pylint: disable=unused-argument
node_attributes = node.attributes
axes = node_attributes.get('axes', None)
keepdims = node_attributes.get('keepdims', 1)
return OperationConverterResult(
torch_module=OnnxReduceStaticAxes(
operation_type=node.operation_type,
axes=axes,
keepdims=keepdims,
),
onnx_mapping=onnx_mapping_from_node(node=node),
)
@add_converter(operation_type='ReduceSum', version=13)
def _(node: OnnxNode, graph: OnnxGraph) -> OperationConverterResult: # pylint: disable=unused-argument
keepdims = node.attributes.get('keepdims', 1)
noop_with_empty_axes = node.attributes.get('noop_with_empty_axes', 0)
if len(node.input_values) == 2:
try:
axes = get_const_value(node.input_values[1], graph)
return OperationConverterResult(
torch_module=OnnxReduceSumStaticAxes(
axes=axes,
keepdims=keepdims,
noop_with_empty_axes=noop_with_empty_axes,
),
onnx_mapping=OnnxMapping(
inputs=(node.input_values[0],),
outputs=node.output_values,
),
)
except KeyError:
pass
return OperationConverterResult(
torch_module=OnnxReduceSumDynamicAxes(keepdims=keepdims, noop_with_empty_axes=noop_with_empty_axes),
onnx_mapping=onnx_mapping_from_node(node),
)
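# A small illustration of why OnnxReduceStaticAxes applies ReduceMax,
# ReduceMin and ReduceProd one axis at a time: unlike torch.sum or torch.mean,
# torch.max only accepts a single integer dim and returns a (values, indices)
# pair, so each step reduces one axis and _get_element keeps the values.
if __name__ == "__main__":
    x = torch.arange(24, dtype=torch.float32).reshape(2, 3, 4)
    step1 = _get_element(torch.max(x, dim=0, keepdim=True), 0)  # values only
    step2 = _get_element(torch.max(step1, dim=1, keepdim=True), 0)
    print(step2.shape)  # -> torch.Size([1, 1, 4])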
|
StarcoderdataPython
|
1733104
|
<gh_stars>0
from unittest import TestCase
from nose.tools import eq_
from . import fakeinstance
from ..utils import compose_key, decompose_key, translate
_foo_cache = {}
class FakeModelManager(object):
def get(self, **kwargs):
return _foo_cache[kwargs['id']]
class FakeModel(object):
def __init__(self, id_):
self.id = id_
_foo_cache[id_] = self
objects = FakeModelManager()
class TestKeys(TestCase):
def tearDown(self):
_foo_cache.clear()
def test_compose_key(self):
foo = FakeModel(15)
eq_(compose_key(foo), 'fjord.translations.tests.test__utils:FakeModel:15')
def test_decompose_key(self):
foo = FakeModel(15)
key = 'fjord.translations.tests.test__utils:FakeModel:15'
eq_(decompose_key(key), foo)
class TestTranslate(TestCase):
def test_translate_fake(self):
obj = fakeinstance(
fields={'desc': 'trans_desc'},
translate_with=lambda x: 'fake',
desc=u'This is a test string'
)
eq_(getattr(obj, 'trans_desc', None), None)
translate(obj, 'fake', 'br', 'desc', 'en-US', 'trans_desc')
eq_(getattr(obj, 'trans_desc', None), u'THIS IS A TEST STRING')
def test_translate_dennis(self):
obj = fakeinstance(
fields={'desc': 'trans_desc'},
translate_with=lambda x: 'dennis',
desc=u'This is a test string'
)
eq_(getattr(obj, 'trans_desc', None), None)
translate(obj, 'dennis', 'br', 'desc', 'en-US', 'trans_desc')
eq_(getattr(obj, 'trans_desc', None),
u'\xabTHIS IS A TEST STRING\xbb')
|
StarcoderdataPython
|
3215595
|
"""
Periodic Hexagonal
==================
"""
import numpy as np
from ...reactions import GenericReactionFactory
from ..topology_graph import Edge, NullOptimizer
from .cof import Cof
from .vertices import LinearVertex, NonLinearVertex
class PeriodicHexagonal(Cof):
"""
Represents a periodic hexagonal COF topology graph.
    Unoptimized construction
.. moldoc::
import moldoc.molecule as molecule
import stk
bb1 = stk.BuildingBlock('BrCCBr', [stk.BromoFactory()])
bb2 = stk.BuildingBlock(
smiles='Brc1c(Br)c(Br)c(Br)c(Br)c1Br',
functional_groups=[stk.BromoFactory()],
)
cof = stk.ConstructedMolecule(
topology_graph=stk.cof.PeriodicHexagonal(
building_blocks=(bb1, bb2),
lattice_size=(2, 2, 1),
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
cof.get_atoms(),
cof.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in cof.get_bonds()
if all(p == 0 for p in bond.get_periodicity())
),
)
``Collapser(scale_steps=False)`` optimized construction
.. moldoc::
import moldoc.molecule as molecule
import stk
bb1 = stk.BuildingBlock('BrCCBr', [stk.BromoFactory()])
bb2 = stk.BuildingBlock(
smiles='Brc1c(Br)c(Br)c(Br)c(Br)c1Br',
functional_groups=[stk.BromoFactory()],
)
cof = stk.ConstructedMolecule(
topology_graph=stk.cof.PeriodicHexagonal(
building_blocks=(bb1, bb2),
lattice_size=(2, 2, 1),
optimizer=stk.Collapser(scale_steps=False),
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
cof.get_atoms(),
cof.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in cof.get_bonds()
if all(p == 0 for p in bond.get_periodicity())
),
)
Building blocks with six and two functional groups are required
for this topology graph.
When using a :class:`dict` for the `building_blocks` parameter,
as in :ref:`cof-topology-graph-examples`:
*Multi-Building Block COF Construction*, a
:class:`.BuildingBlock`, with the following number of functional
groups, needs to be assigned to each of the following vertex ids:
| 6-functional groups: 0 to 3
| 2-functional groups: 4 to 15
Note that optimizers may not optimize the :class:`.PeriodicInfo`.
The documentation of the optimizer will state if it does.
See :class:`.Cof` for more details and examples.
"""
def __init__(
self,
building_blocks,
lattice_size,
vertex_alignments=None,
reaction_factory=GenericReactionFactory(),
num_processes=1,
optimizer=NullOptimizer(),
):
"""
Initialize a :class:`.PeriodicHexagonal` instance.
Parameters
----------
building_blocks : :class:`tuple` or :class:`dict`
Can be a :class:`tuple` of :class:`.BuildingBlock`
instances, which should be placed on the topology graph.
Can also be a :class:`dict` which maps the
:class:`.BuildingBlock` instances to the ids of the
vertices it should be placed on. A :class:`dict` is
required when there are multiple building blocks with the
same number of functional groups, because in this case
the desired placement is ambiguous.
lattice_size : :class:`tuple` of :class:`int`
The size of the lattice in the x, y and z directions.
vertex_alignments : :class:`dict`, optional
A mapping from the id of a :class:`.Vertex`
to an :class:`.Edge` connected to it.
The :class:`.Edge` is used to align the first
:class:`.FunctionalGroup` of a :class:`.BuildingBlock`
placed on that vertex. Only vertices which need to have
their default edge changed need to be present in the
:class:`dict`. If ``None`` then the default edge is used
for each vertex. Changing which :class:`.Edge` is used will
mean that the topology graph represents different
structural isomers. The edge is referred to by a number
between ``0`` (inclusive) and the number of edges the
vertex is connected to (exclusive).
reaction_factory : :class:`.ReactionFactory`, optional
The reaction factory to use for creating bonds between
building blocks.
num_processes : :class:`int`, optional
The number of parallel processes to create during
:meth:`construct`.
optimizer : :class:`.Optimizer`, optional
Used to optimize the structure of the constructed
molecule.
Raises
------
:class:`AssertionError`
            If any building block does not have a
valid number of functional groups.
:class:`ValueError`
            If there are multiple building blocks with the
same number of functional_groups in `building_blocks`,
and they are not explicitly assigned to vertices. The
desired placement of building blocks is ambiguous in
this case.
:class:`~.cof.UnoccupiedVertexError`
If a vertex of the COF topology graph does not have a
building block placed on it.
:class:`~.cof.OverlyOccupiedVertexError`
If a vertex of the COF topology graph has more than one
building block placed on it.
"""
super().__init__(
building_blocks=building_blocks,
lattice_size=lattice_size,
periodic=True,
vertex_alignments=vertex_alignments,
reaction_factory=reaction_factory,
num_processes=num_processes,
optimizer=optimizer,
)
_lattice_constants = _a, _b, _c = (
np.array([1., 0., 0.]),
np.array([0.5, 0.866, 0]),
np.array([0, 0, 5/1.7321])
)
_non_linears = (
NonLinearVertex(0, (1/4)*_a + (1/4)*_b + (1/2)*_c),
NonLinearVertex(1, (1/4)*_a + (3/4)*_b + (1/2)*_c),
NonLinearVertex(2, (3/4)*_a + (1/4)*_b + (1/2)*_c),
NonLinearVertex(3, (3/4)*_a + (3/4)*_b + (1/2)*_c),
)
_vertex_prototypes = (
*_non_linears,
LinearVertex.init_at_center(
id=4,
vertices=(_non_linears[0], _non_linears[1]),
),
LinearVertex.init_at_center(
id=5,
vertices=(_non_linears[0], _non_linears[2]),
),
LinearVertex.init_at_center(
id=6,
vertices=(_non_linears[1], _non_linears[2]),
),
LinearVertex.init_at_center(
id=7,
vertices=(_non_linears[1], _non_linears[3]),
),
LinearVertex.init_at_center(
id=8,
vertices=(_non_linears[2], _non_linears[3]),
),
LinearVertex.init_at_shifted_center(
id=9,
vertices=(_non_linears[0], _non_linears[2]),
cell_shifts=((0, 0, 0), (-1, 0, 0)),
lattice_constants=_lattice_constants,
),
LinearVertex.init_at_shifted_center(
id=10,
vertices=(_non_linears[0], _non_linears[1]),
cell_shifts=((0, 0, 0), (0, -1, 0)),
lattice_constants=_lattice_constants,
),
LinearVertex.init_at_shifted_center(
id=11,
vertices=(_non_linears[0], _non_linears[3]),
cell_shifts=((0, 0, 0), (0, -1, 0)),
lattice_constants=_lattice_constants,
),
LinearVertex.init_at_shifted_center(
id=12,
vertices=(_non_linears[2], _non_linears[1]),
cell_shifts=((0, 0, 0), (1, -1, 0)),
lattice_constants=_lattice_constants,
),
LinearVertex.init_at_shifted_center(
id=13,
vertices=(_non_linears[2], _non_linears[3]),
cell_shifts=((0, 0, 0), (0, -1, 0)),
lattice_constants=_lattice_constants,
),
LinearVertex.init_at_shifted_center(
id=14,
vertices=(_non_linears[1], _non_linears[3]),
cell_shifts=((0, 0, 0), (-1, 0, 0)),
lattice_constants=_lattice_constants,
),
LinearVertex.init_at_shifted_center(
id=15,
vertices=(_non_linears[3], _non_linears[0]),
cell_shifts=((0, 0, 0), (1, 0, 0)),
lattice_constants=_lattice_constants,
)
)
_edge_prototypes = (
Edge(0, _vertex_prototypes[4], _vertex_prototypes[0]),
Edge(1, _vertex_prototypes[4], _vertex_prototypes[1]),
Edge(2, _vertex_prototypes[5], _vertex_prototypes[0]),
Edge(3, _vertex_prototypes[5], _vertex_prototypes[2]),
Edge(4, _vertex_prototypes[6], _vertex_prototypes[1]),
Edge(5, _vertex_prototypes[6], _vertex_prototypes[2]),
Edge(6, _vertex_prototypes[7], _vertex_prototypes[1]),
Edge(7, _vertex_prototypes[7], _vertex_prototypes[3]),
Edge(8, _vertex_prototypes[8], _vertex_prototypes[2]),
Edge(9, _vertex_prototypes[8], _vertex_prototypes[3]),
Edge(10, _vertex_prototypes[9], _vertex_prototypes[0]),
Edge(
id=11,
vertex1=_vertex_prototypes[9],
vertex2=_vertex_prototypes[2],
periodicity=(-1, 0, 0),
),
Edge(12, _vertex_prototypes[10], _vertex_prototypes[0]),
Edge(
id=13,
vertex1=_vertex_prototypes[10],
vertex2=_vertex_prototypes[1],
periodicity=(0, -1, 0),
),
Edge(14, _vertex_prototypes[11], _vertex_prototypes[0]),
Edge(
id=15,
vertex1=_vertex_prototypes[11],
vertex2=_vertex_prototypes[3],
periodicity=(0, -1, 0),
),
Edge(16, _vertex_prototypes[12], _vertex_prototypes[2]),
Edge(
id=17,
vertex1=_vertex_prototypes[12],
vertex2=_vertex_prototypes[1],
periodicity=(1, -1, 0),
),
Edge(18, _vertex_prototypes[13], _vertex_prototypes[2]),
Edge(
id=19,
vertex1=_vertex_prototypes[13],
vertex2=_vertex_prototypes[3],
periodicity=(0, -1, 0),
),
Edge(20, _vertex_prototypes[14], _vertex_prototypes[1]),
Edge(
id=21,
vertex1=_vertex_prototypes[14],
vertex2=_vertex_prototypes[3],
periodicity=(-1, 0, 0),
),
Edge(22, _vertex_prototypes[15], _vertex_prototypes[3]),
Edge(
id=23,
vertex1=_vertex_prototypes[15],
vertex2=_vertex_prototypes[0],
periodicity=(1, 0, 0),
),
)
|
StarcoderdataPython
|
32191
|
<filename>apc/apc/apc_config.py
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 11:30:05 2019
@author: <NAME>
"""
# config.py
import os
from pathlib import Path
from inspect import currentframe, getframeinfo
fname = getframeinfo(currentframe()).filename # current file name
current_dir = Path(fname).resolve().parent
data_dir = Path(fname).resolve().parent.parent.parent/'data'
|
StarcoderdataPython
|
169432
|
<gh_stars>1-10
import pytest
@pytest.fixture(scope="module")
def states():
return ["ny", "tn", "tx", "pa"]
|
StarcoderdataPython
|
75531
|
#coding=utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import getpass
import os
import socket
import numpy as np
from PIL import Image, ImageFilter
import argparse
import time
import sys
#from utils import AverageMeter, calculate_accuracy
import pdb
import math
from dataset.dataset import *
from dataset.preprocess_data import *
from models.model import generate_model
from opts import parse_opts
from utils import *
import pdb
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.learning_rate_scheduler import ReduceLROnPlateau
def resume_params(model, optimizer, opt):
"""
    Load model parameters.
    Args:
        model, the defined network model
        optimizer, the network optimizer
        opt, the configuration options
    :return:
        If a previously saved checkpoint exists, restore the model parameters from it.
"""
if opt.continue_train and os.path.exists(opt.Flow_resume_path):
print("you now read checkpoint!!!")
checkpoint_list=os.listdir(opt.Flow_resume_path)
max_epoch=0
for checkpoint in checkpoint_list:
if 'model_Flow_' in checkpoint:
max_epoch=max(int(checkpoint.split('_')[2]),max_epoch)
if max_epoch>0:
            # read the model parameters and optimizer parameters from the checkpoint
para_dict, opti_dict = fluid.dygraph.load_dygraph(os.path.join(opt.Flow_resume_path,'model_Flow_'+str(max_epoch)+'_saved'))
            # set the network model parameters to the loaded parameters
model.set_dict(para_dict)
            # set the optimizer state to the loaded optimizer parameters
optimizer.set_dict(opti_dict)
            # update the starting epoch of the current training run
opt.begin_epoch=max_epoch+1
def train():
    # load the configuration
opt = parse_opts()
print(opt)
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
#
with fluid.dygraph.guard(place = fluid.CUDAPlace(0)):
        # training data loader
print("Preprocessing train data ...")
train_data = globals()['{}_test'.format(opt.dataset)](split = opt.split, train = 1, opt = opt)
train_dataloader = paddle.batch(train_data, batch_size=opt.batch_size, drop_last=True)
        # validation data loader
print("Preprocessing validation data ...")
val_data = globals()['{}_test'.format(opt.dataset)](split = opt.split, train = 2, opt = opt)
val_dataloader = paddle.batch(val_data, batch_size=opt.batch_size, drop_last=True)
        # when training on optical-flow images, the number of input channels is 2
opt.input_channels = 2
        # build the network model structure
print("Loading Flow model... ", opt.model, opt.model_depth)
model,parameters = generate_model(opt)
print("Initializing the optimizer ...")
if opt.Flow_premodel_path:
opt.weight_decay = 1e-5
opt.learning_rate = 0.001
print("lr = {} \t momentum = {}, \t nesterov = {} \t LR patience = {} "
.format(opt.learning_rate, opt.momentum, opt.nesterov, opt.lr_patience))
        # build the optimizer
optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=opt.learning_rate,
momentum=opt.momentum, parameter_list=parameters,
use_nesterov=opt.nesterov)
scheduler = ReduceLROnPlateau(opt.learning_rate, mode='min', patience=opt.lr_patience)
if opt.continue_train and opt.Flow_resume_path != '':
resume_params(model, optimizer, opt)
print('run')
        losses_avg = np.zeros((1,), dtype=np.float64)  # np.float alias removed in newer numpy
for epoch in range(opt.begin_epoch, opt.n_epochs+1):
            # set the model to training mode so its parameters can be optimized
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
accuracies = AverageMeter()
end_time = time.time()
for i, data in enumerate(train_dataloader()):
                # input video frames or optical flow
inputs = np.array([x[0] for x in data]).astype('float32')
                # labels of the input video frames or optical flow
targets = np.array([x[1] for x in data]).astype('int')
inputs = fluid.dygraph.base.to_variable(inputs)
targets = fluid.dygraph.base.to_variable(targets)
targets.stop_gradient = True
data_time.update(time.time() - end_time)
                # compute the network outputs
outputs = model(inputs)
                # compute the cross-entropy loss between the outputs and the labels
loss = fluid.layers.cross_entropy(outputs, targets)
avg_loss = fluid.layers.mean(loss)
                # compute the prediction accuracy
acc = calculate_accuracy(outputs, targets)
losses.update(avg_loss.numpy()[0], inputs.shape[0])
accuracies.update(acc[0], inputs.shape[0])
                # back-propagate the gradients
optimizer.clear_gradients()
avg_loss.backward()
                # minimize the loss to optimize the network weights
#print(avg_loss)
#pdb.set_trace()
optimizer.minimize(avg_loss)
batch_time.update(time.time() - end_time)
end_time = time.time()
print('Epoch: [{0}][{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss val:{loss.val:.4f} (avg:{loss.avg:.4f})\t'
'Acc val:{acc.val:.3f} (avg:{acc.avg:.3f})'.format(
epoch,
i + 1,
batch_time=batch_time,
data_time=data_time,
loss=losses,
acc=accuracies))
losses_avg[0]=losses.avg
scheduler.step(losses_avg)
if epoch % opt.checkpoint == 0 and epoch != 0:
fluid.dygraph.save_dygraph(model.state_dict(),os.path.join(opt.Flow_resume_path,'model_Flow_'+str(epoch)+'_saved'))
fluid.dygraph.save_dygraph(optimizer.state_dict(), os.path.join(opt.Flow_resume_path,'model_Flow_'+str(epoch)+'_saved'))
            # set the model to evaluation mode and validate on the validation set
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
accuracies = AverageMeter()
end_time = time.time()
for i, data in enumerate(val_dataloader()):
data_time.update(time.time() - end_time)
inputs = np.array([x[0] for x in data]).astype('float32')
targets = np.array([x[1] for x in data]).astype('int')
inputs = fluid.dygraph.base.to_variable(inputs)
targets = fluid.dygraph.base.to_variable(targets)
targets.stop_gradient = True
outputs = model(inputs)
loss = fluid.layers.cross_entropy(outputs, targets)
avg_loss = fluid.layers.mean(loss)
acc = calculate_accuracy(outputs, targets)
losses.update(avg_loss.numpy()[0], inputs.shape[0])
accuracies.update(acc[0], inputs.shape[0])
batch_time.update(time.time() - end_time)
end_time = time.time()
print('Val_Epoch: [{0}][{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(
epoch,
i + 1,
batch_time=batch_time,
data_time=data_time,
loss=losses,
acc=accuracies))
if __name__=="__main__":
train()
|
StarcoderdataPython
|
1602905
|
"""
Flask-S3-Custom
-------------
Link S3 to your file. Based on flask-s3 by e-dard
"""
from setuptools import setup
setup(
name='Flask-S3-Custom',
version='0.0.1',
url='http://github.com/yuerany/flask-s3-custom',
license='WTFPL',
author='<NAME>',
author_email='<EMAIL>',
description='Link S3 to your file. Based on flask-s3 by e-dard',
long_description=__doc__,
py_modules=['flask_s3_custom'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'Boto>=2.5.2'
],
tests_require=['nose', 'mock'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: Other/Proprietary License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
StarcoderdataPython
|
147752
|
from typing import Generic, TypeVar
K = TypeVar('K')
V = TypeVar('V')
class MultiMap(Generic[K, V], dict[K, list[V]]):
def add(self, key: K, value: V):
if (values := self.get(key)) is not None:
values.append(value)
else:
self[key] = [value]
|
StarcoderdataPython
|
3374829
|
#!/usr/bin/env python
import vtk
def main():
colors = vtk.vtkNamedColors()
# Set the background color.
colors.SetColor("BkgColor", [51, 77, 102, 255])
titles = list()
textMappers = list()
textActors = list()
uGrids = list()
mappers = list()
actors = list()
renderers = list()
uGrids.append(MakeHexagonalPrism())
titles.append('Hexagonal Prism')
uGrids.append(MakeHexahedron())
titles.append('Hexahedron')
uGrids.append(MakePentagonalPrism())
titles.append('Pentagonal Prism')
uGrids.append(MakePolyhedron())
titles.append('Polyhedron')
uGrids.append(MakePyramid())
titles.append('Pyramid')
uGrids.append(MakeTetrahedron())
titles.append('Tetrahedron')
uGrids.append(MakeVoxel())
titles.append('Voxel')
uGrids.append(MakeWedge())
titles.append('Wedge')
renWin = vtk.vtkRenderWindow()
renWin.SetWindowName('Cell3D Demonstration')
iRen = vtk.vtkRenderWindowInteractor()
iRen.SetRenderWindow(renWin)
# Create one text property for all
textProperty = vtk.vtkTextProperty()
textProperty.SetFontSize(16)
textProperty.SetJustificationToCentered()
# Create and link the mappers actors and renderers together.
for i in range(0, len(uGrids)):
textMappers.append(vtk.vtkTextMapper())
textActors.append(vtk.vtkActor2D())
mappers.append(vtk.vtkDataSetMapper())
actors.append(vtk.vtkActor())
renderers.append(vtk.vtkRenderer())
mappers[i].SetInputData(uGrids[i])
actors[i].SetMapper(mappers[i])
actors[i].GetProperty().SetColor(
colors.GetColor3d("Seashell"))
renderers[i].AddViewProp(actors[i])
textMappers[i].SetInput(titles[i])
textMappers[i].SetTextProperty(textProperty)
textActors[i].SetMapper(textMappers[i])
textActors[i].SetPosition(120, 16)
renderers[i].AddViewProp(textActors[i])
renWin.AddRenderer(renderers[i])
gridDimensions = 3
rendererSize = 300
renWin.SetSize(rendererSize * gridDimensions,
rendererSize * gridDimensions)
for row in range(0, gridDimensions):
for col in range(0, gridDimensions):
index = row * gridDimensions + col
# (xmin, ymin, xmax, ymax)
viewport = [
float(col) * rendererSize /
(gridDimensions * rendererSize),
float(gridDimensions - (row + 1)) * rendererSize /
(gridDimensions * rendererSize),
float(col + 1) * rendererSize /
(gridDimensions * rendererSize),
float(gridDimensions - row) * rendererSize /
(gridDimensions * rendererSize)]
if index > len(actors) - 1:
# Add a renderer even if there is no actor.
# This makes the render window background all the same color.
ren = vtk.vtkRenderer()
ren.SetBackground(colors.GetColor3d("BkgColor"))
ren.SetViewport(viewport)
renWin.AddRenderer(ren)
continue
renderers[index].SetViewport(viewport)
renderers[index].SetBackground(colors.GetColor3d("BkgColor"))
renderers[index].ResetCamera()
renderers[index].GetActiveCamera().Azimuth(30)
renderers[index].GetActiveCamera().Elevation(-30)
renderers[index].GetActiveCamera().Zoom(0.85)
renderers[index].ResetCameraClippingRange()
iRen.Initialize()
renWin.Render()
iRen.Start()
def MakeHexagonalPrism():
"""
3D: hexagonal prism: a wedge with an hexagonal base.
Be careful, the base face ordering is different from wedge.
"""
numberOfVertices = 12
points = vtk.vtkPoints()
points.InsertNextPoint(0.0, 0.0, 1.0)
points.InsertNextPoint(1.0, 0.0, 1.0)
points.InsertNextPoint(1.5, 0.5, 1.0)
points.InsertNextPoint(1.0, 1.0, 1.0)
points.InsertNextPoint(0.0, 1.0, 1.0)
points.InsertNextPoint(-0.5, 0.5, 1.0)
points.InsertNextPoint(0.0, 0.0, 0.0)
points.InsertNextPoint(1.0, 0.0, 0.0)
points.InsertNextPoint(1.5, 0.5, 0.0)
points.InsertNextPoint(1.0, 1.0, 0.0)
points.InsertNextPoint(0.0, 1.0, 0.0)
points.InsertNextPoint(-0.5, 0.5, 0.0)
hexagonalPrism = vtk.vtkHexagonalPrism()
for i in range(0, numberOfVertices):
hexagonalPrism.GetPointIds().SetId(i, i)
ug = vtk.vtkUnstructuredGrid()
ug.InsertNextCell(hexagonalPrism.GetCellType(),
hexagonalPrism.GetPointIds())
ug.SetPoints(points)
return ug
def MakeHexahedron():
"""
    A regular hexahedron (cube) with all faces square and three squares around
each vertex is created below.
Setup the coordinates of eight points
(the two faces must be in counter clockwise
order as viewed from the outside).
As an exercise you can modify the coordinates of the points to create
    seven topologically distinct convex hexahedra.
"""
numberOfVertices = 8
# Create the points
points = vtk.vtkPoints()
points.InsertNextPoint(0.0, 0.0, 0.0)
points.InsertNextPoint(1.0, 0.0, 0.0)
points.InsertNextPoint(1.0, 1.0, 0.0)
points.InsertNextPoint(0.0, 1.0, 0.0)
points.InsertNextPoint(0.0, 0.0, 1.0)
points.InsertNextPoint(1.0, 0.0, 1.0)
points.InsertNextPoint(1.0, 1.0, 1.0)
points.InsertNextPoint(0.0, 1.0, 1.0)
# Create a hexahedron from the points
hex_ = vtk.vtkHexahedron()
for i in range(0, numberOfVertices):
hex_.GetPointIds().SetId(i, i)
# Add the points and hexahedron to an unstructured grid
uGrid = vtk.vtkUnstructuredGrid()
uGrid.SetPoints(points)
uGrid.InsertNextCell(hex_.GetCellType(), hex_.GetPointIds())
return uGrid
def MakePentagonalPrism():
numberOfVertices = 10
# Create the points
points = vtk.vtkPoints()
points.InsertNextPoint(11, 10, 10)
points.InsertNextPoint(13, 10, 10)
points.InsertNextPoint(14, 12, 10)
points.InsertNextPoint(12, 14, 10)
points.InsertNextPoint(10, 12, 10)
points.InsertNextPoint(11, 10, 14)
points.InsertNextPoint(13, 10, 14)
points.InsertNextPoint(14, 12, 14)
points.InsertNextPoint(12, 14, 14)
points.InsertNextPoint(10, 12, 14)
# Pentagonal Prism
pentagonalPrism = vtk.vtkPentagonalPrism()
for i in range(0, numberOfVertices):
pentagonalPrism.GetPointIds().SetId(i, i)
# Add the points and hexahedron to an unstructured grid
uGrid = vtk.vtkUnstructuredGrid()
uGrid.SetPoints(points)
uGrid.InsertNextCell(pentagonalPrism.GetCellType(),
pentagonalPrism.GetPointIds())
return uGrid
def MakePolyhedron():
"""
Make a regular dodecahedron. It consists of twelve regular pentagonal
faces with three faces meeting at each vertex.
"""
# numberOfVertices = 20
numberOfFaces = 12
# numberOfFaceVertices = 5
points = vtk.vtkPoints()
points.InsertNextPoint(1.21412, 0, 1.58931)
points.InsertNextPoint(0.375185, 1.1547, 1.58931)
points.InsertNextPoint(-0.982247, 0.713644, 1.58931)
points.InsertNextPoint(-0.982247, -0.713644, 1.58931)
points.InsertNextPoint(0.375185, -1.1547, 1.58931)
points.InsertNextPoint(1.96449, 0, 0.375185)
points.InsertNextPoint(0.607062, 1.86835, 0.375185)
points.InsertNextPoint(-1.58931, 1.1547, 0.375185)
points.InsertNextPoint(-1.58931, -1.1547, 0.375185)
points.InsertNextPoint(0.607062, -1.86835, 0.375185)
points.InsertNextPoint(1.58931, 1.1547, -0.375185)
points.InsertNextPoint(-0.607062, 1.86835, -0.375185)
points.InsertNextPoint(-1.96449, 0, -0.375185)
points.InsertNextPoint(-0.607062, -1.86835, -0.375185)
points.InsertNextPoint(1.58931, -1.1547, -0.375185)
points.InsertNextPoint(0.982247, 0.713644, -1.58931)
points.InsertNextPoint(-0.375185, 1.1547, -1.58931)
points.InsertNextPoint(-1.21412, 0, -1.58931)
points.InsertNextPoint(-0.375185, -1.1547, -1.58931)
points.InsertNextPoint(0.982247, -0.713644, -1.58931)
# Dimensions are [numberOfFaces][numberOfFaceVertices]
dodechedronFace = [
[0, 1, 2, 3, 4],
[0, 5, 10, 6, 1],
[1, 6, 11, 7, 2],
[2, 7, 12, 8, 3],
[3, 8, 13, 9, 4],
[4, 9, 14, 5, 0],
[15, 10, 5, 14, 19],
[16, 11, 6, 10, 15],
[17, 12, 7, 11, 16],
[18, 13, 8, 12, 17],
[19, 14, 9, 13, 18],
[19, 18, 17, 16, 15]
]
dodechedronFacesIdList = vtk.vtkIdList()
# Number faces that make up the cell.
dodechedronFacesIdList.InsertNextId(numberOfFaces)
for face in dodechedronFace:
# Number of points in the face == numberOfFaceVertices
dodechedronFacesIdList.InsertNextId(len(face))
# Insert the pointIds for that face.
[dodechedronFacesIdList.InsertNextId(i) for i in face]
uGrid = vtk.vtkUnstructuredGrid()
uGrid.InsertNextCell(vtk.VTK_POLYHEDRON, dodechedronFacesIdList)
uGrid.SetPoints(points)
return uGrid
def MakePyramid():
"""
Make a regular square pyramid.
"""
numberOfVertices = 5
points = vtk.vtkPoints()
p = [
[1.0, 1.0, 0.0],
[-1.0, 1.0, 0.0],
[-1.0, -1.0, 0.0],
[1.0, -1.0, 0.0],
[0.0, 0.0, 1.0]
]
for pt in p:
points.InsertNextPoint(pt)
pyramid = vtk.vtkPyramid()
for i in range(0, numberOfVertices):
pyramid.GetPointIds().SetId(i, i)
ug = vtk.vtkUnstructuredGrid()
ug.SetPoints(points)
ug.InsertNextCell(pyramid.GetCellType(), pyramid.GetPointIds())
return ug
def MakeTetrahedron():
"""
Make a tetrahedron.
"""
numberOfVertices = 4
points = vtk.vtkPoints()
points.InsertNextPoint(0, 0, 0)
points.InsertNextPoint(1, 0, 0)
points.InsertNextPoint(1, 1, 0)
points.InsertNextPoint(0, 1, 1)
tetra = vtk.vtkTetra()
for i in range(0, numberOfVertices):
tetra.GetPointIds().SetId(i, i)
cellArray = vtk.vtkCellArray()
cellArray.InsertNextCell(tetra)
unstructuredGrid = vtk.vtkUnstructuredGrid()
unstructuredGrid.SetPoints(points)
unstructuredGrid.SetCells(vtk.VTK_TETRA, cellArray)
return unstructuredGrid
def MakeVoxel():
"""
A voxel is a representation of a regular grid in 3-D space.
"""
numberOfVertices = 8
points = vtk.vtkPoints()
points.InsertNextPoint(0, 0, 0)
points.InsertNextPoint(1, 0, 0)
points.InsertNextPoint(0, 1, 0)
points.InsertNextPoint(1, 1, 0)
points.InsertNextPoint(0, 0, 1)
points.InsertNextPoint(1, 0, 1)
points.InsertNextPoint(0, 1, 1)
points.InsertNextPoint(1, 1, 1)
voxel = vtk.vtkVoxel()
for i in range(0, numberOfVertices):
voxel.GetPointIds().SetId(i, i)
ug = vtk.vtkUnstructuredGrid()
ug.SetPoints(points)
ug.InsertNextCell(voxel.GetCellType(), voxel.GetPointIds())
return ug
def MakeWedge():
"""
A wedge consists of two triangular ends and three rectangular faces.
"""
numberOfVertices = 6
points = vtk.vtkPoints()
points.InsertNextPoint(0, 1, 0)
points.InsertNextPoint(0, 0, 0)
points.InsertNextPoint(0, .5, .5)
points.InsertNextPoint(1, 1, 0)
points.InsertNextPoint(1, 0.0, 0.0)
points.InsertNextPoint(1, .5, .5)
wedge = vtk.vtkWedge()
for i in range(0, numberOfVertices):
wedge.GetPointIds().SetId(i, i)
ug = vtk.vtkUnstructuredGrid()
ug.SetPoints(points)
ug.InsertNextCell(wedge.GetCellType(), wedge.GetPointIds())
return ug
def WritePNG(renWin, fn, magnification=1):
"""
Screenshot
Write out a png corresponding to the render window.
:param: renWin - the render window.
:param: fn - the file name.
:param: magnification - the magnification.
"""
windowToImageFilter = vtk.vtkWindowToImageFilter()
windowToImageFilter.SetInput(renWin)
windowToImageFilter.SetMagnification(magnification)
# Record the alpha (transparency) channel
# windowToImageFilter.SetInputBufferTypeToRGBA()
windowToImageFilter.SetInputBufferTypeToRGB()
# Read from the back buffer
windowToImageFilter.ReadFrontBufferOff()
windowToImageFilter.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName(fn)
writer.SetInputConnection(windowToImageFilter.GetOutputPort())
writer.Write()
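# Minimal rendering sketch (added for illustration; `_preview` is a hypothetical
# helper, not part of the original script): display one of the unstructured
# grids built above in an interactive window. Requires a VTK build with
# rendering support.
def _preview(uGrid):
    mapper = vtk.vtkDataSetMapper()
    mapper.SetInputData(uGrid)
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    renderer = vtk.vtkRenderer()
    renderer.AddActor(actor)
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(renderer)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(renWin)
    renWin.Render()
    interactor.Start()
# Example: _preview(MakePolyhedron())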
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3349093
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
:py:class: Register modules for import
"""
import importlib
import logging
import os
class Register():
"""Register"""
def __init__(self, registry_name):
self._dict = {}
self._name = registry_name
def __setitem__(self, key, value):
if not callable(value):
raise Exception("Value of a Registry must be a callable.")
if key is None:
key = value.__name__
self._dict[key] = value
def register(self, param):
"""Decorator to register a function or class."""
def decorator(key, value):
"""decorator"""
self[key] = value
return value
if callable(param):
# @reg.register
return decorator(None, param)
# @reg.register('alias')
return lambda x: decorator(param, x)
def __getitem__(self, key):
try:
return self._dict[key]
except Exception as e:
logging.error("module {key} not found: {e}")
raise e
def __contains__(self, key):
return key in self._dict
def keys(self):
"""key"""
return self._dict.keys()
class RegisterSet():
"""RegisterSet"""
field_reader = Register("field_reader")
data_set_reader = Register("data_set_reader")
models = Register("models")
tokenizer = Register("tokenizer")
trainer = Register("trainer")
package_names = ['src.data.field_reader', 'src.data.data_set_reader', 'src.data.tokenizer',
'src.models', 'src.training']
ALL_MODULES = []
for package_name in package_names:
module_dir = os.path.join(
os.path.abspath(
os.path.dirname(__file__)),
"../../" +
package_name.replace(
".",
'/'))
module_files = []
for file in os.listdir(module_dir):
if os.path.isfile(os.path.join(module_dir, file)
) and file.endswith(".py"):
module_files.append(file.replace(".py", ""))
ALL_MODULES.append((package_name, module_files))
def import_modules():
"""import modules needed
:return:
"""
for base_dir, modules in RegisterSet.ALL_MODULES:
for name in modules:
try:
if base_dir != "":
full_name = base_dir + "." + name
else:
full_name = name
importlib.import_module(full_name)
            except ImportError:
                logging.error("error importing module %s", full_name)
|
StarcoderdataPython
|
3241012
|
<reponame>rafaelclemes81/Python
"""043 - DESENVOLVA UMA LÓGICA QUE LEIA O PESO E A ALTURA DE UMA PESSOA, CALCULO SEU IMC E MOSTRE SEU STATUS,
DE ACORDO COM A TABELA ABAIXO:
- ABAIXO DE 18,5: ABAIXO DO PESO
- ENTRE 18,5 E 25: PESO IDEAL
- ENTRE 25 E 30: SOBREPESO
- ENTRE 30 E 40: OBESIDADE MORBIDA"""
print('-' * 20, 'DESAFIO 043', '-' * 20)
p = float(input('Informe seu peso: '))
a = float(input('Informe sua altura: '))
imc = p / (a ** 2)
if imc < 18.5:
print('Seu IMC é de {:.2f}kg/m e você está abaixo do peso.'.format(imc))
elif imc <= 25:
print('Seu IMC é de {:.2f}kg/m e você está no peso ideal.'.format(imc))
elif imc <= 30:
print('Seu IMC é de {:.2f}kg/m e você está com sobrepeso.'.format(imc))
else:
print('Seu IMC é {:.2f}kg/m e você está com obesidade morbida.'.format(imc))
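# Worked example (added for illustration; the numbers are hypothetical):
# a person weighing 70 kg with a height of 1.75 m gets
# imc = 70 / (1.75 ** 2) ≈ 22.86 kg/m², which falls in the "ideal weight"
# branch above (18.5 <= imc <= 25).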
|
StarcoderdataPython
|
3319453
|
import tensorflow as tf
import numpy as np
hparams = tf.contrib.training.HParams(
#####################################
# Audio config
#####################################
sample_rate=22050,
silence_threshold=2,
num_mels=80,
fmin=125,
fmax=7600,
fft_size=1024,
win_length=1024,
hop_length=256,
min_level_db=-100,
ref_level_db=20,
rescaling=True,
rescaling_max=0.999,
audio_max_value=32767,
allow_clipping_in_normalization=True,
#####################################
# Data config
#####################################
    seg_len=81 * 256,
    file_list="training_data/files.txt",
    spec_len=81,
    #####################################
    # Model parameters
    #####################################
    n_heads=2,
    pre_residuals=4,
    up_residuals=0,
    post_residuals=12,
    pre_conv_channels=[1, 1, 2],
    layer_channels=[1025 * 2, 1024, 512, 256, 128, 64, 32, 16, 8],
#####################################
# Training config
#####################################
n_workers=2,
seed=12345,
batch_size=40,
lr=1.0 * 1e-3,
weight_decay=1e-5,
epochs=50000,
grad_clip_thresh=5.0,
checkpoint_interval=1000,
)
def hparams_debug_string():
values = hparams.values()
hp = [' %s: %s' % (name, values[name]) for name in sorted(values)]
return 'Hyperparameters:\n' + '\n'.join(hp)
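# Minimal usage sketch (added for illustration): print the configuration at the
# start of a training run so the log records the exact hyperparameters used.
if __name__ == "__main__":
    print(hparams_debug_string())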
|
StarcoderdataPython
|
3287513
|
# -*- coding: utf-8 -*-
"""URLs to manipulate workflows, attributes, columns and shared."""
from django.urls import path, re_path
from rest_framework.urlpatterns import format_suffix_patterns
from ontask.workflow import api, views
app_name = 'workflow'
urlpatterns = [
path('create/', views.WorkflowCreateView.as_view(), name='create'),
path('<int:wid>/clone/', views.clone_workflow, name='clone'),
path('<int:wid>/update/', views.update, name='update'),
path('<int:wid>/delete/', views.delete, name='delete'),
path('<int:wid>/flush/', views.flush, name='flush'),
path('<int:wid>/star/', views.star, name='star'),
path('detail/', views.detail, name='detail'),
path('operations/', views.operations, name='operations'),
# Column table manipulation
path('column_ss/', views.column_ss, name='column_ss'),
# Import Export
path(
'<int:wid>/export_ask/',
views.export_ask,
name='export_ask'),
path('export/', views.export, name='export_empty'),
re_path(
r'(?P<page_data>\d+((,\d+)*))/export/',
views.export,
name='export'),
path('import/', views.import_workflow, name='import'),
# Attributes
path(
'attribute_create/',
views.attribute_create,
name='attribute_create'),
path(
'<int:pk>/attribute_edit/',
views.attribute_edit,
name='attribute_edit'),
path(
'<int:pk>/attribute_delete/',
views.attribute_delete,
name='attribute_delete'),
# Sharing
path('share_create/', views.share_create, name='share_create'),
path(
'<int:pk>/share_delete/',
views.share_delete,
name='share_delete'),
# Assign learner user email column
path(
'assign_luser_column/',
views.assign_luser_column,
name='assign_luser_column'),
path(
'<int:pk>/assign_luser_column/',
views.assign_luser_column,
name='assign_luser_column'),
# Column manipulation
path('column_add/', views.column_add, name='column_add'),
path('<int:pk>/question_add/', views.column_add, name='question_add'),
path(
'formula_column_add',
views.formula_column_add,
name='formula_column_add'),
path(
'random_column_add/',
views.random_column_add,
name='random_column_add'),
path(
'<int:pk>/column_delete/',
views.column_delete,
name='column_delete'),
path('<int:pk>/column_edit/', views.column_edit, name='column_edit'),
path(
'<int:pk>/question_edit/',
views.column_edit,
name='question_edit'),
path(
'<int:pk>/column_clone/',
views.column_clone,
name='column_clone'),
# Column movement
path('column_move/', views.column_move, name='column_move'),
path(
'<int:pk>/column_move_top/',
views.column_move_top,
name='column_move_top'),
path(
'<int:pk>/column_move_bottom/',
views.column_move_bottom,
name='column_move_bottom'),
path(
'<int:pk>/column_restrict/',
views.column_restrict_values,
name='column_restrict'),
# API
# Listing and creating workflows
path(
'workflows/',
api.WorkflowAPIListCreate.as_view(),
name='api_workflows'),
# Get, update content or destroy workflows
path(
'<int:pk>/rud/',
api.WorkflowAPIRetrieveUpdateDestroy.as_view(),
name='api_rud'),
# Manage workflow locks (get, set (post, put), unset (delete))
path(
'<int:pk>/lock/',
api.WorkflowAPILock.as_view(),
name='api_lock'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
StarcoderdataPython
|
1749141
|
#!/usr/bin/env python
from swagger_ui_bundle import swagger_ui_path
from flask import Flask, Blueprint, send_from_directory, render_template
swagger_bp = Blueprint(
'swagger_ui',
__name__,
static_url_path='',
static_folder=swagger_ui_path,
template_folder=swagger_ui_path
)
SWAGGER_UI_CONFIG = {
"openapi_spec_url": "https://petstore.swagger.io/v2/swagger.json"
}
@swagger_bp.route('/')
def swagger_ui_index():
return render_template('index.j2', **SWAGGER_UI_CONFIG)
app = Flask(__name__, static_url_path='')
app.register_blueprint(swagger_bp, url_prefix='/ui')
if __name__ == "__main__":
app.run()
|
StarcoderdataPython
|
4809729
|
# ----------------------------------------------------------------------------
# color_wheel.py
#
# The MIT License (MIT)
# Copyright (c) 2020 <NAME>
# 2020-12-20, v1
# ----------------------------------------------------------------------------
def getColorFromWheel(iWheel):
""" Get an RGB color from a wheel-like color representation
"""
    iWheel = iWheel % 255
    if iWheel < 85:
        return (255 - iWheel * 3, 0, iWheel * 3)
    elif iWheel < 170:
        iWheel -= 85
        return (0, iWheel * 3, 255 - iWheel * 3)
    else:
        iWheel -= 170
        return (iWheel * 3, 255 - iWheel * 3, 0)
# ----------------------------------------------------------------------------
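# Minimal usage sketch (added for illustration): sample the wheel at evenly
# spaced positions, e.g. to give `n` LEDs or plot markers distinct hues.
if __name__ == "__main__":
    n = 8
    palette = [getColorFromWheel(i * 255 // n) for i in range(n)]
    for pos, rgb in enumerate(palette):
        print(pos, rgb)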
|
StarcoderdataPython
|
3398569
|
"""The tests for Netatmo webhook events."""
from homeassistant.components.netatmo.const import DATA_DEVICE_IDS, DATA_PERSONS
from homeassistant.components.netatmo.webhook import async_handle_webhook
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.aiohttp import MockRequest
async def test_webhook(hass):
"""Test that webhook events are processed."""
webhook_called = False
async def handle_event(_):
nonlocal webhook_called
webhook_called = True
response = (
b'{"user_id": "123", "user": {"id": "123", "email": "<EMAIL>"},'
b'"push_type": "webhook_activation"}'
)
request = MockRequest(content=response, mock_source="test")
async_dispatcher_connect(
hass,
"signal-netatmo-webhook-None",
handle_event,
)
await async_handle_webhook(hass, "webhook_id", request)
await hass.async_block_till_done()
assert webhook_called
async def test_webhook_error_in_data(hass):
"""Test that errors in webhook data are handled."""
webhook_called = False
async def handle_event(_):
nonlocal webhook_called
webhook_called = True
response = b'""webhook_activation"}'
request = MockRequest(content=response, mock_source="test")
async_dispatcher_connect(
hass,
"signal-netatmo-webhook-None",
handle_event,
)
await async_handle_webhook(hass, "webhook_id", request)
await hass.async_block_till_done()
assert not webhook_called
async def test_webhook_climate_event(hass):
"""Test that climate events are handled."""
webhook_called = False
async def handle_event(_):
nonlocal webhook_called
webhook_called = True
response = (
b'{"user_id": "123", "user": {"id": "123", "email": "<EMAIL>"},'
b'"home_id": "456", "event_type": "therm_mode",'
b'"home": {"id": "456", "therm_mode": "away"},'
b'"mode": "away", "previous_mode": "schedule", "push_type": "home_event_changed"}'
)
request = MockRequest(content=response, mock_source="test")
hass.data["netatmo"] = {
DATA_DEVICE_IDS: {},
}
async_dispatcher_connect(
hass,
"signal-netatmo-webhook-therm_mode",
handle_event,
)
await async_handle_webhook(hass, "webhook_id", request)
await hass.async_block_till_done()
assert webhook_called
async def test_webhook_person_event(hass):
"""Test that person events are handled."""
webhook_called = False
async def handle_event(_):
nonlocal webhook_called
webhook_called = True
response = (
b'{"user_id": "5c81004xxxxxxxxxx45f4",'
b'"persons": [{"id": "e2bf7xxxxxxxxxxxxea3", "face_id": "5d66xxxxxx9b9",'
b'"face_key": "<KEY>", "is_known": true,'
b'"face_url": "https://netatmocameraimage.blob.core.windows.net/production/5xxx"}],'
b'"snapshot_id": "5d19bae867368a59e81cca89", "snapshot_key": "<KEY>",'
b'"snapshot_url": "https://netatmocameraimage.blob.core.windows.net/production/5xxxx",'
b'"event_type": "person", "camera_id": "70:xxxxxx:a7", "device_id": "70:xxxxxx:a7",'
b'"home_id": "5c5dxxxxxxxd594", "home_name": "<NAME>.",'
b'"event_id": "5d19bxxxxxxxxcca88",'
b'"message": "Boulogne Billan.: Benoit has been seen by Indoor Camera ",'
b'"push_type": "NACamera-person"}'
)
request = MockRequest(content=response, mock_source="test")
hass.data["netatmo"] = {
DATA_DEVICE_IDS: {},
DATA_PERSONS: {},
}
async_dispatcher_connect(
hass,
"signal-netatmo-webhook-person",
handle_event,
)
await async_handle_webhook(hass, "webhook_id", request)
await hass.async_block_till_done()
assert webhook_called
|
StarcoderdataPython
|
4831015
|
<reponame>theslytherin/Invisibility-cloak---Harry-Potter<gh_stars>0
from .hough_circle import HoughCircleDetector
|
StarcoderdataPython
|
3221235
|
<reponame>JWKennington/pynstein<gh_stars>0
"""Unittests for pystein.symbolic.constants module"""
# pylint: disable=protected-access
from pystein import symbols
class TestSymbols:
"""Test Symbols"""
def test_numeric_symbol(self):
"""Test numeric symbol"""
a = symbols.numeric_symbol('a')
assert str(a) == 'a'
assert a._assumptions == symbols.DEFAULT_NUMERIC_ASSUMPTIONS
def test_coordinate_symbol(self):
"""Test values"""
assert str(symbols.t) == "t"
assert str(symbols.x) == "x"
assert str(symbols.y) == "y"
assert str(symbols.z) == "z"
assert str(symbols.r) == "r"
assert str(symbols.theta) == r"\theta"
assert str(symbols.phi) == r"\varphi"
def test_curvature_symbol(self):
"""Test values"""
assert str(symbols.k) == "k"
def test_matter_symbol(self):
"""Test values"""
assert str(symbols.rho) == r'\rho'
assert str(symbols.p) == 'p'
|
StarcoderdataPython
|
54614
|
<reponame>caedonhsieh/ps-munna
from setuptools import setup
with open('README.md') as file:
long_description = file.read()
# TODO - replace with details of your project
setup(
name='munna',
description='Pokemon Showdown MatchUp Neural Network Analysis',
version='0.0.1',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/caedonhsieh/ps-munna',
install_requires=['pytorch-lightning'],
packages=['munna'],
package_data={'munna': ['assets/*']},
long_description=long_description,
long_description_content_type='text/markdown',
keywords=[],
classifiers=['License :: OSI Approved :: MIT License'],
license='MIT')
|
StarcoderdataPython
|
1772241
|
<filename>dev/05_18_2018/VFD_Controller.py
# Universal Power Supply Controller
# USAID Middle East Water Security Initiative
#
# Developed by: <NAME>
# Primary Investigator: <NAME>
#
# Version History (mm_dd_yyyy)
# 1.00 03_24_2018_NW
#
######################################################
# Import Libraries
import Parameters
import time
from VFD_Modbus_Wrapper import *
from VFD_Modbus_Registers import *
from PWM_Wrapper import *
from TransferSwitch import *
def VFD_Controller(SCIP_Power,SCIP_Freq):
# Measure Solar voltage
# Measure solar current
# Calculate solar power
if (P_Solar > 500):
        TransferSwitch(0)
time.sleep(1)
if (P_Solar >= SCIP_Power):
P_VFD = SCIP_Power
else:
P_VFD = P_Solar
if ((P_VFD/Parameters.P_Solar_Max)*Parameters.Theta_Max)<=SCIP_Freq:
Freq_VFD = ((P_VFD/Parameters.P_Solar_Max)*Parameters.Theta_Max)
else:
Freq_VFD = SCIP_Freq
VFD.VFDWrite(reg.get("WriteFunc", {}).get("Motor_Start_Stop"), 1)
        time.sleep(2)
VFD.VFDWrite(reg.get("WriteFunc", {}).get("Frequency_Set"), Freq_VFD*100)
elif (P_Solar <= 500):
TransferSwitch(1)
time.sleep(5)
if (P_Solar >= SCIP_Power):
P_VFD = SCIP_Power
else:
P_VFD = P_Solar
if ((P_VFD / Parameters.P_Solar_Max) * Parameters.Theta_Max) <= SCIP_Freq:
Freq_VFD = ((P_VFD / Parameters.P_Solar_Max) * Parameters.Theta_Max)
else:
Freq_VFD = SCIP_Freq
VFD.VFDWrite(reg.get("WriteFunc", {}).get("Motor_Start_Stop"), 1)
        time.sleep(2)
VFD.VFDWrite(reg.get("WriteFunc", {}).get("Frequency_Set"), Freq_VFD * 100)
else:
UPS_Error('Error_VFD_Power')
|
StarcoderdataPython
|
1740178
|
<filename>gammafit/tests/test_utils.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.tests.helper import pytest
from astropy.utils.data import get_pkg_data_filename
import astropy.units as u
from ..utils import validate_data_table, sed_conversion, generate_energy_edges, build_data_table
from astropy.io import ascii
# Read data
fname = get_pkg_data_filename('data/CrabNebula_HESS_ipac.dat')
data_table = ascii.read(fname)
# Read spectrum with symmetric flux errors
fname_sym = get_pkg_data_filename('data/CrabNebula_HESS_ipac_symmetric.dat')
data_table_sym = ascii.read(fname_sym)
def test_validate_data_types():
data_table2 = data_table.copy()
data_table2['energy'].unit=''
with pytest.raises(TypeError):
data = validate_data_table(data_table2)
def test_validate_missing_column():
data_table2 = data_table.copy()
data_table2.remove_column('energy')
with pytest.raises(TypeError):
data = validate_data_table(data_table2)
def test_validate_string_uls():
from astropy.table import Column
data_table2 = data_table.copy()
# replace uls column with valid strings
data_table2.remove_column('ul')
data_table2.add_column(Column(name='ul',dtype=str, data=['False',]*len(data_table2)))
data_table2['ul'][1] = 'True'
data = validate_data_table(data_table2)
# put an invalid value
data_table2['ul'][2] = 'invalid'
with pytest.raises(TypeError):
data = validate_data_table(data_table2)
def test_validate_cl():
data_table2 = data_table.copy()
# use invalid value
data_table2.meta['keywords']['cl']['value']='test'
with pytest.raises(TypeError):
data = validate_data_table(data_table2)
# remove cl
data_table2.meta['keywords'].pop('cl')
data = validate_data_table(data_table2)
assert data['cl'] == 0.9
def test_build_data_table():
ene = np.logspace(-2,2,20) * u.TeV
flux = (ene/(1*u.TeV))**-2 * u.Unit('1/(cm2 s TeV)')
flux_error_hi = 0.2 * flux
flux_error_lo = 0.1 * flux
ul = np.zeros(len(ene))
ul[0] = 1
dene = generate_energy_edges(ene)
table = build_data_table(ene, flux, flux_error_hi=flux_error_hi, flux_error_lo=flux_error_lo, ul=ul)
table = build_data_table(ene, flux, flux_error_hi=flux_error_hi, flux_error_lo=flux_error_lo, ul=ul, cl=0.99)
table = build_data_table(ene, flux, flux_error=flux_error_hi, ene_width=dene[0])
table = build_data_table(ene, flux, flux_error=flux_error_hi, ene_lo=(ene-dene[0]), ene_hi=(ene+dene[1]))
# no flux_error
with pytest.raises(TypeError):
table = build_data_table(ene, flux)
# errors in energy physical type validation
with pytest.raises(TypeError):
table = build_data_table(ene.value, flux, flux_error=flux_error_hi)
with pytest.raises(TypeError):
table = build_data_table(ene.value*u.Unit('erg/(cm2 s)'), flux, flux_error=flux_error_hi)
|
StarcoderdataPython
|
4842107
|
import pytest
from app_data.selectors.amazon import NEXT_BUTTON
from helpers import dom
from helpers.amazon import do_search, verify_search_result_summary
URL = {
'link': 'https://www.amazon.com/',
'title': 'Amazon.com: Online Shopping for Electronics, Apparel, Computers, Books, DVDs & more'
}
@pytest.mark.smoke
@pytest.mark.usefixtures("open_url")
@pytest.mark.parametrize("search_term", ("gardening tools", "plush animals", "pots",))
def test_amazon_search_summary(selenium, search_term):
"""
This test validates the expected summary of a search is shown on the first and second search results page.
Search terms used are defined in the parameterize pytest marker above.
"""
# search for results
do_search(selenium, search_term)
# verify results shown for search
verify_search_result_summary(selenium, low=1, high=48, expected_search_term=search_term)
dom.click_element(selenium, NEXT_BUTTON)
verify_search_result_summary(selenium, low=49, high=96, expected_search_term=search_term)
|
StarcoderdataPython
|
3356029
|
# <NAME> @ipvs
# 180921
from __future__ import print_function
import h5py
import os
import numpy as np
# Create a dataset with the first dim is expandable
def create_appendable_dset(fname,dset_name,data):
dshape = data.shape
maxshape = [None]*len(dshape)
for i in range(1,len(dshape)):
maxshape[i] = dshape[i]
with h5py.File(fname,'a') as f:
dset = f.create_dataset(dset_name,dshape,maxshape=maxshape,
dtype=data.dtype,chunks=True)
dset[:] = data
# Write data into an HDF5 dataset in an appendable way.
# `data` should have shape B x D0 x D1 x ..., where the first dimension is the
# expandable one and B is the number of new items to be written.
def append_data(fname, dset_name, data):
avail = True
if not os.path.exists(fname):
avail = False
else:
with h5py.File(fname,'r') as f:
if not dset_name in f:
avail = False
if not avail:
create_appendable_dset(fname,dset_name,data)
else:
with h5py.File(fname,'a') as f:
dset = f[dset_name]
dset.resize(dset.shape[0]+data.shape[0],axis=0)
dset[-data.shape[0]:,...] = data
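# Minimal usage sketch (added for illustration; the file and dataset names are
# hypothetical): append two batches of shape (4, 3) to the same dataset, which
# ends up with shape (8, 3).
if __name__ == "__main__":
    if os.path.exists("demo.h5"):
        os.remove("demo.h5")
    for _ in range(2):
        batch = np.random.rand(4, 3).astype(np.float32)
        append_data("demo.h5", "features", batch)
    with h5py.File("demo.h5", "r") as f:
        print(f["features"].shape)  # -> (8, 3)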
|
StarcoderdataPython
|
150035
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# File: deeplab_saved_model copy.py
# Created Date: 2019-09-23
# Author: jingxiaofei
# Contact: <<EMAIL>>
#
# Last Modified: Thursday September 26th 2019 10:20:55 pm
#
# Copyright (c) 2019 KKWorld
# It is never too late to be what you might have been.
# -----
# HISTORY:
# Date By Comments
# ---------- --- ----------------------------------------------------------
###
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import argparse
import sys
import shutil
sys.path.insert(0, "../")
import network
import utils
import json
import ipdb
slim = tf.contrib.slim
tf.flags.DEFINE_string('checkpoint_dir', 'tboard_logs/16645/train', 'checkpoints directory path')
tf.flags.DEFINE_integer('image_size', '128', 'image size, default: 256')
tf.flags.DEFINE_string('serve_path', 'serve/1', 'path to save serve model')
tf.flags.DEFINE_string('pb_path', 'model_v1.pb', 'path to save serve model')
tf.app.flags.DEFINE_integer('model_version', 1, 'Models version number.')
tf.app.flags.DEFINE_string('work_dir', './tboard_logs', 'Working directory.')
tf.app.flags.DEFINE_integer('model_id', 16645, 'Model id name to be loaded.')
tf.app.flags.DEFINE_string('export_model_dir', "./versions", 'Directory where the model exported files should be placed.')
FLAGS = tf.flags.FLAGS
# best: 16645
model_name = str(FLAGS.model_id)
log_folder = FLAGS.work_dir
pre_trained_model_dir = os.path.join(log_folder, model_name, "train")
with open(log_folder + '/' + model_name + '/train/data.json', 'r') as fp:
args = json.load(fp)
class Dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
args = Dotdict(args)
graph = tf.Graph()
# Here, we instantiate a Model and inject our first layer.
with graph.as_default():
# Create placeholder for image bitstring
# This is the injection of the input bitstring layer
input_bytes = tf.placeholder(tf.string, shape=[], name="input_bytes")
# Next, we preprocess the bitstring to a tensor batch so it can be used in the model.
with graph.as_default():
input_bytes = tf.reshape(input_bytes, [])
# Transform bitstring to uint8 tensor
input_tensor = tf.image.decode_png(input_bytes, channels=3)
input_tensor = tf.cast(input_tensor, tf.float32)
# Model's inference function accepts a batch of images
# So expand the single tensor into a batch of 1
input_tensor = tf.expand_dims(input_tensor, 0)
    # Resize the input tensor to a certain size
#input_tensor = tf.image.resize_bilinear(
# input_tensor, [FLAGS.image_size, FLAGS.image_size], align_corners=False)
# Then, we feed the tensor to the model and save its output.
with graph.as_default():
# Get model predictions
logits_tf = network.deeplab_v3(input_tensor, args, is_training=False, reuse=False)
# extract the segmentation classes, each value refer to a class
predictions_tf = tf.argmax(logits_tf, axis=3) # int64
with graph.as_default():
# Cast the output to uint8
output_tensor = tf.cast(predictions_tf, tf.uint8)
# Remove the batch dimension
output_tensor = tf.squeeze(output_tensor, 0)
## Stack the tensor to (?, ?, 3) for image encoding
#output_tensor = tf.stack([output_tensor,output_tensor,output_tensor], 2)
output_tensor = tf.expand_dims(output_tensor, -1)
# Transform uint8 tensor to bitstring
output_bytes = tf.image.encode_png(output_tensor)
output_bytes = tf.identity(output_bytes, name="output_bytes")
# Instantiate a Saver
saver = tf.train.Saver()
# Now that we have injected the bitstring layers into our model, we will load our train checkpoints and save the graph as a ProtoBuf.
# Start a TensorFlow session
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
pre_trained_model_dir = os.path.join(log_folder, model_name, "train")
# Access variables and weights from last checkpoint
latest_ckpt = tf.train.latest_checkpoint(pre_trained_model_dir)
saver.restore(sess, latest_ckpt)
# Export graph to ProtoBuf
output_graph_def = tf.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), [output_bytes.op.name])
tf.train.write_graph(output_graph_def, ".", FLAGS.pb_path, as_text=False)
# With that, we've completed step one! In step two, we will wrap the ProtoBuf in a SavedModel to use the RESTful API.
# Instantiate a SavedModelBuilder
# Note that the serve directory is REQUIRED to have a model version subdirectory
if os.path.exists(FLAGS.serve_path):
shutil.rmtree(FLAGS.serve_path)
builder = tf.saved_model.builder.SavedModelBuilder(FLAGS.serve_path)
# Read in ProtoBuf file
with tf.gfile.GFile(FLAGS.pb_path, "rb") as protobuf_file:
graph_def = tf.GraphDef()
graph_def.ParseFromString(protobuf_file.read())
# Get input and output tensors from GraphDef
# These are our injected bitstring layers
[inp, out] = tf.import_graph_def(graph_def, name="", return_elements=["input_bytes:0", "output_bytes:0"])
# Next, we define our signature definition, which expects the TensorInfo of the input
# and output to the model. When we save the model, we'll get a "No assets" message,
# but that's okay because our graph and variables were already saved in the ProtoBuf.
# Start a TensorFlow session with our saved graph
with tf.Session(graph=out.graph) as sess:
# Signature_definition expects a batch
# So we'll turn the output bitstring into a batch of 1 element
out = tf.expand_dims(out, 0)
# Build prototypes of input and output bitstrings
input_bytes = tf.saved_model.utils.build_tensor_info(inp)
output_bytes = tf.saved_model.utils.build_tensor_info(out)
# Create signature for prediction
signature_definition = tf.saved_model.signature_def_utils.build_signature_def(
inputs={"input_bytes": input_bytes},
outputs={"output_bytes": output_bytes},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
# Add meta-information
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.
DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_definition
})
# Create the SavedModel
builder.save()
|
StarcoderdataPython
|
48199
|
<gh_stars>0
###########################
#
# #171 Finding numbers for which the sum of the squares of the digits is a square - Project Euler
# https://projecteuler.net/problem=171
#
# Code by <NAME>
#
###########################
|
StarcoderdataPython
|
1683062
|
<reponame>maquinuz/flask-shop
from flask import render_template, redirect, url_for, request, flash
from flaskshop.public.models import MenuItem, Page
from flaskshop.dashboard.models import DashboardMenu, Setting
from flaskshop.product.models import Category, Collection
from flaskshop.checkout.models import ShippingMethod
from flaskshop.plugin.models import PluginRegistry
from flaskshop.account.utils import admin_required, permission_required, Permission
from flaskshop.dashboard.forms import (
DashboardMenuForm,
SiteMenuForm,
SitePageForm,
ShippingMethodForm,
generate_settings_form,
)
def shipping_methods():
page = request.args.get("page", type=int, default=1)
pagination = ShippingMethod.query.paginate(page, 10)
props = {
"id": "ID",
"title": "Title",
"price_human": "Price",
"created_at": "Created At",
}
context = {
"title": "Shipping Method",
"items": pagination.items,
"props": props,
"pagination": pagination,
"identity": "shipping_methods",
}
return render_template("list.html", **context)
def shipping_methods_manage(id=None):
if id:
shipping_method = ShippingMethod.get_by_id(id)
form = ShippingMethodForm(obj=shipping_method)
else:
form = ShippingMethodForm()
if form.validate_on_submit():
if not id:
shipping_method = ShippingMethod()
form.populate_obj(shipping_method)
shipping_method.save()
return redirect(url_for("dashboard.shipping_methods"))
return render_template("site/shipping_method.html", form=form)
def site_menus():
page = request.args.get("page", type=int, default=1)
pagination = MenuItem.query.paginate(page, 10)
props = {
"id": "ID",
"title": "Title",
"order": "Order",
"position": "Position",
"parent_id": "Parent Id",
}
context = {
"title": "Site Menus",
"items": pagination.items,
"props": props,
"pagination": pagination,
"identity": "site_menus",
}
return render_template("list.html", **context)
@admin_required
def site_menus_manage(id=None):
if id:
menu = MenuItem.get_by_id(id)
form = SiteMenuForm(obj=menu)
else:
form = SiteMenuForm()
if form.validate_on_submit():
if not id:
menu = MenuItem()
form.populate_obj(menu)
menu.save()
return redirect(url_for("dashboard.site_menus"))
parents = MenuItem.first_level_items()
categories = Category.query.all()
collections = Collection.query.all()
pages = Page.query.all()
context = {
"form": form,
"parents": parents,
"categories": categories,
"collections": collections,
"pages": pages,
}
return render_template("site/site_menu.html", **context)
def dashboard_menus():
page = request.args.get("page", type=int, default=1)
pagination = DashboardMenu.query.paginate(page, 10)
props = {
"id": "ID",
"title": "Title",
"order": "Order",
"endpoint": "Endpoint",
"icon_cls": "Icon class",
"parent_id": "Parent Id",
}
context = {
"title": "Dashboard Menus",
"items": pagination.items,
"props": props,
"pagination": pagination,
"identity": "dashboard_menus",
}
return render_template("list.html", **context)
@admin_required
def dashboard_menus_manage(id=None):
if id:
menu = DashboardMenu.get_by_id(id)
form = DashboardMenuForm(obj=menu)
else:
form = DashboardMenuForm()
if form.validate_on_submit():
if not id:
menu = DashboardMenu()
form.populate_obj(menu)
menu.save()
return redirect(url_for("dashboard.dashboard_menus"))
parents = DashboardMenu.first_level_items()
return render_template("site/dashboard_menu.html", form=form, parents=parents)
def site_pages():
page = request.args.get("page", type=int, default=1)
pagination = Page.query.paginate(page, 10)
props = {
"id": "ID",
"title": "Title",
"slug": "Slug",
"url": "Url",
"is_visible": "Is Visiable",
}
context = {
"title": "Site Pages",
"items": pagination.items,
"props": props,
"pagination": pagination,
"identity": "site_pages",
}
return render_template("list.html", **context)
def site_pages_manage(id=None):
if id:
page = Page.get_by_id(id)
form = SitePageForm(obj=page)
else:
form = SitePageForm()
if form.validate_on_submit():
if not id:
page = Page()
form.populate_obj(page)
page.save()
return redirect(url_for("dashboard.site_pages"))
return render_template("site/site_page.html", form=form)
def plugin_list():
plugins = PluginRegistry.query.all()
return render_template("site/plugin.html", plugins=plugins)
def plugin_enable(id):
plugin = PluginRegistry.get_by_id(id)
plugin.enabled = True
plugin.save()
flash("The plugin is enabled, Please restart flask-shop now!", "success")
return redirect(url_for("dashboard.plugin_list"))
def plugin_disable(id):
plugin = PluginRegistry.get_by_id(id)
plugin.enabled = False
plugin.save()
flash("The plugin is disabled, Please restart flask-shop now!", "info")
return redirect(url_for("dashboard.plugin_list"))
def site_setting():
settings = Setting.query.all()
form = generate_settings_form(settings)()
old_settings = Setting.get_settings()
if request.method == "GET":
for key, value in old_settings.items():
try:
form[key].data = value
except (KeyError, ValueError):
pass
if form.validate_on_submit():
new_settings = {}
for key, value in old_settings.items():
try:
# check if the value has changed
if value == form[key].data:
continue
else:
new_settings[key] = form[key].data
except KeyError:
pass
Setting.update(settings=new_settings)
flash("Settings saved.", "success")
return render_template("site/settings.html", form=form,)
def config_index():
return render_template("site/index.html")
|
StarcoderdataPython
|
3314827
|
x = int(input("Give a number: "))
if x < 10:
if x < 5:
print("x is less than 5")
else:
print("x is greater than 5 and less than 10")
elif x >= 10 and x < 20: #Can be written as x <= 10 < 20
if x < 15:
print("x is less than 15 and greater or equal than 10")
elif x == 15:
print("x is 15")
else:
print("x is greater than 15 and less than 20")
else:
print("x is greater than 20 or equal")
|
StarcoderdataPython
|
62833
|
<gh_stars>10-100
#!/usr/bin/env python
from distutils.core import setup
setup(name='Pyquake',
version='1.0',
entry_points={
'console_scripts': [
'demo_viewer = pyquake.render:demo_viewer_main',
'ray_tracer = pyquake.ray:raytracer_main',
'pyqclient = pyquake.client:client_main',
'aiopyqclient = pyquake.client:aioclient_main',
'aiodgram = pyquake.aiodgram:main',
'demo_parser = pyquake.proto:demo_parser_main',
'pyq_monitor_demos = pyquake.demo:monitor_demos',
'pyq_extract_lights = pyquake.mapsource:extract_lights_main',
'pyq_pak_extract = pyquake.pak:pak_extract_main',
]
},
description='Python Quake client',
install_requires=['numpy', 'scipy', 'parsley'],
author='<NAME>',
packages=['pyquake'])
|
StarcoderdataPython
|
3285211
|
<gh_stars>0
"""
functions to run FFT sampling
"""
from __future__ import print_function
import sys
import os
import netCDF4
import numpy as np
sys.path.append("../bpmfwfft")
from bpmfwfft.fft_sampling import Sampling
BSITE_FILE = None
def sampling(rec_prmtop, lj_sigma_scal_fact,
rec_inpcrd, grid_nc_file,
lig_prmtop, lig_inpcrd,
lig_coor_nc, nr_lig_conf, start_index,
energy_sample_size_per_ligand,
output_nc):
lig_nc_handle = netCDF4.Dataset(lig_coor_nc, "r")
lig_coord_ensemble = lig_nc_handle.variables["positions"][start_index : start_index + nr_lig_conf]
lig_nc_handle.close()
sampler = Sampling(rec_prmtop, lj_sigma_scal_fact, rec_inpcrd,
BSITE_FILE, grid_nc_file, lig_prmtop, lig_inpcrd,
lig_coord_ensemble,
energy_sample_size_per_ligand,
output_nc,
temperature=300.)
sampler.run_sampling()
print("Sampling Done")
return None
def is_sampling_nc_good(nc_file, nr_extracted_lig_conf):
if not os.path.exists(nc_file):
return False
try:
nc_handle = netCDF4.Dataset(nc_file, "r")
except RuntimeError as e:
print(nc_file)
print(e)
return True
else:
pass
cond1 = nc_handle.variables["lig_positions"][:].shape[0] == nr_extracted_lig_conf
if not cond1:
return False
cond2 = type(nc_handle.variables["lig_positions"][:]) == np.ndarray
if not cond2:
return False
return True
def parse_nr_ligand_confs(submit_file):
if os.path.exists(submit_file):
with open(submit_file, "r") as F:
for line in F:
if "--nr_lig_conf" in line:
nr_confs = line.split("--nr_lig_conf")[-1]
nr_confs = nr_confs.split()[0]
nr_confs = int(nr_confs)
return nr_confs
return None
|
StarcoderdataPython
|
1634864
|
<filename>K-Cap_2021/2A_KB_embeddings/inspect_embeddings.py
"""How well do our embeddings capture our annotation set vocabulary?
"""
import json
from functools import partial
import numpy as np
import pandas as pd
from gensim.models import Word2Vec
def main():
# embeddings subset tokens
with open("embeddings_subset/tokens.json", "r") as f:
tokens_subset: set = set(json.load(f))
# get the larger pool of embeddings
model = Word2Vec.load("embeddings/w2v.model")
tokens_larger: set = set(
model.wv.key_to_index
) # i.e., tokens[0] is the token corresponding to X[0]
# get the annotations
with open("data.csv", "r") as f:
data = pd.read_csv(f)
# find the matched, ...
data = data.apply(partial(compare, vocabulary=tokens_subset, col_prefix='em_subset'), axis=1)
data = data.apply(partial(compare, vocabulary=tokens_larger, col_prefix='em_larger'), axis=1)
# stats
print(f"tokens subset length:{len(tokens_subset)}")
print(f"tokens full length:{len(tokens_larger)}")
with open('inspect.csv', 'w') as f:
data.to_csv(f)
def compare(row, *, vocabulary: set, col_prefix: str) -> pd.Series:
    """Return the row with known and unknown tokens from 'text_analysed' added."""
text = row["text_analysed"]
# collect the known, unknown tokens in 'text'
known = set()
unknown = set()
for sent in text.split("<sent>"):
for token in sent.split(" "):
if token != "" and token != " ":
if token in vocabulary:
known.add(token)
else:
unknown.add(token)
# append to row
row[col_prefix + "_known"] = ", ".join(known)
row[col_prefix + "_unknown"] = ", ".join(unknown)
row[col_prefix + "%_known"] = len(known) / (len(known) + len(unknown))
return row
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
188506
|
import logging
from typing import List, Dict, Set, Union, cast, Type
import pandas as pd
from genomics_data_index.storage.SampleSet import SampleSet
from genomics_data_index.storage.model.NucleotideMutationTranslater import NucleotideMutationTranslater
from genomics_data_index.storage.model.QueryFeature import QueryFeature
from genomics_data_index.storage.model.QueryFeatureHGVS import QueryFeatureHGVS
from genomics_data_index.storage.model.QueryFeatureHGVSGN import QueryFeatureHGVSGN
from genomics_data_index.storage.model.QueryFeatureMLST import QueryFeatureMLST
from genomics_data_index.storage.model.QueryFeatureMutation import QueryFeatureMutation
from genomics_data_index.storage.model.QueryFeatureMutationSPDI import QueryFeatureMutationSPDI
from genomics_data_index.storage.model.db import NucleotideVariantsSamples, Reference, ReferenceSequence, MLSTScheme, \
SampleMLSTAlleles, MLSTAllelesSamples, Sample
from genomics_data_index.storage.model.db import SampleNucleotideVariation
from genomics_data_index.storage.service import DatabaseConnection
from genomics_data_index.storage.service import SQLQueryInBatcherDict, SQLQueryInBatcherList
logger = logging.getLogger(__name__)
class FeatureExplodeUnknownError(Exception):
def __init__(self, msg: str):
super().__init__(msg)
class SampleService:
def __init__(self, database_connection: DatabaseConnection, sql_select_limit: int):
self._connection = database_connection
self._sql_select_limit = sql_select_limit
def get_samples_with_variants(self, reference_name: str) -> List[Sample]:
"""
Gets a list of all samples that have variants associated with the given reference genome name.
:reference_name: The reference genome name.
:return: A list of Samples with variants with respect to the reference genome name, empty list of no Samples.
"""
samples = self._connection.get_session().query(Sample) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.filter(Reference.name == reference_name) \
.all()
return samples
def feature_explode_unknown(self, feature: QueryFeature) -> List[QueryFeature]:
if isinstance(feature, QueryFeatureHGVSGN):
features_spdi = self.find_features_spdi_for_hgvsgn(feature)
if len(features_spdi) == 0:
raise FeatureExplodeUnknownError(f'feature={feature} is of type HGVSGN but the corresponding SPDI '
f'feature does not exist in the database. Cannot convert to unknown '
f'SPDI representation.')
else:
unknown_features = []
for feature in features_spdi:
unknown_features.extend(feature.to_unknown_explode())
return unknown_features
elif isinstance(feature, QueryFeatureHGVS):
if feature.is_nucleotide():
variants_hgvs = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._id_hgvs_c == feature.id) \
.all()
elif feature.is_protein():
variants_hgvs = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._id_hgvs_p == feature.id) \
.all()
else:
raise Exception(f'feature=[{feature}] is neither nucleotide or protein')
if len(variants_hgvs) == 0:
raise FeatureExplodeUnknownError(f'feature={feature} is of type HGVS but the corresponding SPDI '
f'feature does not exist in the database. Cannot convert to unknown '
f'SPDI representation.')
else:
unknown_features = []
for variants_sample_obj in variants_hgvs:
unknown_features.extend(QueryFeatureMutationSPDI(variants_sample_obj.spdi).to_unknown_explode())
return unknown_features
else:
return feature.to_unknown_explode()
def find_features_spdi_for_hgvsgn(self, feature: QueryFeatureHGVSGN) -> List[QueryFeatureMutationSPDI]:
if not isinstance(feature, QueryFeatureHGVSGN):
raise Exception(f'Cannot handle feature={feature}. Not of type {QueryFeatureHGVSGN.__name__}')
query = self._connection.get_session().query(NucleotideVariantsSamples).filter(
NucleotideVariantsSamples.sequence == feature.sequence)
if feature.has_gene():
query = query.filter(NucleotideVariantsSamples.annotation_gene_name == feature.gene)
if feature.is_nucleotide():
query = query.filter(NucleotideVariantsSamples.annotation_hgvs_c == feature.variant)
elif feature.is_protein():
query = query.filter(NucleotideVariantsSamples.annotation_hgvs_p == feature.variant)
else:
raise Exception(f'feature={feature} is neither protein nor nucleotide')
return [QueryFeatureMutationSPDI(s.spdi) for s in query.all()]
def get_samples_with_mlst_alleles(self, scheme_name: str) -> List[Sample]:
"""
Gets a list of all samples that have MLST alleles associated with the given scheme name.
:scheme_name: The scheme name.
:return: A list of Samples with MLST alleles with respect to the scheme name, empty list of no Samples.
"""
samples = self._connection.get_session().query(Sample) \
.join(Sample.sample_mlst_alleles) \
.join(SampleMLSTAlleles.scheme) \
.filter(MLSTScheme.name == scheme_name) \
.all()
return samples
def get_samples_with_variants_on_sequence(self, sequence_name: str) -> List[Sample]:
"""
Gets a list of all samples that have variants associated with the given sequence name.
:sequence_name: The sequence name.
:return: A list of Samples with variants with respect to the sequence name, empty list of no Samples.
"""
samples = self._connection.get_session().query(Sample) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.join(Reference.sequences) \
.filter(ReferenceSequence.sequence_name == sequence_name) \
.all()
return samples
def get_samples_associated_with_reference(self, reference_name: str) -> List[Sample]:
"""
Gets a list of all samples associated with a reference name.
:reference_name: The reference name.
:return: A list of Samples associated with the reference name or an empty list if no Samples.
"""
samples = self._connection.get_session().query(Sample) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.filter(Reference.name == reference_name) \
.all()
return samples
def get_samples_set_associated_with_reference(self, reference_name: str) -> SampleSet:
"""
Gets a list of all samples associated with a reference name.
:reference_name: The reference name.
:return: A list of Samples associated with the reference name or an empty list if no Samples.
"""
sample_ids = [i for i, in self._connection.get_session().query(Sample.id) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.filter(Reference.name == reference_name) \
.all()]
return SampleSet(sample_ids=sample_ids)
def create_dataframe_from_sample_set(self, present_set: SampleSet,
absent_set: SampleSet,
unknown_set: SampleSet,
queries_expression: str) -> pd.DataFrame:
sample_sets_status_list = [(present_set, 'Present'), (absent_set, 'Absent'), (unknown_set, 'Unknown')]
data = []
for sample_status in sample_sets_status_list:
sample_set = sample_status[0]
status = sample_status[1]
if not sample_set.is_empty():
samples = self.find_samples_by_ids(sample_set)
for sample in samples:
data.append([queries_expression, sample.name, sample.id, status])
return pd.DataFrame(data=data, columns=['Query', 'Sample Name', 'Sample ID', 'Status'])
def count_samples_associated_with_reference(self, reference_name: str) -> int:
return self._connection.get_session().query(Sample) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.filter(Reference.name == reference_name) \
.count()
def count_samples_associated_with_mlst_scheme(self, scheme_name: str) -> int:
return len(self.get_samples_with_mlst_alleles(scheme_name))
def get_samples(self) -> List[Sample]:
return self._connection.get_session().query(Sample).all()
def count_samples(self) -> int:
return self._connection.get_session().query(Sample).count()
def get_all_sample_ids(self) -> SampleSet:
ids_list = [id for id, in self._connection.get_session().query(Sample.id).all()]
return SampleSet(ids_list)
def get_existing_samples_by_names(self, sample_names: List[str]) -> List[Sample]:
return self._connection.get_session().query(Sample) \
.filter(Sample.name.in_(sample_names)) \
.all()
def which_exists(self, sample_names: List[str]) -> List[str]:
"""
Returns which of the given samples exist in the database.
:param sample_names: The list of sample names.
:return: A list of those passed sample names that exist in the database.
"""
samples = self._connection.get_session().query(Sample) \
.filter(Sample.name.in_(sample_names)) \
.all()
return [sample.name for sample in samples]
def get_sample(self, sample_name: str) -> Sample:
return self._connection.get_session().query(Sample) \
.filter(Sample.name == sample_name) \
.one()
def exists(self, sample_name: str):
return self._connection.get_session().query(Sample) \
.filter(Sample.name == sample_name).count() > 0
def find_samples_by_ids(self, sample_ids: Union[List[int], SampleSet]) -> List[Sample]:
if isinstance(sample_ids, SampleSet):
sample_ids = list(sample_ids)
query_batcher = SQLQueryInBatcherList(in_data=sample_ids, batch_size=self._sql_select_limit)
def handle_batch(sample_ids_batch: List[int]) -> List[Sample]:
return self._connection.get_session().query(Sample) \
.filter(Sample.id.in_(sample_ids_batch)) \
.all()
return query_batcher.process(handle_batch)
def get_variants_samples_by_variation_features(self, features: List[QueryFeatureMutation]) -> Dict[
str, NucleotideVariantsSamples]:
standardized_features_to_input_feature = {}
standardized_features_ids = set()
standardized_feature_hgvs_c_ids = set()
standardized_feature_hgvs_p_ids = set()
for feature in features:
if isinstance(feature, QueryFeatureMutationSPDI):
dbf = NucleotideMutationTranslater.to_db_feature(feature)
if dbf.id in standardized_features_to_input_feature:
standardized_features_to_input_feature[dbf.id].append(feature.id)
else:
standardized_features_to_input_feature[dbf.id] = [feature.id]
standardized_features_ids.add(dbf.id)
elif isinstance(feature, QueryFeatureHGVSGN):
logger.warning(f'feature=[{feature}] is a QueryFeatureHGVSGN and I do not handle it here.')
elif isinstance(feature, QueryFeatureHGVS):
if feature.is_nucleotide():
standardized_feature_hgvs_c_ids.add(feature.id)
elif feature.is_protein():
standardized_feature_hgvs_p_ids.add(feature.id)
else:
raise Exception(f'feature=[{feature}] is neither nucleotide or protein')
else:
raise Exception(f'Invalid type for feature=[{feature}]. '
f'Must be either {QueryFeatureMutationSPDI.__class__.__name__} or '
f'{QueryFeatureHGVS.__class__.__name__}')
if len(standardized_features_ids) > 0:
variants_spdi = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._spdi.in_(standardized_features_ids)) \
.all()
else:
variants_spdi = []
if len(standardized_feature_hgvs_c_ids) > 0:
variants_hgvs_c = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._id_hgvs_c.in_(standardized_feature_hgvs_c_ids)) \
.all()
else:
variants_hgvs_c = []
if len(standardized_feature_hgvs_p_ids) > 0:
variants_hgvs_p = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._id_hgvs_p.in_(standardized_feature_hgvs_p_ids)) \
.all()
else:
variants_hgvs_p = []
# Map back unstandardized IDs to the actual variant object
# Use this because some features can have multiple identifiers for the same feature
# (e.g., ref:10:A:T and ref:10:1:T). I want to make sure I map each passed id to the
# same object (that is, in this example, I want to return a dictionary with two keys, one for each ID)
unstandardized_variants = {}
for v in variants_spdi:
for vid in standardized_features_to_input_feature[v.spdi]:
unstandardized_variants[vid] = v
unstandardized_variants.update({v.id_hgvs_c: v for v in variants_hgvs_c})
unstandardized_variants.update({v.id_hgvs_p: v for v in variants_hgvs_p})
return unstandardized_variants
def _get_mlst_samples_by_mlst_features(self, features: List[QueryFeatureMLST]) -> List[MLSTAllelesSamples]:
feature_ids = list({f.id_no_prefix for f in features})
mlst_alleles = self._connection.get_session().query(MLSTAllelesSamples) \
.filter(MLSTAllelesSamples._sla.in_(feature_ids)) \
.all()
return mlst_alleles
def _get_feature_type(self, features: List[QueryFeature]) -> Type[QueryFeature]:
feature_types = {f.__class__ for f in features}
if len(feature_types) != 1:
raise Exception(f'Should only be one feature type but instead got: {feature_types}.')
else:
return feature_types.pop()
def find_unknown_sample_sets_by_features(self, features: List[QueryFeature]) -> Dict[str, SampleSet]:
unknown_to_features_dict = {}
unknown_features = []
for feature in features:
try:
unknown_features_exploded = self.feature_explode_unknown(feature)
unknown_features.extend(unknown_features_exploded)
for unknown_feature in unknown_features_exploded:
unknown_to_features_dict[unknown_feature.id] = feature
except FeatureExplodeUnknownError as e:
logger.warning(
f'Could not map feature={feature} to a set of unknown features. Will assume no unknowns exist.')
if len(unknown_features) > 0:
unknown_features_sets = self.find_sample_sets_by_features(unknown_features)
else:
unknown_features_sets = set()
features_to_unknown_sample_sets = {}
for uid in unknown_features_sets:
fid = unknown_to_features_dict[uid].id
sample_set = unknown_features_sets[uid]
            # If we've already set this sample set for the same feature,
            # we need to merge the unknown sample sets together.
            # This can occur if, e.g., we have a large deletion and are iterating over each
            # base in the deletion in turn (e.g., ref:10:ATT:A gets converted to
            # ['ref:10:A:?', 'ref:11:T:?', 'ref:12:T:?']), so we merge the unknown sample
            # results for each of these features in turn.
if fid in features_to_unknown_sample_sets:
previous_sample_set = features_to_unknown_sample_sets[fid]
features_to_unknown_sample_sets[fid] = previous_sample_set.union(sample_set)
else:
features_to_unknown_sample_sets[fid] = sample_set
return features_to_unknown_sample_sets
def find_sample_sets_by_features(self, features: List[QueryFeature]) -> Dict[str, SampleSet]:
feature_type = self._get_feature_type(features)
if issubclass(feature_type, QueryFeatureHGVSGN):
# In this case where I'm querying by gene name, first convert to SPDI features before lookup
# TODO: it's not the most efficient to do this as a loop, but it's easier to implement right now
hgvs_gn_id_to_sampleset = dict()
for feature in features:
feature = cast(QueryFeatureHGVSGN, feature)
features_spdi = self.find_features_spdi_for_hgvsgn(feature)
variants_dict = self.get_variants_samples_by_variation_features(features_spdi)
variants_nuc_variants_samples = list(variants_dict.values())
if len(variants_nuc_variants_samples) == 0:
samples_union = SampleSet.create_empty()
else:
first_nuc_variant_samples = variants_nuc_variants_samples.pop()
samples_union = first_nuc_variant_samples.sample_ids
# Handle remaining, if any
for nuc_variant_samples in variants_nuc_variants_samples:
samples_union = samples_union.union(nuc_variant_samples.sample_ids)
hgvs_gn_id_to_sampleset[feature.id] = samples_union
return hgvs_gn_id_to_sampleset
elif issubclass(feature_type, QueryFeatureMutation):
features = cast(List[QueryFeatureMutation], features)
variants_dict = self.get_variants_samples_by_variation_features(features)
return {id: variants_dict[id].sample_ids for id in variants_dict}
elif issubclass(feature_type, QueryFeatureMLST):
features = cast(List[QueryFeatureMLST], features)
mlst_alleles = self._get_mlst_samples_by_mlst_features(features)
return {a.query_id: a.sample_ids for a in mlst_alleles}
else:
raise Exception(f'Invalid feature type {feature_type}')
def find_samples_by_features(self, features: List[QueryFeature]) -> Dict[str, List[Sample]]:
feature_type = self._get_feature_type(features)
if issubclass(feature_type, QueryFeatureMutation):
features = cast(List[QueryFeatureMutation], features)
variants_dict = self.get_variants_samples_by_variation_features(features)
return {id: self.find_samples_by_ids(variants_dict[id].sample_ids) for id in variants_dict}
elif issubclass(feature_type, QueryFeatureMLST):
features = cast(List[QueryFeatureMLST], features)
mlst_alleles = self._get_mlst_samples_by_mlst_features(features)
return {a.query_id: self.find_samples_by_ids(a.sample_ids) for a in mlst_alleles}
else:
raise Exception(f'Invalid feature type {feature_type}')
def count_samples_by_features(self, features: List[QueryFeature]) -> Dict[str, List[Sample]]:
feature_type = self._get_feature_type(features)
if issubclass(feature_type, QueryFeatureMutation):
features = cast(List[QueryFeatureMutation], features)
variants_dict = self.get_variants_samples_by_variation_features(features)
return {id: len(variants_dict[id].sample_ids) for id in variants_dict}
elif issubclass(feature_type, QueryFeatureMLST):
features = cast(List[QueryFeatureMLST], features)
mlst_alleles = self._get_mlst_samples_by_mlst_features(features)
allele_id_to_count = {a.query_id: len(a.sample_ids) for a in mlst_alleles}
for f in features:
if f.id not in allele_id_to_count:
allele_id_to_count[f.id] = 0
return allele_id_to_count
else:
raise Exception(f'Invalid feature type {feature_type}')
def find_sample_name_ids(self, sample_names: Set[str]) -> Dict[str, int]:
"""
Given a list of sample names, returns a dictionary mapping the sample names to sample IDs.
:param sample_names: The sample names to search.
:return: A dictionary linking the sample names to IDs.
"""
query_batcher = SQLQueryInBatcherDict(in_data=list(sample_names), batch_size=self._sql_select_limit)
def handle_batch(sample_names_batch: List[str]) -> Dict[str, int]:
sample_tuples = self._connection.get_session().query(Sample.name, Sample.id) \
.filter(Sample.name.in_(sample_names_batch)) \
.all()
return dict(sample_tuples)
return query_batcher.process(handle_batch)
def get_sample_set_by_names(self, sample_names: Union[List[str], Set[str]],
ignore_not_found: bool = False) -> SampleSet:
"""
Given a collection of sample names, get a SampleSet of the corresponding IDs.
:param sample_names: The names to convert to an ID set.
:param ignore_not_found: Whether or not to ignore sample names that were not found.
:return: A SampleSet with all the corresponding samples by the passed names. If ignore_not_found is false,
raises an exception if some sample names have no ids.
"""
if isinstance(sample_names, list):
sample_names = set(sample_names)
elif not isinstance(sample_names, set):
raise Exception(f'Invalid type=[{type(sample_names)}] for passed sample_names. Must be list or set.')
sample_ids_tuples = self._connection.get_session().query(Sample.id) \
.filter(Sample.name.in_(sample_names)) \
.all()
sample_ids = {i for i, in sample_ids_tuples}
sample_set = SampleSet(sample_ids=sample_ids)
if ignore_not_found or len(sample_names) == len(sample_set):
return sample_set
else:
# Find matching sample names to ids we did find for a nicer error message
found_sample_names = {s.name for s in self.find_samples_by_ids(sample_set)}
names_not_found = sample_names - found_sample_names
if len(names_not_found) > 10:
small_not_found = list(names_not_found)[:10]
msg = f'[{", ".join(small_not_found)}, ...]'
else:
msg = f'{names_not_found}'
raise Exception(f'Did not find an equal number of sample names and ids. '
f'Number sample_names={len(sample_names)}. Number returned sample_ids={len(sample_ids)}. '
f'Sample names with missing ids {msg}')
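# A minimal usage sketch of the query methods above. Assumptions: `service` is
# an instance of the containing query-service class (its name sits outside this
# excerpt) already wired to a database connection, and `features` is a list of
# QueryFeature objects; both names are hypothetical.
def _example_feature_queries(service, features):
    counts = service.count_samples_by_features(features)     # feature id -> sample count
    samples = service.find_samples_by_features(features)     # feature id -> list of Sample
    sample_set = service.get_sample_set_by_names({'SampleA', 'SampleB'},
                                                 ignore_not_found=True)
    return counts, samples, sample_set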
|
StarcoderdataPython
|
1790923
|
<filename>bindings/python/setup.py
try:
import distribute_setup
distribute_setup.use_setuptools()
except Exception:  # the bootstrap is optional; ignore it if unavailable
pass
from setuptools import setup, find_packages
import os
import re
with open(os.path.join(os.path.dirname(__file__), 'opentick',
'__init__.py')) as f:
version = re.search("__version__ = '([^']+)'", f.read()).group(1)
with open('requirements.txt', 'r') as f:
requires = [x.strip() for x in f if x.strip()]
with open('README.rst', 'r') as f:
readme = f.read()
setup(
name='opentick',
version=version,
author='<NAME>',
description='OpenTick SDK',
author_email='<EMAIL>',
long_description=readme,
url='https://github.com/opentradesolutions/opentick',
license='Apache License',
packages=find_packages(exclude=['tests']),
install_requires=requires,
classifiers=(
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Database :: Database Engines/Servers',
'Intended Audience :: Information Technology',
'Intended Audience :: Financial and Insurance Industry'
),
)
|
StarcoderdataPython
|
3301305
|
<reponame>scp2/pythonpep8typing
class filanormal:
    # Assumed initial values: the original listed bare names, which is not
    # valid Python; 0 and '' keep the rest of the class working unchanged.
    codigo = 0
    fila = []
    clintesatendidos = []
    senhaatual = ''
def gerasenhaatual(self)->None:
self.senhaatual = f'NM{self.codigo}'
def resetafila(self)->None:
if self.codigo >= 100:
self.codigo = 0
else:
self.codigo += 1
def atualizafila(self)->None:
self.resetafila()
self.gerasenhaatual()
self.fila.append(self.senhaatual)
def chamacliente(self, caixa)->str:
clienteatual = self.fila.pop(0)
self.clintesatendidos.append(clienteatual)
return f'Cliente atual: {clienteatual}, dirija-se ao caixa {caixa}'
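# A short usage sketch of the queue class above (relies on the assumed initial
# class attributes): two tickets are issued, then the first client is called
# to counter 3.
if __name__ == '__main__':
    fila = filanormal()
    fila.atualizafila()                 # issues 'NM1'
    fila.atualizafila()                 # issues 'NM2'
    print(fila.chamacliente(caixa=3))   # 'Cliente atual: NM1, dirija-se ao caixa 3'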
|
StarcoderdataPython
|
131301
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..qt1 import FitQt1
def test_FitQt1_inputs():
input_map = dict(
acceptance=dict(argstr="-acceptance %f",),
args=dict(argstr="%s",),
b1map=dict(argstr="-b1map %s", extensions=None,),
comp_file=dict(
argstr="-comp %s",
extensions=None,
name_source=["source_file"],
name_template="%s_comp.nii.gz",
),
environ=dict(nohash=True, usedefault=True,),
error_file=dict(
argstr="-error %s",
extensions=None,
name_source=["source_file"],
name_template="%s_error.nii.gz",
),
flips=dict(argstr="-flips %s", sep=" ",),
flips_list=dict(argstr="-fliplist %s", extensions=None,),
gn_flag=dict(argstr="-gn", position=8,),
ir_flag=dict(argstr="-IR", position=13,),
lm_val=dict(argstr="-lm %f %f", position=7,),
m0map_file=dict(
argstr="-m0map %s",
extensions=None,
name_source=["source_file"],
name_template="%s_m0map.nii.gz",
),
mask=dict(argstr="-mask %s", extensions=None, position=2,),
maxit=dict(argstr="-maxit %d", position=11,),
mcmap_file=dict(
argstr="-mcmap %s",
extensions=None,
name_source=["source_file"],
name_template="%s_mcmap.nii.gz",
),
mcmaxit=dict(argstr="-mcmaxit %d",),
mcout=dict(argstr="-mcout %s", extensions=None,),
mcsamples=dict(argstr="-mcsamples %d",),
nb_comp=dict(argstr="-nc %d", position=6,),
prior=dict(argstr="-prior %s", extensions=None, position=3,),
res_file=dict(
argstr="-res %s",
extensions=None,
name_source=["source_file"],
name_template="%s_res.nii.gz",
),
slice_no=dict(argstr="-slice %d", position=9,),
source_file=dict(
argstr="-source %s", extensions=None, mandatory=True, position=1,
),
spgr=dict(argstr="-SPGR",),
sr_flag=dict(argstr="-SR", position=12,),
syn_file=dict(
argstr="-syn %s",
extensions=None,
name_source=["source_file"],
name_template="%s_syn.nii.gz",
),
t1_list=dict(argstr="-T1list %s", extensions=None,),
t1map_file=dict(
argstr="-t1map %s",
extensions=None,
name_source=["source_file"],
name_template="%s_t1map.nii.gz",
),
t1max=dict(argstr="-T1max %f",),
t1min=dict(argstr="-T1min %f",),
te_value=dict(argstr="-TE %f", position=4,),
tis=dict(argstr="-TIs %s", position=14, sep=" ",),
tis_list=dict(argstr="-TIlist %s", extensions=None,),
tr_value=dict(argstr="-TR %f", position=5,),
voxel=dict(argstr="-voxel %d %d %d", position=10,),
)
inputs = FitQt1.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_FitQt1_outputs():
output_map = dict(
comp_file=dict(extensions=None,),
error_file=dict(extensions=None,),
m0map_file=dict(extensions=None,),
mcmap_file=dict(extensions=None,),
res_file=dict(extensions=None,),
syn_file=dict(extensions=None,),
t1map_file=dict(extensions=None,),
)
outputs = FitQt1.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
StarcoderdataPython
|
1617889
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Evento(models.Model):
titulo = models.CharField(max_length=100, verbose_name='Título')
descricao = models.TextField(blank=True,null=True, verbose_name='Descrição')
data_evento = models.DateTimeField(verbose_name='Data do Evento')
data_criacao = models.DateTimeField(auto_now=True, verbose_name='Data de criação do Evento')
usuario = models.ForeignKey(User, on_delete=models.CASCADE)
local = models.CharField(choices=[
('Sampa','São Paulo'),
('Rio', 'Rio de Janeiro'),
('Santos', 'Santos')],
null=True, blank=True, max_length=50)
def get_data_evento(self):
return self.data_evento.strftime('%d/%m/%Y às %H:%M')
class Meta:
db_table = 'Evento'
def __str__(self):
return self.titulo
def get_data_input_evento(self):
return self.data_evento.strftime('%Y-%m-%dT%H:%M')
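# A hedged usage sketch (assumes a configured Django project with this app
# installed and migrations applied; `some_user` is a hypothetical User instance).
def _example_create_evento(some_user):
    from django.utils import timezone
    evento = Evento.objects.create(
        titulo='Reunião de planejamento',
        descricao='Pauta inicial',
        data_evento=timezone.now(),
        usuario=some_user,
        local='Sampa',
    )
    return evento.get_data_evento()   # e.g. '01/01/2024 às 10:00'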
|
StarcoderdataPython
|
3223498
|
<gh_stars>0
# -*- coding: utf-8 -*-
import json
from typing import Type
from zvt.contract.api import del_data, get_db_session
from zvt.contract.schema import Mixin
class StatefulService(object):
state_schema: Type[Mixin] = None
name = None
def __init__(self) -> None:
assert self.state_schema is not None
self.state_session = get_db_session(data_schema=self.state_schema, provider='zvt')
self.state_domain = self.state_schema.get_one(id=self.get_state_entity_id())
if self.state_domain:
self.state: dict = self.decode_state(self.state_domain.state)
else:
self.state = None
def get_state_entity_id(self):
if self.name is not None:
return self.name
return type(self).__name__.lower()
def clear_state_data(self):
del_data(self.state_schema, filters=[self.state_schema.entity_id == self.get_state_entity_id()])
def decode_state(self, state: str):
        # deserialize: turn the persisted JSON string back into a Python dict
return json.loads(state, object_hook=self.state_object_hook())
def encode_state(self, state: object):
        # serialize: encode the state object as a JSON string
return json.dumps(state, cls=self.state_encoder())
def state_object_hook(self):
return None
def state_encoder(self):
return None
def persist_state(self):
state_str = self.encode_state(self.state)
if not self.state_domain:
self.state_domain = self.state_schema(id=self.get_state_entity_id(), entity_id=self.get_state_entity_id())
self.state_domain.state = state_str
self.state_session.add(self.state_domain)
self.state_session.commit()
# the __all__ is generated
__all__ = ['StatefulService']
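# A minimal subclass sketch. Assumption: a concrete schema class deriving from
# zvt's Mixin must be supplied before the subclass can be instantiated (the
# base __init__ asserts on it); `MyStateSchema` below is a hypothetical stand-in.
class ExampleStatefulService(StatefulService):
    state_schema = None   # replace with a real schema class, e.g. MyStateSchema
    name = 'example_stateful_service'
    def record_run(self):
        # keep a simple run counter in the persisted state dict
        if self.state is None:
            self.state = {'runs': 0}
        self.state['runs'] += 1
        self.persist_state()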
|
StarcoderdataPython
|
1709438
|
import re
import os
import shutil
import tempfile
try:
from tempfile import TemporaryDirectory
except ImportError:
class TemporaryDirectory(object):
def __init__(self):
self.name = tempfile.mkdtemp()
def __del__(self):
self.cleanup()
def __enter__(self):
return self.name
def __exit__(self,A,B,C):
self.cleanup()
def cleanup(self):
if self.name is not None:
shutil.rmtree(self.name, ignore_errors=True)
self.name = None
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
from distutils.errors import DistutilsExecError, CompileError
from distutils import log
__all__ = (
'ProbeToolchain',
)
class ProbeToolchain(object):
"""Inspection of compiler
:param bool verbose: If True, enable additional prints
:param str compiler: If not None, select non-default compiler toolchain
:param list headers: List of headers to include during all test compilations
:param list define_macros: List of (macro, value) tuples to define during all test compilations
"""
def __init__(self, verbose=False,
compiler=None,
headers=[], define_macros=[]):
self.verbose = verbose
self.headers = list(headers)
self.define_macros = list(define_macros)
self.compiler = new_compiler(compiler=compiler,
verbose=self.verbose,
dry_run=False,
force=True)
customize_compiler(self.compiler)
# TODO: quiet compile errors?
# clang '-flto' produces LLVM bytecode instead of ELF object files.
# LLVM has a funny encoding for string constants which is hard to
        # parse for sizeof() detection, so we omit '-flto' for test compiles
for name in ('compiler', 'compiler_so', 'compiler_cxx') + ('compile_options', 'compile_options_debug'):
ccmd = getattr(self.compiler, name, None)
if ccmd is not None:
ccmd = [arg for arg in ccmd if arg!='-flto']
setattr(self.compiler, name, ccmd)
self._tdir = TemporaryDirectory()
self.tempdir = self._tdir.name
def compile(self, src, language='c', define_macros=[], **kws):
"""Compile provided source code and return path to resulting object file
:returns: Path string to object file in temporary location.
:param str src: Source code string
:param str language: Source code language: 'c' or 'c++'
:param list define_macros: Extra macro definitions.
:param list include_dirs: Extra directories to search for headers
:param list extra_compile_args: Extra arguments to pass to the compiler
"""
define_macros = self.define_macros + list(define_macros)
for ext, lang in self.compiler.language_map.items():
if lang==language:
srcname = os.path.join(self.tempdir, 'try_compile' + ext)
break
else:
raise ValueError('unknown language '+language)
log.debug('/* test compile */\n'+src)
with open(srcname, 'w') as F:
F.write(src)
objs = self.compiler.compile([srcname],
output_dir=self.tempdir,
macros=define_macros,
**kws
)
assert len(objs)==1, (srcname, objs)
return objs[0]
def try_compile(self, src, **kws):
"""Return True if provided source code compiles
:param str src: Source code string
:param str language: Source code language: 'c' or 'c++'
:param list define_macros: Extra macro definitions.
:param list include_dirs: Extra directories to search for headers
:param list extra_compile_args: Extra arguments to pass to the compiler
"""
try:
self.compile(src, **kws)
return True
except (DistutilsExecError, CompileError) as e:
return False
def check_includes(self, headers, **kws):
"""Return true if all of the headers may be included (in order)
:param list headers: List of header file names
:param str language: Source code language: 'c' or 'c++'
:param list define_macros: Extra macro definitions.
:param list include_dirs: Extra directories to search for headers
:param list extra_compile_args: Extra arguments to pass to the compiler
"""
src = ['#include <%s>'%h for h in self.headers+list(headers)]
ret = self.try_compile('\n'.join(src), **kws)
log.info('Probe includes %s -> %s', headers, 'Present' if ret else 'Absent')
return ret
def check_include(self, header, **kws):
"""Return true if the header may be included
:param str header: Header file name
:param str language: Source code language: 'c' or 'c++'
:param list define_macros: Extra macro definitions.
:param list include_dirs: Extra directories to search for headers
:param list extra_compile_args: Extra arguments to pass to the compiler
"""
return self.check_includes([header], **kws)
def sizeof(self, typename, headers=(), **kws):
"""Return size in bytes of provided typename
:param str typename: Header file name
:param list headers: List of headers to include during all test compilations
:param str language: Source code language: 'c' or 'c++'
:param list define_macros: Extra macro definitions.
:param list include_dirs: Extra directories to search for headers
:param list extra_compile_args: Extra arguments to pass to the compiler
"""
# borrow a trick from CMake. see Modules/CheckTypeSize.c.in
src = ['#include <%s>'%h for h in self.headers+list(headers)]
src += [
'#define PROBESIZE (sizeof(%s))'%typename,
"char probe_info[] = {'P','R','O','B','E','I','N','F','O','[',"
" ('0'+((PROBESIZE/10000)%10)),"
" ('0'+((PROBESIZE/1000)%10)),"
" ('0'+((PROBESIZE/100)%10)),"
" ('0'+((PROBESIZE/10)%10)),"
" ('0'+((PROBESIZE/1)%10)),"
"']'};",
""
]
obj = self.compile('\n'.join(src), **kws)
with open(obj, 'rb') as F:
raw = F.read()
if raw.find(b'\x01\x01\x01P\x01\x01\x01R\x01\x01\x01O\x01\x01\x01B\x01\x01\x01E\x01\x01\x01I\x01\x01\x01N\x01\x01\x01F\x01\x01\x01O')!=-1:
# MSVC
raw = raw.replace(b'\x01\x01\x01', b'')
M = re.match(b'.*PROBEINFO\\[(\\d+)\\].*', raw, re.DOTALL)
if M is None:
print(repr(raw))
raise RuntimeError('Unable to find PROBEINFO for %s'%typename)
size = int(M.group(1))
log.info('Probe sizeof(%s) = %d', typename, size)
return size
def check_symbol(self, symname, headers=(), **kws):
"""Return True if symbol name (macro, variable, or function) is defined/delcared
:param str symname: Symbol name
:param list headers: List of headers to include during all test compilations
:param str language: Source code language: 'c' or 'c++'
:param list define_macros: Extra macro definitions.
:param list include_dirs: Extra directories to search for headers
:param list extra_compile_args: Extra arguments to pass to the compiler
"""
src = ['#include <%s>'%h for h in self.headers+list(headers)]
src += [
'void* probe_symbol(void) {',
'#if defined(%s)'%symname,
' return 0;',
'#else',
' return (void*)&%s;'%symname,
'#endif',
'}',
''
]
ret = self.try_compile('\n'.join(src), **kws)
log.info('Probe Symbol %s -> %s', symname, 'Present' if ret else 'Absent')
return ret
def check_member(self, struct, member, headers=(), **kws):
"""Return True if the given structure has the named member
:param str struct: Structure name
:param str member: Member name
:param list headers: List of headers to include during all test compilations
:param str language: Source code language: 'c' or 'c++'
:param list define_macros: Extra macro definitions.
:param list include_dirs: Extra directories to search for headers
:param list extra_compile_args: Extra arguments to pass to the compiler
"""
src = ['#include <%s>'%h for h in self.headers+list(headers)]
src += [
'int probe_member(void) {',
' return (int)sizeof( ((%s *)0)->%s); '%(struct, member),
'}',
''
]
ret = self.try_compile('\n'.join(src), **kws)
log.info('Probe Member %s::%s -> %s', struct, member, 'Present' if ret else 'Absent')
return ret
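# A short usage sketch of the probing API above. Results depend on the local
# toolchain, so the printed values are illustrative, not fixed.
if __name__ == '__main__':
    probe = ProbeToolchain(verbose=False)
    print('have stdint.h  :', probe.check_include('stdint.h'))
    print('sizeof(uint32_t):', probe.sizeof('uint32_t', headers=['stdint.h']))
    print('have UINT32_MAX :', probe.check_symbol('UINT32_MAX', headers=['stdint.h']))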
|
StarcoderdataPython
|
3347582
|
<gh_stars>1-10
# Copyright 2020-2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import os
import base64
def main():
settings = get_settings_from_env()
server = server_factory(**settings)
server.serve_forever()
def get_settings_from_env(controller_port=None,
visualization_server_image=None, frontend_image=None,
visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None,
minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None):
"""
Returns a dict of settings from environment variables relevant to the controller
Environment settings can be overridden by passing them here as arguments.
Settings are pulled from the all-caps version of the setting name. The
following defaults are used if those environment variables are not set
to enable backwards compatibility with previous versions of this script:
visualization_server_image: gcr.io/ml-pipeline/visualization-server
visualization_server_tag: value of KFP_VERSION environment variable
frontend_image: gcr.io/ml-pipeline/frontend
frontend_tag: value of KFP_VERSION environment variable
disable_istio_sidecar: Required (no default)
minio_access_key: Required (no default)
minio_secret_key: Required (no default)
"""
settings = dict()
settings["controller_port"] = \
controller_port or \
os.environ.get("CONTROLLER_PORT", "8080")
settings["visualization_server_image"] = \
visualization_server_image or \
os.environ.get("VISUALIZATION_SERVER_IMAGE", "registry.cn-zhangjiakou.aliyuncs.com/kubeflow-zhentian/visualization-server")
settings["frontend_image"] = \
frontend_image or \
os.environ.get("FRONTEND_IMAGE", "registry.cn-zhangjiakou.aliyuncs.com/kubeflow-zhentian/frontend")
# Look for specific tags for each image first, falling back to
# previously used KFP_VERSION environment variable for backwards
# compatibility
settings["visualization_server_tag"] = \
visualization_server_tag or \
os.environ.get("VISUALIZATION_SERVER_TAG") or \
os.environ["KFP_VERSION"]
settings["frontend_tag"] = \
frontend_tag or \
os.environ.get("FRONTEND_TAG") or \
os.environ["KFP_VERSION"]
settings["disable_istio_sidecar"] = \
disable_istio_sidecar if disable_istio_sidecar is not None \
else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true"
settings["minio_access_key"] = \
minio_access_key or \
base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8')
settings["minio_secret_key"] = \
minio_secret_key or \
base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8')
# KFP_DEFAULT_PIPELINE_ROOT is optional
settings["kfp_default_pipeline_root"] = \
kfp_default_pipeline_root or \
os.environ.get("KFP_DEFAULT_PIPELINE_ROOT")
return settings
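# Illustrative only: explicit keyword arguments take precedence over the
# environment variables described in the docstring above, and explicitly passed
# MinIO credentials are used verbatim (only the env fallback gets base64-encoded).
# All values below are hypothetical.
def _example_settings_override():
    return get_settings_from_env(
        controller_port="9090",
        visualization_server_image="example.invalid/visualization-server",
        visualization_server_tag="2.0.0",
        frontend_image="example.invalid/frontend",
        frontend_tag="2.0.0",
        disable_istio_sidecar=True,
        minio_access_key="YWNjZXNz",   # already base64-encoded
        minio_secret_key="c2VjcmV0",   # already base64-encoded
    )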
def server_factory(visualization_server_image,
visualization_server_tag, frontend_image, frontend_tag,
disable_istio_sidecar, minio_access_key,
minio_secret_key, kfp_default_pipeline_root=None,
url="", controller_port=8080):
"""
Returns an HTTPServer populated with Handler with customized settings
"""
class Controller(BaseHTTPRequestHandler):
def sync(self, parent, children):
# parent is a namespace
namespace = parent.get("metadata", {}).get("name")
pipeline_enabled = parent.get("metadata", {}).get(
"labels", {}).get("pipelines.kubeflow.org/enabled")
if pipeline_enabled != "true":
return {"status": {}, "children": []}
desired_configmap_count = 1
desired_resources = []
if kfp_default_pipeline_root:
desired_configmap_count = 2
desired_resources += [{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "kfp-launcher",
"namespace": namespace,
},
"data": {
"defaultPipelineRoot": kfp_default_pipeline_root,
},
}]
# Compute status based on observed state.
desired_status = {
"kubeflow-pipelines-ready":
len(children["Secret.v1"]) == 1 and
len(children["ConfigMap.v1"]) == desired_configmap_count and
len(children["Deployment.apps/v1"]) == 2 and
len(children["Service.v1"]) == 2 and
len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and
len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and
"True" or "False"
}
# Generate the desired child object(s).
desired_resources += [
{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "metadata-grpc-configmap",
"namespace": namespace,
},
"data": {
"METADATA_GRPC_SERVICE_HOST":
"metadata-grpc-service.kubeflow",
"METADATA_GRPC_SERVICE_PORT": "8080",
},
},
# Visualization server related manifests below
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
},
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"image": f"{visualization_server_image}:{visualization_server_tag}",
"imagePullPolicy":
"IfNotPresent",
"name":
"ml-pipeline-visualizationserver",
"ports": [{
"containerPort": 8888
}],
"resources": {
"requests": {
"cpu": "50m",
"memory": "200Mi"
},
"limits": {
"cpu": "500m",
"memory": "1Gi"
},
}
}],
"serviceAccountName":
"default-editor",
},
},
},
},
{
"apiVersion": "networking.istio.io/v1alpha3",
"kind": "DestinationRule",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"host": "ml-pipeline-visualizationserver",
"trafficPolicy": {
"tls": {
"mode": "ISTIO_MUTUAL"
}
}
}
},
{
"apiVersion": "security.istio.io/v1beta1",
"kind": "AuthorizationPolicy",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
}
},
"rules": [{
"from": [{
"source": {
"principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"]
}
}]
}]
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"ports": [{
"name": "http",
"port": 8888,
"protocol": "TCP",
"targetPort": 8888,
}],
"selector": {
"app": "ml-pipeline-visualizationserver",
},
},
},
# Artifact fetcher related resources below.
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-ui-artifact"
}
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"name":
"ml-pipeline-ui-artifact",
"image": f"{frontend_image}:{frontend_tag}",
"imagePullPolicy":
"IfNotPresent",
"ports": [{
"containerPort": 3000
}],
"resources": {
"requests": {
"cpu": "10m",
"memory": "70Mi"
},
"limits": {
"cpu": "100m",
"memory": "500Mi"
},
}
}],
"serviceAccountName":
"default-editor"
}
}
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
"labels": {
"app": "ml-pipeline-ui-artifact"
}
},
"spec": {
"ports": [{
"name":
"http", # name is required to let istio understand request protocol
"port": 80,
"protocol": "TCP",
"targetPort": 3000
}],
"selector": {
"app": "ml-pipeline-ui-artifact"
}
}
},
]
print('Received request:\n', json.dumps(parent, indent=2, sort_keys=True))
print('Desired resources except secrets:\n', json.dumps(desired_resources, indent=2, sort_keys=True))
# Moved after the print argument because this is sensitive data.
desired_resources.append({
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "mlpipeline-minio-artifact",
"namespace": namespace,
},
"data": {
"accesskey": minio_access_key,
"secretkey": minio_secret_key,
},
})
return {"status": desired_status, "children": desired_resources}
def do_POST(self):
# Serve the sync() function as a JSON webhook.
observed = json.loads(
self.rfile.read(int(self.headers.get("content-length"))))
desired = self.sync(observed["parent"], observed["children"])
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(desired), 'utf-8'))
return HTTPServer((url, int(controller_port)), Controller)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
198091
|
<reponame>kira78/meson
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing as T
from .. import coredata
from ..mesonlib import MachineChoice, OptionKey
from .compilers import Compiler
from .mixins.clike import CLikeCompiler
from .mixins.gnu import GnuCompiler
from .mixins.clang import ClangCompiler
if T.TYPE_CHECKING:
from ..programs import ExternalProgram
from ..envconfig import MachineInfo
from ..environment import Environment
from ..linkers import DynamicLinker
class ObjCCompiler(CLikeCompiler, Compiler):
language = 'objc'
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice,
is_cross: bool, info: 'MachineInfo',
exe_wrap: T.Optional['ExternalProgram'],
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
Compiler.__init__(self, exelist, version, for_machine, info,
is_cross=is_cross, full_version=full_version,
linker=linker)
CLikeCompiler.__init__(self, exe_wrap)
@staticmethod
def get_display_language() -> str:
return 'Objective-C'
def sanity_check(self, work_dir: str, environment: 'Environment') -> None:
code = '#import<stddef.h>\nint main(void) { return 0; }\n'
return self._sanity_check_impl(work_dir, environment, 'sanitycheckobjc.m', code)
class GnuObjCCompiler(GnuCompiler, ObjCCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice,
is_cross: bool, info: 'MachineInfo',
exe_wrapper: T.Optional['ExternalProgram'] = None,
defines: T.Optional[T.Dict[str, str]] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
ObjCCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
GnuCompiler.__init__(self, defines)
default_warn_args = ['-Wall', '-Winvalid-pch']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
class ClangObjCCompiler(ClangCompiler, ObjCCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice,
is_cross: bool, info: 'MachineInfo',
exe_wrapper: T.Optional['ExternalProgram'] = None,
defines: T.Optional[T.Dict[str, str]] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
ObjCCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
ClangCompiler.__init__(self, defines)
default_warn_args = ['-Wall', '-Winvalid-pch']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
def get_options(self) -> 'coredata.KeyedOptionDictType':
opts = super().get_options()
opts.update({
OptionKey('std', machine=self.for_machine, lang='c'): coredata.UserComboOption(
'C language standard to use',
['none', 'c89', 'c99', 'c11', 'c17', 'gnu99', 'gnu11'],
'none',
)
})
return opts
def get_option_compile_args(self, options: 'coredata.KeyedOptionDictType') -> T.List[str]:
args = []
std = options[OptionKey('std', machine=self.for_machine, lang='c')]
if std.value != 'none':
args.append('-std=' + std.value)
return args
class AppleClangObjCCompiler(ClangObjCCompiler):
"""Handle the differences between Apple's clang and vanilla clang."""
|
StarcoderdataPython
|
6085
|
#
# PySNMP MIB module MWORKS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MWORKS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:16:04 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, Unsigned32, ObjectIdentity, IpAddress, Bits, MibIdentifier, Integer32, enterprises, ModuleIdentity, TimeTicks, Counter32, NotificationType, iso, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Unsigned32", "ObjectIdentity", "IpAddress", "Bits", "MibIdentifier", "Integer32", "enterprises", "ModuleIdentity", "TimeTicks", "Counter32", "NotificationType", "iso", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
tecElite = MibIdentifier((1, 3, 6, 1, 4, 1, 217))
meterWorks = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16))
mw501 = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1))
mwMem = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1, 1))
mwHeap = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1, 2))
mwMemCeiling = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwMemCeiling.setStatus('mandatory')
if mibBuilder.loadTexts: mwMemCeiling.setDescription('bytes of memory the agent memory manager will allow the agent to use.')
mwMemUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwMemUsed.setStatus('mandatory')
if mibBuilder.loadTexts: mwMemUsed.setDescription("bytes of memory that meterworks has malloc'ed. some of this may be in free pools.")
mwHeapTotal = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwHeapTotal.setStatus('mandatory')
if mibBuilder.loadTexts: mwHeapTotal.setDescription('bytes of memory given to the heap manager.')
mwHeapUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwHeapUsed.setStatus('mandatory')
if mibBuilder.loadTexts: mwHeapUsed.setDescription('bytes of available memory in the heap.')
mibBuilder.exportSymbols("MWORKS-MIB", mwHeap=mwHeap, mwHeapUsed=mwHeapUsed, mwMemCeiling=mwMemCeiling, meterWorks=meterWorks, tecElite=tecElite, mwMem=mwMem, mw501=mw501, mwHeapTotal=mwHeapTotal, mwMemUsed=mwMemUsed)
|
StarcoderdataPython
|
3281270
|
<gh_stars>0
"""
Create a JAR that can be used to deploy a topology to Storm.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import shutil
import sys
from fabric.api import hide, local, settings
from ..util import prepare_topology
from .common import add_simple_jar
def jar_for_deploy(simple_jar=False):
""" Build a jar to use for deploying the topology. """
# Create _resources folder which will contain Python code in JAR
prepare_topology()
# Use Leiningen to clean up and build JAR
jar_type = "JAR" if simple_jar else "Uber-JAR"
print("Cleaning from prior builds...")
sys.stdout.flush()
with hide('running', 'stdout'):
res = local("lein clean")
if not res.succeeded:
raise RuntimeError("Unable to run 'lein clean'!\nSTDOUT:\n{}"
"\nSTDERR:\n{}".format(res.stdout, res.stderr))
print("Creating topology {}...".format(jar_type))
sys.stdout.flush()
cmd = "lein jar" if simple_jar else "lein uberjar"
with hide('running'), settings(warn_only=True):
res = local(cmd, capture=True)
if not res.succeeded:
raise RuntimeError("Unable to run '{}'!\nSTDOUT:\n{}"
"\nSTDERR:\n{}".format(cmd, res.stdout,
res.stderr))
# XXX: This will fail if more than one JAR is built
lines = res.stdout.splitlines()
for line in lines:
line = line.strip()
if not line.startswith("Created"):
continue
line = line.replace("Created ", "")
# != is XOR
if simple_jar != line.endswith("standalone.jar"):
jar = line
break
else:
raise RuntimeError("Failed to find JAR in '{}' output\STDOUT:\n{}"
"STDERR:\n{}".format(cmd, res.stdout, res.stderr))
print("{} created: {}".format(jar_type, jar))
sys.stdout.flush()
print('Removing _resources temporary directory...', end='')
sys.stdout.flush()
resources_dir = os.path.join("_resources", "resources")
if os.path.isdir(resources_dir):
shutil.rmtree(resources_dir)
print('done')
return jar
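# A self-contained illustration of the "!= is XOR" selection in jar_for_deploy
# above: with simple_jar=False the standalone (uber) JAR line wins, with
# simple_jar=True the plain JAR line wins. The paths are made up.
def _select_created_jar(lines, simple_jar):
    for line in (l.strip() for l in lines):
        if not line.startswith("Created "):
            continue
        candidate = line.replace("Created ", "")
        if simple_jar != candidate.endswith("standalone.jar"):
            return candidate
    return None
# _select_created_jar(["Created target/topo-0.1.0.jar",
#                      "Created target/topo-0.1.0-standalone.jar"], False)
# -> 'target/topo-0.1.0-standalone.jar'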
def subparser_hook(subparsers):
""" Hook to add subparser for this command. """
subparser = subparsers.add_parser('jar',
description=__doc__,
help=main.__doc__)
subparser.set_defaults(func=main)
add_simple_jar(subparser)
def main(args):
""" Create a deployable JAR for a topology. """
jar_for_deploy(simple_jar=args.simple_jar)
|
StarcoderdataPython
|
52929
|
# coding=utf8
from base import ApiBase
from tornado.gen import coroutine, Return
from service.user import ServiceUser
class ApiUserBase(ApiBase):
def __init__(self, *args, **kwargs):
super(ApiUserBase, self).__init__(*args, **kwargs)
self.srv_user = ServiceUser()
class ApiUserLogin(ApiUserBase):
@coroutine
def post(self, *args, **kwargs):
username = self.get_argument('username')
password = self.get_argument('password')
        user = yield self.srv_user.find_one_by_username_password(username, password)
if user:
pass
else:
pass
class ApiUserDetail(ApiUserBase):
@coroutine
def get(self, user_id):
user = yield self.srv_user.find_one_by_id(user_id)
self.json_success(data=user)
class ApiUserRegister(ApiUserBase):
@coroutine
def post(self, *args, **kwargs):
username = self.get_argument('username')
password = self.get_argument('password')
phone = self.get_argument('phone')
sex = self.get_argument('sex')
valid, msg = yield self.srv_user.check_register(username, password, phone, sex)
if not valid:
self.json_error(msg=msg)
else:
user_id = yield self.srv_user.create(username, password, phone, sex)
self.json_success(data=user_id)
|
StarcoderdataPython
|
1688072
|
from hypothesis import given
from tests.port_tests.hints import PortedDiagram
from . import strategies
@given(strategies.diagrams)
def test_basic(diagram: PortedDiagram) -> None:
result = diagram.clear()
assert result is None
|
StarcoderdataPython
|
3274904
|
<gh_stars>0
from . import __version__ as app_version
app_name = "masar_cost"
app_title = "Moving Average Cost"
app_publisher = "KCSC"
app_description = "Modifications on Moving Average Cost"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "<EMAIL>"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/masar_cost/css/masar_cost.css"
# app_include_js = "/assets/masar_cost/js/masar_cost.js"
# include js, css files in header of web template
# web_include_css = "/assets/masar_cost/css/masar_cost.css"
# web_include_js = "/assets/masar_cost/js/masar_cost.js"
# include custom scss in every website theme (without file extension ".scss")
# website_theme_scss = "masar_cost/public/scss/website"
# include js, css files in header of web form
# webform_include_js = {"doctype": "public/js/doctype.js"}
# webform_include_css = {"doctype": "public/css/doctype.css"}
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "masar_cost.install.before_install"
# after_install = "masar_cost.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "masar_cost.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# DocType Class
# ---------------
# Override standard doctype classes
# override_doctype_class = {
# "ToDo": "custom_app.overrides.CustomToDo"
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "masar_cost.tasks.all"
# ],
# "daily": [
# "masar_cost.tasks.daily"
# ],
# "hourly": [
# "masar_cost.tasks.hourly"
# ],
# "weekly": [
# "masar_cost.tasks.weekly"
# ]
# "monthly": [
# "masar_cost.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "masar_cost.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "masar_cost.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "masar_cost.task.get_dashboard_data"
# }
# exempt linked doctypes from being automatically cancelled
#
# auto_cancel_exempted_doctypes = ["Auto Repeat"]
# User Data Protection
# --------------------
user_data_fields = [
{
"doctype": "{doctype_1}",
"filter_by": "{filter_by}",
"redact_fields": ["{field_1}", "{field_2}"],
"partial": 1,
},
{
"doctype": "{doctype_2}",
"filter_by": "{filter_by}",
"partial": 1,
},
{
"doctype": "{doctype_3}",
"strict": False,
},
{
"doctype": "{doctype_4}"
}
]
# Authentication and authorization
# --------------------------------
# auth_hooks = [
# "masar_cost.auth.validate"
# ]
fixtures = [
{"dt": "Custom Field", "filters": [
[
"name", "in", [
"Bin-actual_quantity_for_all_warehouses",
"Stock Ledger Entry-qty_after_transaction_for_all_warehouses",
"Stock Ledger Entry-stock_value_for_all_warehouses",
"Stock Ledger Entry-stock_value_difference_for_all_warehouses"
]
]
]}
]
from masar_cost.override import _stock_ledger
from masar_cost.override import _bin
from masar_cost.override import _utils_stock
from masar_cost.override import _utils_accounts
from masar_cost.override import _stock_controller
from masar_cost.override._stock_controller import StockController
from erpnext.stock import stock_ledger as _base_stock_ledger
from erpnext.stock.doctype.bin import bin as _base_bin
from erpnext.stock import utils as _base_utils_stock
from erpnext.accounts import utils as _base_utils_accounts
from erpnext.controllers import stock_controller as _base_stock_controller
from erpnext.controllers.stock_controller import StockController
from erpnext.stock.stock_ledger import update_entries_after
_base_stock_ledger.make_sl_entries = _stock_ledger.make_sl_entries
_base_stock_ledger.repost_future_sle = _stock_ledger.repost_future_sle
update_entries_after = _stock_ledger.update_entries_after
_base_utils_stock.get_incoming_rate = _utils_stock.get_incoming_rate
_base_utils_accounts.get_future_stock_vouchers = _utils_accounts.get_future_stock_vouchers
_base_utils_accounts.check_if_stock_and_account_balance_synced = _utils_accounts.check_if_stock_and_account_balance_synced
_base_bin.update_qty = _bin.update_qty
_base_stock_controller.StockController.get_gl_entries = _stock_controller.StockController.get_gl_entries
_base_stock_controller.StockController.get_stock_ledger_details = _stock_controller.StockController.get_stock_ledger_details
#_stock_ledger.make_sl_entries --- Modified
#_stock_ledger.repost_current_voucher(args, allow_negative_stock=False, via_landed_cost_voucher=False): ---Not Modified
#_stock_ledger.get_args_for_future_sle(row): ---Not Modified
#_stock_ledger.make_entry(args, allow_negative_stock=False, via_landed_cost_voucher=False): ---Not Modified
# _stock_ledger.update_entries_after.initialize_previous_data --- Modified
# _stock_ledger.update_entries_after.process_sle --- Modified
# _stock_ledger.update_entries_after.get_moving_average_values --- Modified
#_stock_ledger.update_entries_after.update_bin --- Modified
#_stock_ledger.validate_negative_qty_in_future_sle --- Modified
# _stock_ledger.get_previous_sle_of_current_voucher(args, exclude_current_voucher=False): ---Not Modified
# _stock_ledger.get_previous_sle_of_current_voucher_for_all_warehouses(args, exclude_current_voucher=False): ---New
#_stock_ledger.update_qty_in_future_sle --- Modified
# _stock_ledger.get_future_sle_with_negative_qty_for_all_warehouses(args): ------New
# _stock_ledger.get_next_stock_reco(args): ---Not Modified
#_utils.get_incoming_rate ---Modified
#_bin.update_qty ---Modified
# _bin.get_bin_details(bin_name): ---Not Modified
#_StockController.get_gl_entries ---Modified
|
StarcoderdataPython
|
3254638
|
<gh_stars>1-10
# Copyright 2017 <NAME>, <NAME>, <NAME>, <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import z3
from backward import backward_coverability
from cpn import add_firing_constraints
from petri_from_vass import PetriFromVass
from vass_smt import build_zvass_solver, set_configurations, set_configuration, forbid_upward
def _petrinet_marking_vars(vass, petrinet, variables):
state_vars, vector_vars = variables
marking_vars = [None] * petrinet.num_places()
for state in vass.states():
place = petrinet.state_place_num(state)
marking_vars[place] = state_vars[state]
for i in range(vass.dim()):
place = petrinet.component_place_num(i)
marking_vars[place] = vector_vars[i]
return marking_vars
def _add_cpn_firing_constraints(vass, solver, variables):
petrinet = PetriFromVass(vass)
init_vars, target_vars, transitions_vars = variables
init_marking_vars = _petrinet_marking_vars(vass, petrinet, init_vars)
target_marking_vars = _petrinet_marking_vars(vass, petrinet, target_vars)
petri_transitions_vars = [None] * petrinet.num_transitions()
for t in transitions_vars:
num = petrinet.transition_num(t)
petri_transitions_vars[num] = transitions_vars[t]
add_firing_constraints(petrinet, init_marking_vars,
target_marking_vars, solver,
petri_transitions_vars)
def coverability(vass, init, targets):
system = (vass, init, targets)
solver, variables = build_zvass_solver(vass)
init_vars, target_vars, _ = variables
set_configuration(solver, init_vars, init)
# _add_cpn_firing_constraints(vass, solver, variables)
solver.check()
# Precondition
solver.push()
set_configurations(solver, target_vars, targets, ">=")
result = solver.check()
solver.pop()
if result == z3.unsat:
return False
# Pruning
def z_pruning(configs):
def pred(c):
solver.push()
set_configuration(solver, target_vars, c, ">=")
result = solver.check()
solver.pop()
return result == z3.unsat
pruned = {c for c in configs if pred(c)}
for config in pruned:
forbid_upward(solver, target_vars, config)
return pruned
return backward_coverability(system, z_pruning)
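# A self-contained sketch of the incremental push/check/pop pattern used in
# z_pruning above (plain z3 only, no VASS machinery): a temporary constraint is
# added, checked, and then discarded so the base solver is unchanged.
def _example_incremental_check():
    x = z3.Int('x')
    solver = z3.Solver()
    solver.add(x >= 0)
    solver.push()
    solver.add(x < 0)                      # temporary, contradictory constraint
    pruned = (solver.check() == z3.unsat)  # True: the extra constraint is unsatisfiable
    solver.pop()                           # base solver still only knows x >= 0
    return pruned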
|
StarcoderdataPython
|
1647705
|
<reponame>ferjorosa/test-glfm<filename>demos/python/demo_completion_MNIST.py
# coding: utf-8
# # Introduction to the General Latent Feature Model (GLFM)
# # DEMO_MATRIX_COMPLETION
# In[1]:
## import necessary packages
import numpy as np # library to work with numpy arrays and math operations
from random import sample
import sys
sys.path.append('../../src/GLFMpython/')
import GLFM
import csv
import matplotlib.pyplot as plt
import pdb
# In[2]:
# ---------------------------------------------
# 1. LOAD DATA
# ---------------------------------------------
print 'Loading data...'
# DB can be downloaded here: http://pjreddie.com/projects/mnist-in-csv/
file = '../../datasets/mnist_train_small_100.csv'
N = 50
images = []
with open(file, 'r') as csv_file:
count = 0
for data in csv.reader(csv_file):
count = count + 1
# The first column is the label
label = data[0]
# The rest of columns are pixels
pixels = np.array(data[1:], dtype='float64')
images.append(pixels)
# Make those columns into a array of 8-bits pixels
# This array will be of 1D with length 784
# The pixel intensity values are integers from 0 to 255
pixels = np.array(pixels, dtype='uint8')
if count > N: # Taking only 1000 images
break
X = np.array(images).transpose() # D*N
Xtrue = X[:,sample(xrange(X.shape[1]),N)] + 1.0 # add one, since the 'n' type cannot start at zero
C = np.tile('n',(1,Xtrue.shape[0]))[0].tostring()
# In[ ]:
# ---------------------------------------------
# 2. ADDING MISSING VALUES
# ---------------------------------------------
print 'Add missing values to the observation matrix...'
perc_missing = 0.3 #percentage of missing
missing_val = -100
mask_missing = np.random.rand(Xtrue.shape[0],Xtrue.shape[1]) < perc_missing
Xmiss = np.copy(Xtrue)
Xmiss[mask_missing] = missing_val
# In[ ]:
# ---------------------------------------------
# 3. CREATING DATA STRUCTURES
# ---------------------------------------------
print 'Creating data structures...'
data = dict()
hidden = dict()
params = dict()
data['X'] = Xmiss.transpose()
data['C'] = C
#Kinit = 2
#hidden['Z'] = np.random.randint(0,2,size=(N,Kinit)).astype('float64')
params['missing'] = -100
params['Niter'] = 10
# In[ ]:
# ---------------------------------------------
# 4. RUN ALGORITHM
# ---------------------------------------------
print 'Complete matrix...'
(Xcompl,hidden) = GLFM.complete(data, params = params)
# In[ ]:
# ---------------------------------------------
# 3. VISUALIZATION OF A RANDOM IMAGE
# ---------------------------------------------
print 'Visualizing ground truth example'
f, ((ax1, ax2, ax3)) = plt.subplots(1, 3, sharex='col', sharey='row')
V = [ax1, ax2, ax3]
# Reshape the array into 28 x 28 array (2-dimensional array)
idx_ran = np.random.randint(0,data['X'].shape[0])
pixels = Xtrue[:,idx_ran]
pixels = np.array(pixels, dtype='uint8')
pixels = pixels.reshape((28, 28))
# Plot
V[0].imshow(pixels, interpolation='none') # cmap='gray',
print 'Visualizing a single example with missing...'
pixels = data['X'][idx_ran,:]
pixels = np.array(pixels, dtype='uint8')
pixels = pixels.reshape((28, 28))
V[1].imshow(pixels, interpolation='none')
print 'Visualizing a single example without missing...'
pixels = Xcompl[idx_ran,:]
pixels = np.array(pixels, dtype='uint8')
pixels = pixels.reshape((28, 28))
# Plot
V[2].imshow(pixels, interpolation='none')
#plt.ion() # interactive mode for plotting (script continues)
plt.show()
plt.pause(0.0001)
print('\n\n# -------------------')
print "# SUCCESSFUL"
print('# -------------------')
|
StarcoderdataPython
|
167634
|
<filename>model/simplebase_model.py
import tensorflow as tf # noqa
import numpy as np # noqa
from ..utils import nn # noqa
import transforms as trans
import conditionals as conds # noqa
import likelihoods as likes
from ..model import model as mod
class SimpleBaseModel(mod.Model):
# TODO: docstring.
def __init__(self, transformations,
preproc_func=None, base_distribution='gaussian',
sample_size=128, sample_size_n=1000,
trans_conditioning=True,
):
"""
Args:
transformations: list of transformation functions that take input
(and possibly conditioning) variables to transform and return
output, logdet of Jacobian, and inverse for transformation.
preproc_func:
base_distribution:
sample_size:
trans_conditioning:
"""
# Parameters
self.transformations = transformations
self.base_distribution = base_distribution
self.sample_size = sample_size
self.sample_size_n = sample_size_n
self.preproc_func = preproc_func
self.trans_conditioning = trans_conditioning
def build_graph(self, inputs, conditioning=None,
sampler_conditioning=None, forward_tensors=None):
print('Building {} Graph,\n\tconditioning {}'.format(
'SimpleBase', conditioning))
# Place holder for model input.
if self.preproc_func is not None:
inputs, inv_preproc = self.preproc_func(inputs)
else:
inv_preproc = None
N = tf.shape(inputs)[0]
self.d = int(inputs.get_shape()[2])
        # Sampling extraneous conditioning values.
if sampler_conditioning is None:
sampler_conditioning = conditioning
else:
# Allows for sampling procedure to be independent from any
# placeholder/input.
assert conditioning is not None # Need to also train conditioning.
# Do transformation on input variables.
with tf.variable_scope('transformations') as trans_scope:
self.z, self.logdet, self.invmap = trans.transformer(
inputs, self.transformations,
conditioning if self.trans_conditioning else None,
forward_tensors=forward_tensors
)
# Get the likelihood of covariates all iid according to base distro
# Note: the 3 below is for the weight, mu, sigma param of mixture
# component and not dimensionality.
self.llikes = self.logdet
with tf.variable_scope('conditionals'):
# Treat as N x nd flat covariates
flat_z = tf.reshape(self.z, shape=(N, -1, 1))
std_params = tf.tile(tf.zeros_like(flat_z), [1, 1, 3])
# Get likelihood with base distribution
self.llikes += tf.reduce_sum(likes.mixture_likelihoods(
std_params, flat_z, self.base_distribution), -1)
# Sample all tensor dimensions iid from base distribution
total_dims = self.sample_size*self.sample_size_n*self.d
self.z_samples = tf.reshape(
conds.sample_mm(
tf.zeros(shape=(total_dims, 3), dtype=tf.float32),
self.base_distribution),
(self.sample_size, self.sample_size_n, self.d))
# Invert to get samples back in original space.
with tf.variable_scope(trans_scope, reuse=True):
self.sampler = self.invmap(
self.z_samples,
sampler_conditioning if self.trans_conditioning else None)
if inv_preproc is not None:
self.sampler = inv_preproc(self.sampler)
return self.llikes, self.sampler
|
StarcoderdataPython
|
3282973
|
<reponame>nishadg246/stripstream-ivan-nishad<filename>stripstream/fts/derived.py
from stripstream.pddl.logic.predicates import Predicate
from stripstream.pddl.objects import Constant
from stripstream.pddl.operators import STRIPSAxiom
from stripstream.fts.variable import FreeParameter, VarMember, Par
def create_axiom(con_form, var_names, params, inputs, var_map, axiom_map):
key = (con_form, tuple(var_names), tuple(params), tuple(inputs))
if key not in axiom_map:
conditions = []
for name, nested in zip(var_names, params):
if name is not None:
conditions.append(var_map[name].predicate(*nested))
con_params = [nested[-1] for nested in params]
conditions.append(con_form.predicate(*con_params))
DerPred = Predicate('_der_%s'%len(axiom_map), [par.type for par in inputs])
effect = DerPred(*inputs)
axiom_map[key] = STRIPSAxiom(conditions, [effect])
return axiom_map[key].effects[0]
def get_derived(con, var_map, axiom_map, constants):
# TODO - worry about repeats
# NOTE - difference between variable and dtype causes some issues here
# TODO - maybe it would just be easier to decompose into parameters first?
param_map = {}
num_params = [0]
def get_item(item, dtype):
if isinstance(item, FreeParameter) or isinstance(item, VarMember):
if item not in param_map:
num_params[0] += 1
param_map[item] = Par('%s'%num_params[0], dtype)
return param_map[item]
num_params[0] += 1
return Par('%s'%num_params[0], dtype)
var_names = []
params = []
inputs = []
constant_map = {}
for i, item in enumerate(con.values):
if isinstance(item, VarMember):
name, args = item.var[0], item.var[1:]
var_names.append(name)
assert len(var_map[name].args) == len(args)
nested = []
for j, arg in enumerate(args):
param = get_item(arg, var_map[name].args[j])
#if not isinstance(arg, Param):
if arg in constants:
inputs.append(param)
constant_map[param] = arg
nested.append(param)
nested.append(get_item(item, var_map[name].dtype))
params.append(tuple(nested))
else:
var_names.append(None)
param = get_item(item, con.constraint.types[i])
#if not isinstance(item, Param):
if item in constants:
inputs.append(param) # Not possible for param to be inputs already
constant_map[param] = item
params.append((param,))
# Do this uniquely given the vars and parameter order
# Parameter order will be unique
# NOTE - I could just cache on the parameter objects used. This is crude but it would work
# TODO - make an axiom for the form without a constant
# NOTE - the object and region should be a control parameter?
# NOTE - this would have been different if they used free parameters as well (i.e. for any object)
#assert len(params) == len(constants)
effect = create_axiom(con.constraint, var_names, params, inputs, var_map, axiom_map)
#new_args = [ty(constant_map[arg]) for ty, arg in zip(effect.predicate.types, effect.args)]
new_args = []
for ty, arg in zip(effect.predicate.types, effect.args):
value = constant_map[arg]
if not isinstance(value, FreeParameter) and not isinstance(value, Constant): # TODO - probably shouldn't include this here
value = ty(value)
new_args.append(value)
return effect.predicate(*new_args)
|
StarcoderdataPython
|
3390773
|
<gh_stars>0
# -*- coding: utf-8 -*-
import pytest
from wemake_python_styleguide.visitors.wrong_name import (
BAD_MODULE_METADATA_VARIABLES,
WrongModuleMetadataViolation,
WrongModuleMetadataVisitor,
)
module_test = """
{0} = 'Nikita'
"""
nested_test = """
class ORM:
{0} = None
"""
@pytest.mark.parametrize('bad_name', BAD_MODULE_METADATA_VARIABLES)
@pytest.mark.parametrize('code', [
module_test,
])
def test_wrong_metadata(
assert_errors, parse_ast_tree, bad_name, code,
):
"""Testing that metadata can not have blacklisted names."""
tree = parse_ast_tree(code.format(bad_name))
visiter = WrongModuleMetadataVisitor()
visiter.visit(tree)
assert_errors(visiter, [WrongModuleMetadataViolation])
@pytest.mark.parametrize('correct_name', ['correct_name', 'xy', '_value'])
@pytest.mark.parametrize('code', [
module_test,
nested_test,
])
def test_correct_metadata(
assert_errors, parse_ast_tree, code, correct_name,
):
"""Testing that metadata can have normal names."""
tree = parse_ast_tree(code.format(correct_name))
visiter = WrongModuleMetadataVisitor()
visiter.visit(tree)
assert_errors(visiter, [])
|
StarcoderdataPython
|
3232216
|
<gh_stars>0
# https://github.com/TDD-Katas/frequent-words
# Given a list of words, print the list of top N most frequently occurring words in the list
# together with their frequency of occurrence.
# The output should be sorted in descending order by frequency of occurrence.
# If two words occur with the same frequency then they should be ordered in alphabetical order.
# Input:
#
# N = 3
# list = united, states, america, north, america, south, america, south, africa, north, korea
#
# Output:
#
# 3 america
# 2 north
# 2 south
from collections import Counter
def most_frequent(words, n):
    # Rank by descending frequency, breaking ties alphabetically (as the
    # problem statement above requires), then take the top n.
    ranked = sorted(Counter(words).items(), key=lambda wc: (-wc[1], wc[0]))
    return ranked[:n]
def test_most_frequent():
assert most_frequent(['apple', 'banana', 'apple', 'yeti'], 2) == [('apple', 2), ('banana', 1)]
def test_most_frequent_same_frequency():
assert most_frequent(['banana', 'apple', 'apple', 'banana'], 2) == [('apple', 2), ('banana', 2)]
def test_most_frequent_empty():
assert most_frequent([], 2) == []
|
StarcoderdataPython
|
1707508
|
<gh_stars>10-100
import collections
from typing import List
class Solution:
def subarraysDivByK(self, A: List[int], K: int) -> int:
res = sm = 0
sums = collections.defaultdict(int)
sums[0] = 1
for a in A:
sm = (sm + a) % K
sums[sm] += 1
res += sums[sm] - 1
return res
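# Quick check with a classic example: prefix sums are bucketed by residue
# modulo K, and equal residues delimit subarrays whose sum is divisible by K.
if __name__ == '__main__':
    print(Solution().subarraysDivByK([4, 5, 0, -2, -3, 1], 5))  # 7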
|
StarcoderdataPython
|
3377120
|
from .nodes import Function, ActionNode, ConditionNode, SequenceNode, FallbackNode, DecoratorNode, add_child
import uuid
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
def controlNode(treeObj):
node = None
if treeObj.SUBTYPE == "SEQUENCE":
node = SequenceNode()
elif treeObj.SUBTYPE == "FALLBACK":
node = FallbackNode()
for c in treeObj.children:
add_child(node, create_bht(c))
return node
def decoratorNode(treeObj):
func = None
if treeObj.SUBTYPE == "INVERSE":
# Invert the child's result: SUCCESS <-> FAILED; any other status (e.g. RUNNING) passes through.
func = Function('returnable = NodeStates.FAILED if status == NodeStates.SUCCESS else NodeStates.SUCCESS if status == NodeStates.FAILED else status')
elif treeObj.SUBTYPE == "FORCE_SUCCESS":
func = Function('returnable = NodeStates.SUCCESS')
elif treeObj.SUBTYPE == "FORCE_FAIL":
func = Function('returnable = NodeStates.FAILED')
elif treeObj.SUBTYPE == "LOOP":
func = Function(f'returnable = NodeStates.RUNNING if counter < {treeObj.COUNTER} else status')
node = DecoratorNode(func, treeObj.SUBTYPE)
for c in treeObj.children:
add_child(node, create_bht(c))
return node
def actionNode(treeObj):
node = ActionNode(Function(treeObj.CODE), treeObj.SUBTYPE)
return node
def conditionNode(treeObj):
node = ConditionNode(Function(treeObj.CODE, True), str(uuid.uuid4()))
return node
#############################################################################################
nodeTypes = {
"CONTROL": controlNode,
"DECORATOR": decoratorNode,
"ACTION": actionNode,
"CONDITION": conditionNode
}
#############################################################################################
def create_bht(treeObj):
func = nodeTypes[treeObj.TYPE]
return func(treeObj)
def string_bht_train(tree):
rootNode = FallbackNode()
sequences = tree.split('|')
for seq in sequences:
seqNode = SequenceNode()
conditions, actions = seq.split(';')
conditions = conditions.split(',')
for con in conditions:
add_child(seqNode, ConditionNode(Function(f'game.passCondition("{con}")', True), str(uuid.uuid4())))
actions = actions.split(',')
for act in actions:
loop, index = list(act)
node = ActionNode(Function(f'game.play({index})'), f'{index}')
if loop != '0':
auxNode = DecoratorNode(Function(f'returnable = NodeStates.RUNNING if counter < {loop} else status'), 'LOOP')
add_child(auxNode, node)
node = auxNode
add_child(seqNode, node)
add_child(rootNode, seqNode)
add_child(rootNode, ActionNode(Function('game.play(0)'), '0'))
return rootNode
def string_bht(tree):
rootNode = FallbackNode()
sequences = tree.split('|')
for seq in sequences:
seqNode = SequenceNode()
conditions, actions = seq.split(';')
conditions = conditions.split(',')
for con in conditions:
add_child(seqNode, ConditionNode(Function(f'({con})', True), str(uuid.uuid4())))
actions = actions.split(',')
for act in actions:
index = int(act)
actionNum = index % 4
loopNum = index // 4
node = ActionNode(Function(f'observation, reward, done, info = env.step({actionNum})\nx, y, vel_x, vel_y, vel_ang, ang, l_left, l_right = observation\nfitness += reward'), act)
if loopNum > 0:
auxNode = DecoratorNode(Function(f'returnable = NodeStates.RUNNING if counter < {loopNum} else status'), 'LOOP')
add_child(auxNode, node)
node = auxNode
add_child(seqNode, node)
add_child(rootNode, seqNode)
add_child(rootNode, ActionNode(Function('observation, reward, done, info = env.step(0)\nx, y, vel_x, vel_y, vel_ang, ang, l_left, l_right = observation\nfitness += reward'), '0'))
return rootNode
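# A hedged usage sketch of the string format parsed by string_bht above; the
# concrete condition expressions and action codes below are illustrative
# assumptions only. Sequences are separated by '|', each sequence is
# "<conditions>;<actions>" with comma-separated parts, and an action integer n
# encodes env action n % 4, looped n // 4 times via a LOOP decorator.
# example_root = string_bht("vel_y < -0.1;1|ang > 0.2;6")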
|
StarcoderdataPython
|
52416
|
<reponame>FranckLejzerowicz/routine_qiime2_analyses
# ----------------------------------------------------------------------------
# Copyright (c) 2020, <NAME>.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import glob
import pandas as pd
from os.path import dirname, isfile, splitext
from skbio.stats.ordination import OrdinationResults
from routine_qiime2_analyses.analyses_prep import AnalysisPrep
from routine_qiime2_analyses._routine_q2_cmds import (
get_case, get_new_meta_pd, run_import, run_export, filter_feature_table,
)
from routine_qiime2_analyses._routine_q2_io_utils import (
read_yaml_file, get_analysis_folder
)
from routine_qiime2_analyses._routine_q2_taxonomy import (
get_split_taxonomy
)
from routine_qiime2_analyses._routine_q2_mmbird import (
get_order_omics, get_pc_sb_correlations, edit_ordi_qzv, get_qzs,
get_biplot_commands, get_xmmvec_commands, get_paired_heatmaps_command
)
class PostAnalysis(object):
def __init__(self, config, project) -> None:
self.config = config
self.project = project
self.cmds = {}
self.mmvec_songbird_pd = pd.DataFrame()
self.taxo_pds = {}
self.metas = {}
self.mmvec_res = {}
self.mmvec_issues = set()
self.xmmvecs = read_yaml_file(config.xmmvec)
self.highlights = read_yaml_file(config.mmvec_highlights)
def merge_mmvec_songbird(self, songbird_pd):
rename_dic1 = dict(
(x, '%s_omic1_songbird_common_fp' % x) for x in songbird_pd.columns)
rename_dic1.update({'pair_omic_subset_filt': 'pair_omic_subset_filt1'})
self.mmvec_songbird_pd = self.mmvec_songbird_pd.merge(
songbird_pd.rename(columns=rename_dic1),
on='pair_omic_subset_filt1', how='left')
rename_dic2 = dict(
(x, '%s_omic2_songbird_common_fp' % x) for x in songbird_pd.columns)
rename_dic2.update({'pair_omic_subset_filt': 'pair_omic_subset_filt2'})
self.mmvec_songbird_pd = self.mmvec_songbird_pd.merge(
songbird_pd.rename(columns=rename_dic2),
on='pair_omic_subset_filt2', how='left')
def prep_mmvec(self, mmvec_pd):
mmvec_pd = mmvec_pd.set_index(mmvec_pd.columns.tolist()[:-1]).unstack()
mmvec_pd.columns = mmvec_pd.columns.droplevel()
mmvec_pd.reset_index(inplace=True)
mmvec_pd['filter1'] = mmvec_pd['pr1'] + '_' + mmvec_pd['ab1']
mmvec_pd['filter2'] = mmvec_pd['pr2'] + '_' + mmvec_pd['ab2']
mmvec_pd['omic_subset_filt1'] = mmvec_pd.apply(
lambda x: '__'.join(x[['omic1', 'subset', 'filter1']]), axis=1)
mmvec_pd['omic_subset_filt2'] = mmvec_pd.apply(
lambda x: '__'.join(x[['omic1', 'subset', 'filter2']]), axis=1)
mmvec_pd['pair_omic_subset_filt1'] = mmvec_pd.apply(
lambda x: '__'.join(x[['pair', 'omic_subset_filt1']]), axis=1)
mmvec_pd['pair_omic_subset_filt2'] = mmvec_pd.apply(
lambda x: '__'.join(x[['pair', 'omic_subset_filt2']]), axis=1)
self.mmvec_songbird_pd = mmvec_pd
@staticmethod
def prep_songbird(songbird_pd):
songbird_pd['pair_omic_subset_filt'] = songbird_pd.apply(
lambda x: '__'.join(x[['pair', 'dataset', 'subset', 'filter']]),
axis=1)
songbird_pd = songbird_pd[
['params', 'pair_omic_subset_filt', 'differentials']
].drop_duplicates().pivot(
columns='params', index='pair_omic_subset_filt')
songbird_pd.columns = songbird_pd.columns.droplevel()
songbird_pd = songbird_pd.reset_index()
return songbird_pd
def get_tax_fp(self, omic):
if isfile(self.project.datasets[omic].tax[-1]):
omic_tax_fp = self.project.datasets[omic].tax[-1]
else:
omic_tax_fp = ''
return omic_tax_fp
def get_taxo_pds(self):
for omicn in ['1', '2']:
for omic in self.mmvec_songbird_pd['omic%s' % omicn].unique():
omic_tax_fp = self.get_tax_fp(omic)
if isfile(omic_tax_fp):
omic_tax_pd = pd.read_csv(
omic_tax_fp, header=0, sep='\t', dtype=str)
omic_tax_pd.rename(
columns={omic_tax_pd.columns[0]: 'Feature ID'},
inplace=True)
else:
omic_tax_pd = pd.DataFrame()
self.taxo_pds[omic] = omic_tax_pd
def get_omics_songbirds_taxa(self):
for omicn in ['1', '2']:
pair_case_omics_filts = ['pair', 'subset', 'omic1',
'filter1', 'omic2', 'filter2']
all_omic_sb = [x for x in self.mmvec_songbird_pd.columns if
x.endswith('omic%s_songbird_common_fp' % omicn)]
omicn_songbirds = self.mmvec_songbird_pd[
(pair_case_omics_filts + all_omic_sb)].set_index(
pair_case_omics_filts).T.to_dict()
for (pair, subset, omic1, filt1, omic2,
filt2), sb_head_diff_fp in omicn_songbirds.items():
if omicn == '1':
omic = omic1
omic_ = omic2
filt = filt1
filt_ = filt2
else:
omic = omic2
omic_ = omic1
filt = filt2
filt_ = filt1
feats_diff_cols = []
cur_mmvec_folder = get_analysis_folder(
self.config.i_datasets_folder,
'mmvec/metadata/%s/%s' % (pair, subset))
omic_diff_list = []
if len(sb_head_diff_fp):
for sb_head, diff_fp in sb_head_diff_fp.items():
model = sb_head.replace(
'_omic%s_songbird_common_fp' % omicn, '')
if str(diff_fp) != 'nan' and isfile(diff_fp):
diff_pd = pd.read_csv(diff_fp, header=0, sep='\t',
dtype=str)
index_header = diff_pd.columns[0]
if diff_pd[index_header][0] == '#q2:types':
diff_pd = diff_pd[1:]
diff_pd = diff_pd.rename(
columns={index_header: 'Feature ID'}).set_index(
'Feature ID')
diff_pd = diff_pd.drop(
columns=[x for x in diff_pd.columns if
'Intercept' in x])
q2s = {}
diff_htmls = glob.glob(
'%s/*/tensorboard.html' % dirname(diff_fp))
if len(diff_htmls):
for diff_html in diff_htmls:
baseline = diff_html.split('/')[-2]
with open(diff_html) as f:
for line in f:
if 'Pseudo Q-squared' in line:
q2 = line.split(
'Pseudo Q-squared:</a></strong> ')[
-1].split('<')[0]
if float(q2) > 0.01:
q2s[baseline] = q2
break
if q2s:
diff_cols = ['%s__%s__%s' % (
model, x, '--'.join(
['%s-Q2=%s' % (b, q) if b else 'noQ2'
for b, q in q2s.items()])
) for x in diff_pd.columns]
diff_pd.columns = diff_cols
feats_diff_cols.extend(diff_cols)
omic_diff_list.append(diff_pd)
if len(omic_diff_list):
omic_songbird_ranks = pd.concat(
omic_diff_list, axis=1, sort=False).reset_index()
omic_songbird_ranks.rename(
columns={omic_songbird_ranks.columns[0]: 'Feature ID'},
inplace=True
)
else:
omic_common_fp = self.mmvec_songbird_pd.loc[
(self.mmvec_songbird_pd['pair'] == pair) &
(self.mmvec_songbird_pd['subset'] == subset) &
(self.mmvec_songbird_pd['omic1'] == omic1) &
(self.mmvec_songbird_pd['filter1'] == filt1) &
(self.mmvec_songbird_pd['omic2'] == omic2) &
(self.mmvec_songbird_pd['filter2'] == filt2),
'omic%s_common_fp' % omicn
].tolist()[0]
omic_tax_list = []
if not isfile(omic_common_fp):
continue
with open(omic_common_fp) as f:
for ldx, line in enumerate(f):
if ldx:
omic_tax_list.append([line.split('\t')[0]])
omic_songbird_ranks = pd.DataFrame(omic_tax_list,
columns=['Feature ID'])
if omic in self.taxo_pds:
omic_tax_pd = self.taxo_pds[omic]
if omic_tax_pd.shape[0]:
if 'Taxon' in omic_tax_pd.columns:
omic_split_taxa_pd = get_split_taxonomy(
omic_tax_pd.Taxon.tolist(), True)
omic_tax_pd = pd.concat(
[omic_tax_pd, omic_split_taxa_pd], axis=1,
sort=False)
omic_songbird_ranks = omic_songbird_ranks.merge(
omic_tax_pd, on='Feature ID',
how='left').drop_duplicates()
meta_omic_fp = '%s/feature_metadata_%s_%s__%s_%s.tsv' % (
cur_mmvec_folder, omic, filt, omic_, filt_)
drop_columns = [col for col in omic_songbird_ranks.columns if
omic_songbird_ranks[col].unique().size == 1]
meta_omic_pd = omic_songbird_ranks.drop(columns=drop_columns)
meta_omic_pd.to_csv(meta_omic_fp, index=False, sep='\t')
meta_omic_pd.set_index('Feature ID', inplace=True)
self.metas[(pair, subset, omic, filt, omic_, filt_)] = (
meta_omic_fp, meta_omic_pd, feats_diff_cols)
def get_mmvec_res(self):
mmvec_out_cols = [x for x in self.mmvec_songbird_pd.columns if
x.startswith('mmvec_out__')]
# for each row of the main table that also contains the mmvec output folders
for r, row in self.mmvec_songbird_pd.iterrows():
pair = row['pair']
subset = row['subset']
omic1 = row['omic1']
omic2 = row['omic2']
filt1 = row['filter1']
filt2 = row['filter2']
omic1_common_fp = row['omic1_common_fp']
if str(omic1_common_fp) == 'nan':
continue
omic2_common_fp = row['omic2_common_fp']
n_common = row['n_common']
meta_fp = row['meta_common_fp']
# for each mmvec-parameters result
for mmvec_out_col in mmvec_out_cols:
# get the current parameters output result folder
mmvec_out = row[mmvec_out_col]
if str(mmvec_out) == 'nan':
continue
# get the ordination file and the ranks file and skip
# + warning if not performed
mmvec_out_ranks = mmvec_out + '/model/ranks.tsv'
mmvec_out_ordi = mmvec_out + '/model/ordination.txt'
if not isfile(mmvec_out_ranks) or not isfile(mmvec_out_ordi):
issue = '\t\t[run mmvec first] %s (%s) %s (%s)' % (
omic1, filt1, omic2, filt2)
self.mmvec_issues.add(issue)
continue
# collect the ranks + ordination
# + songbirds for each pair of omics and parameters
self.mmvec_res[
(pair, subset, omic1, omic2, filt1, filt2, n_common,
mmvec_out_col.replace('mmvec_out__', ''))
] = [mmvec_out_ranks, mmvec_out_ordi, meta_fp,
omic1_common_fp, omic2_common_fp]
def get_pair_cmds(self, omics_pairs):
crowdeds = [0, 1]
pc_sb_correlations = []
for keys, values in self.mmvec_res.items():
pair, case, omic1, omic2, filt1, filt2, sams, mmvec = keys
ranks_fp, ordi_fp, meta_fp, omic1_common, omic2_common = values
order_omics = get_order_omics(omic1, omic2, filt1, filt2, case,
omics_pairs)
omic1 = order_omics[0]
omic2 = order_omics[1]
filt1 = order_omics[2]
filt2 = order_omics[3]
omic_feature = order_omics[4]
omic_sample = order_omics[5]
omic_microbe = order_omics[6]
omic_metabolite = order_omics[7]
# get differentials
meta1, meta_pd1, diff_cols1 = self.metas[(pair, case, omic1,
filt1, omic2, filt2)]
meta2, meta_pd2, diff_cols2 = self.metas[(pair, case, omic2,
filt2, omic1, filt1)]
# features are biplot, samples are dots
ordi = OrdinationResults.read(ordi_fp)
cur_pc_sb_correlations, max_r = get_pc_sb_correlations(
pair, case, ordi, omic1, omic2, filt1, filt2, diff_cols1,
meta_pd1, diff_cols2, meta_pd2, meta_fp, omic1_common,
omic2_common, ranks_fp)
pc_sb_correlations.append(cur_pc_sb_correlations)
cmd = ''
if pair in self.highlights:
pair_highlights = self.highlights[pair]
for highlight, regexes_list in pair_highlights.items():
n_edit, meta_edit, ordi_edit_fp = edit_ordi_qzv(
ordi, ordi_fp, highlight, regexes_list, meta1, meta_pd1)
if n_edit:
qza, qzv = get_qzs(ordi_edit_fp)
cmd += get_biplot_commands(
ordi_edit_fp, qza, qzv, omic_feature, omic_sample,
meta_edit, meta2, n_edit, max_r)
ordi_edit_fp = ordi_fp
qza, qzv = get_qzs(ordi_edit_fp)
for crowded in crowdeds:
if crowded:
n_ordi_feats = ordi.features.shape[0]
qzv = qzv.replace('.qzv', '_crowded.qzv')
else:
n_ordi_feats = 15
# heat_qza, heat_qzv = get_heatmap_qzs(ranks_fp)
# cmd += get_heatmap_commands(
# ranks_fp, heat_qza, heat_qzv, meta1,
# meta2, meta_pd1, meta_pd2)
cmd += get_biplot_commands(
ordi_edit_fp, qza, qzv, omic_feature, omic_sample,
meta1, meta2, n_ordi_feats, max_r)
cmd += get_xmmvec_commands(
ordi_edit_fp, omic1, omic2, meta1, meta2, self.xmmvecs, pair)
topn = 5
features_names = []
if features_names:
heat = '%s_paired_heatmaps_custom.qzv' % splitext(ranks_fp)[0]
else:
heat = '%s_paired_heatmaps_top%s.qzv' % (splitext(ranks_fp)[0],
topn)
cmd += get_paired_heatmaps_command(
ranks_fp, omic1_common, omic2_common, meta1, features_names,
topn, heat)
self.cmds.setdefault(pair, []).append(cmd)
return pc_sb_correlations
def show_mmvec_issues(self):
if self.mmvec_issues:
for mmvec_issue in self.mmvec_issues:
print(mmvec_issue)
def mmbird(self, paired_datasets, differentials):
if not paired_datasets.mmvec_pd.shape[0]:
print('No mmvec output detected...')
return None
self.prep_mmvec(paired_datasets.mmvec_pd)
if differentials.songbird_pd.shape[0]:
songbird_pd = self.prep_songbird(differentials.songbird_pd)
self.merge_mmvec_songbird(songbird_pd)
self.get_taxo_pds()
self.get_omics_songbirds_taxa()
self.get_mmvec_res()
self.show_mmvec_issues()
omics_pairs = [tuple(x) for x in self.mmvec_songbird_pd[
['omic_subset_filt1', 'omic_subset_filt2']].values.tolist()]
pc_sb_correlations = self.get_pair_cmds(omics_pairs)
if len(pc_sb_correlations):
out_folder = get_analysis_folder(self.config.i_datasets_folder,
'mmbird')
out_correlations = '%s/pc_vs_songbird_correlations.tsv' % out_folder
pc_sb_correlations_pd = pd.concat(pc_sb_correlations)
if pc_sb_correlations_pd.shape[0]:
pc_sb_correlations_pd.to_csv(
out_correlations, index=False, sep='\t')
print('\t\t==> Written:', out_correlations)
else:
print('\t\t==> No good songbird model to '
'make correlations with mmvec PCs...')
self.register_command('mmbird')
def register_command(self, analysis):
AnalysisPrep.analyses_commands[analysis] = self.cmds
|
StarcoderdataPython
|
3333856
|
#!/usr/bin/env python
"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
Word trend analyzer for the "Toxic Docs" repository from Columbia University
and the Center for Public Integrity. Data set consists of PDF files of
emails, memos, advertisements, news articles, scientific articles cited in
legal cases involving allegations of environmental harm from toxic substances.
"""
from processing import *
from modeling import *
from analyzing import *
import seaborn as sns
import time
from sklearn.svm import LinearSVC
class TrendAnalyzer():
def __init__(self, docs, num_chars=1000, init_now=False):
self.nlp = NLP(num_chars, replace_ne=False)
self.df = pd.DataFrame(docs)
# perform expensive parts of initialization
if init_now:
self.create_token_sets()
self.infer_doc_years()
"""
Uses the set tokenizer to retrieve unique words
"""
def create_token_sets(self):
self.df['tokens'] = self.df['text'].map(lambda x: self.nlp.set_tokenizer(x))
return
"""
Infers a year by taking the max of year-like tokens
"""
def infer_year(self, tokens):
years = []
for token in tokens:
try:
if self.nlp.spacy(token)[0].is_digit:
try:
num = int(token)
if num < 2016 and num > 1890:
years.append(num)
except:
pass
except:
pass
if years:
return int(max(years))
else:
return None
"""
Stores inferred year of documents in data frame
"""
def infer_doc_years(self):
self.df['inferred_year'] = self.df['tokens'].map(lambda x: self.infer_year(x))
return
"""
Computes the document count of a topic over time
"""
def topic_trend(self, word, doc_type, colors, label_names,\
x_min=1900, x_max=2016, fname='trend.png'):
year_counts = self.df.groupby(['inferred_document_type',\
'inferred_year'])['tokens'].apply(lambda x: \
np.sum([word in y for y in x]))
x_vals = np.arange(x_min, x_max + 1, dtype=int)
y_vals = np.zeros((x_max - x_min + 1, len(doc_type)))
for i,dt in enumerate(doc_type):
try:
x = year_counts[dt].index.astype(int)
y = year_counts[dt].values
y = y[(x >= x_min) * (x <= x_max)]
x = x[(x >= x_min) * (x <= x_max)]
y_vals[x - x_min, i] = y
except Exception as e:
print(e)
y_sum = np.zeros_like(y_vals)
for i in range(y_vals.shape[1]-1,-1,-1):
y_sum[:,i] = np.sum(y_vals[:,:i+1], axis=1)
sns.set_style("whitegrid")
sns.set_context({"figure.figsize": (24, 10)})
for i in range(y_vals.shape[1]-1, -1, -1):
sbp = sns.barplot(x=x_vals, y=y_sum[:,i],\
color=colors[i], saturation=0.75)
plt.setp(sbp.patches, linewidth=0)
legends = []
for i in range(y_vals.shape[1]):
legends.append(plt.Rectangle((0,0),1,1,fc=colors[i], edgecolor='none'))
l = plt.legend(legends, label_names, loc=1, ncol=1, prop={'size':32})
l.draw_frame(False)
title = r'"' + word + r'" ' + str(x_min) + ' - ' + str(x_max)
sns.despine(left=True)
sbp.set_title(title, fontsize=48, fontweight='bold')
plt.xticks(rotation=45)
for item in ([sbp.xaxis.label] +
sbp.get_xticklabels()):
item.set_fontsize(24)
for item in ([sbp.yaxis.label] +
sbp.get_yticklabels()):
item.set_fontsize(28)
plt.savefig(fname)
"""
Computes indices of most similar documents in n-gram space
"""
def compute_sim_docs(self, X, num_docs):
self.sim_docs = np.zeros((X.shape[0], num_docs), dtype=int)
for i in range(X.shape[0]):
doc_sims = np.dot(X, X[i, :].transpose())
sim_ranking = np.argsort(-doc_sims.toarray().flatten())
self.sim_docs[i,:] = sim_ranking[1:(num_docs+1)]
if __name__ == '__main__':
bson_file = 'doc_extra.bson'
label_key = 'document_type'
text_key = 'text'
# Process the raw data
dp = DataProcessor(text_key, label_key, num_chars=300, replace_ne=True)
da = DataAnalyzer(text_key)
docs, y_regex, counts_regex = dp.load_bson(bson_file)
ta = TrendAnalyzer(docs, num_chars=1000, init_now=True)
t0 = time.time()
ta.create_token_sets()
tok_time = time.time() - t0
print('Tokenization time:', tok_time)
t0 = time.time()
vectorizer, X_all_ngram, feat_names = dp.vectorize(docs, min_df=5, max_ngram=2)
vec_time = time.time() - t0
# Replace regex labels with human labels
y_all = np.loadtxt('labels.txt', dtype=np.int32)
# Merge similar classes
y_merged = dp.merge_classes([(7,14), (15,19), (15, 12), (15, 18)], y_all)
counts = np.bincount(y_merged[y_merged != -1])
counts = [counts[i] for i in range(len(counts)) if i in dp.label_index_list]
print(counts)
# Add extra features from ToxicDocs to n-gram data matrix
key_list = ['num_pages']
feats = dp.get_feats(docs, key_list)
X_all = dp.stack_feats(X_all_ngram, feats)
key_list.append('length')
feat_names.extend(key_list)
print('Vectorization time:', vec_time)
print('Data matrix size:', X_all.shape)
y_train, X_train, ind_train, y_test, X_test, ind_test, X_unlab, ind_unlab =\
dp.split_data(y_merged, X_all, split=0.7, seed=0)
me = ModelEvaluator()
# LinearSVC (liblinear SVM implementation, one-v-all)
cross_validate = True
if cross_validate:
model = LinearSVC(penalty='l2', loss='squared_hinge', dual=True, tol=0.0001,\
C=1, multi_class='ovr', fit_intercept=True, intercept_scaling=1,\
class_weight='balanced', verbose=0, random_state=None, max_iter=1000)
param_grid = {'C':np.logspace(-1,1,24).tolist()}
grid_info, grid_best, grid_time = me.param_search(model, param_grid,\
np.concatenate((y_train, y_test)), sp.vstack((X_train, X_test)), num_folds=3)
C = grid_best['C']
else:
C = 1
print(C)
SVM = LinearSVC(penalty='l2', loss='squared_hinge', dual=True, tol=0.0001,\
C=C, multi_class='ovr', fit_intercept=True, intercept_scaling=1,\
class_weight='balanced', verbose=0, random_state=None, max_iter=1000)
SVM_train_acc, SVM_train_time = me.train(SVM,\
np.concatenate((y_train, y_test)), sp.vstack((X_train, X_test)))
# Perform semisupervised learning
ssl = SemiSupervisedLearner(SVM)
ave_conf = np.mean(ssl.confidence_scores(SVM.decision_function(\
sp.vstack((X_train, X_test)))))
print(ave_conf)
y_working = ssl.loop_learning(X_unlab,\
np.concatenate((y_train, y_test)), sp.vstack((X_train, X_test)),\
dp.label_index_list,num_loops=1, conf_thresh=1.6*ave_conf)
y_pred = ssl.model.predict(X_all)
ta.df['inferred_document_type'] = y_pred
print(time.time() - t0)
ta.df['inferred_year'].count()
compute_sim = False
if compute_sim:
t0 = time.time()
ta.compute_sim_docs(X_all, 100)
sim_time = time.time() - t0
print('Similarity computation time', sim_time)
x_min = 1957
x_max = 1992
doc_type = [1,8,9,6]
label_names = ['Internal Memo', 'Internal Study', 'Published Study', 'News']
colors = ["#003399", "#6699ff", "#ff99cc", "#ff0066"]
ta.topic_trend('vinyl', doc_type, colors, label_names,\
x_min, x_max, fname='trend.png')
|
StarcoderdataPython
|
39623
|
import sys
import os
if sys.platform == 'linux':
sys.path.append('/n/groups/patel/samuel/Aging')
elif sys.platform == 'darwin':
sys.path.append('/Users/samuel/Desktop/Aging')
from aging.model.environment_predictor import EnvironmentPredictor
name = sys.argv[1]
n_iter = int(sys.argv[2])
target_dataset = sys.argv[3]
input_dataset = sys.argv[4]
n_splits = int(sys.argv[5])
hyperparameters = dict()
hyperparameters['name'] = name
hyperparameters['n_splits'] = n_splits
hyperparameters['n_iter'] = n_iter
hyperparameters['target_dataset'] = target_dataset
hyperparameters['input_dataset'] = input_dataset
print(hyperparameters)
gp = EnvironmentPredictor(name, -1, n_splits, n_iter, target_dataset, input_dataset, -1)
print("Loading Dataset")
df = gp.load_dataset().dropna()
print("Dataset Loaded, optimizing hyper")
#df_scaled = gp.normalise_dataset(df)
feature_importance_cols = gp.feature_importance(df)
print("Feature importance over, saving file")
gp.save_features(feature_importance_cols)
print("task complete")
|
StarcoderdataPython
|
1603755
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""Lists instances in a given project.
Lists instances in a given project in the alphabetical order of the
instance name.
"""
from apiclient import errors
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.sql import util
class List(base.Command):
"""Lists Cloud SQL instances in a given project.
Lists Cloud SQL instances in a given project in the alphabetical
order of the instance name.
"""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument(
'--max-results',
'-m',
required=False,
default=30,
help='Maximum number of instances per response.')
parser.add_argument(
'--page-token',
'-p',
required=False,
help='A previously-returned page token representing part of the larger'
' set of results to view.')
def Run(self, args):
"""Lists Cloud SQL instances in a given project.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A dict object of the list of instance resources if the command ran
successfully.
Raises:
HttpException: An http error response was received while executing api
request.
ToolException: An error other than an http error occurred while executing
the command.
"""
sql = self.context['sql']
project_id = properties.VALUES.core.project.Get(required=True)
max_results = args.max_results
page_token = args.page_token
request = sql.instances().list(project=project_id,
maxResults=max_results,
pageToken=page_token)
try:
result = request.execute()
return result
except errors.HttpError as error:
raise exceptions.HttpException(util.GetError(error))
except errors.Error as error:
raise exceptions.ToolException(error)
def Display(self, unused_args, result):
"""Display prints information about what just happened to stdout.
Args:
unused_args: The same as the args in Run.
result: A dict object that has the list of instance resources if the
command ran successfully.
"""
PrettyPrintInstancesList(result)
def PrettyPrintInstancesList(instances_list):
"""Pretty prints a list of instances.
Args:
instances_list: A dict object representing the list of instance
resources.
"""
printer = util.PrettyPrinter(0)
if 'nextPageToken' in instances_list:
page_token = instances_list['nextPageToken']
printer.Print('Next page-token : %s (use --page-token=%s to fetch the '
'next page)' % (page_token, page_token))
if 'items' in instances_list:
for instance in instances_list['items']:
printer.Print('%s' % instance['project'] + ':' + instance['instance'])
else:
printer.Print('No instances found for project.')
|
StarcoderdataPython
|
41365
|
from django.contrib.auth import views as auth_views
from django.urls import path
from django.urls.base import reverse_lazy
from django.views.decorators.csrf import csrf_exempt
from . import views, webhooks
from .forms.authorization import CosmosPasswordChangeForm, CosmosPasswordResetForm, CosmosSetPasswordForm
app_name = "cosmos_users"
urlpatterns = [
# auth urls
path("login/", views.CosmosLoginView.as_view(), name="login"),
path("logout/", auth_views.LogoutView.as_view(), name="logout"),
path(
"password_change/",
auth_views.PasswordChangeView.as_view(
form_class=CosmosPasswordChangeForm, success_url=reverse_lazy("cosmos_users:password_change_done")
),
name="password_change",
),
path("password_change/done/", auth_views.PasswordChangeDoneView.as_view(), name="password_change_done"),
path(
"password_reset/",
auth_views.PasswordResetView.as_view(
form_class=CosmosPasswordResetForm, success_url=reverse_lazy("cosmos_users:password_reset_done")
),
name="password_reset",
),
path("password_reset/done/", auth_views.PasswordResetDoneView.as_view(), name="password_reset_done"),
path(
"reset/<uidb64>/<token>/",
auth_views.PasswordResetConfirmView.as_view(
form_class=CosmosSetPasswordForm, success_url=reverse_lazy("cosmos_users:password_reset_complete")
),
name="password_reset_confirm",
),
path("reset/done/", auth_views.PasswordResetCompleteView.as_view(), name="password_reset_complete"),
# custom urls
path("profile/", views.profile, name="user_profile"),
path("delete/", views.delete, name="user_delete"),
path(
"register/",
views.RegistrationWizard.as_view(views.FORMS, condition_dict=views.CONDITION_DICT),
name="user_register",
),
path("register/done/", views.registration_done, name="registration_done"),
path("confirm/<uidb64>/<token>/", views.activate, name="confirm_registration"),
path("hook/", csrf_exempt(webhooks.SendGridWebhook.as_view()), name="email_hook"),
]
|
StarcoderdataPython
|
56800
|
<reponame>saravankumarpa/sbc-pay
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the FeeSchedule Service.
Test-Suite to ensure that the FeeSchedule Service is working as expected.
"""
import pytest
import uuid
from datetime import datetime
from pay_api.exceptions import BusinessException
from pay_api.models import FeeSchedule, Invoice, Payment, PaymentAccount, PaymentLineItem, PaymentTransaction
from pay_api.services.payment_transaction import PaymentTransaction as PaymentTransactionService
from pay_api.utils.enums import Status
from pay_api.utils.errors import Error
from tests import skip_in_pod
def factory_payment_account(corp_number: str = 'CP0001234', corp_type_code='CP', payment_system_code='PAYBC'):
"""Factory."""
return PaymentAccount(
corp_number=corp_number,
corp_type_code=corp_type_code,
payment_system_code=payment_system_code,
party_number='11111',
account_number='4101',
site_number='29921',
)
def factory_payment(payment_system_code: str = 'PAYBC', payment_method_code='CC', payment_status_code='DRAFT'):
"""Factory."""
return Payment(
payment_system_code=payment_system_code,
payment_method_code=payment_method_code,
payment_status_code=payment_status_code,
created_by='test',
created_on=datetime.now(),
)
def factory_invoice(payment_id: str, account_id: str):
"""Factory."""
return Invoice(
payment_id=payment_id,
invoice_status_code='DRAFT',
account_id=account_id,
total=0,
created_by='test',
created_on=datetime.now(),
)
def factory_payment_line_item(invoice_id: str, fee_schedule_id: int, filing_fees: int = 10, total: int = 10):
"""Factory."""
return PaymentLineItem(
invoice_id=invoice_id,
fee_schedule_id=fee_schedule_id,
filing_fees=filing_fees,
total=total,
line_item_status_code='CREATED',
)
def factory_payment_transaction(
payment_id: str,
status_code: str = 'DRAFT',
client_system_url: str = 'http://google.com/',
pay_system_url: str = 'http://google.com',
transaction_start_time: datetime = datetime.now(),
transaction_end_time: datetime = datetime.now(),
):
"""Factory."""
return PaymentTransaction(
payment_id=payment_id,
status_code=status_code,
client_system_url=client_system_url,
pay_system_url=pay_system_url,
transaction_start_time=transaction_start_time,
transaction_end_time=transaction_end_time,
)
def test_transaction_saved_from_new(session):
"""Assert that the payment is saved to the table."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
payment_transaction = PaymentTransactionService()
payment_transaction.status_code = 'DRAFT'
payment_transaction.transaction_end_time = datetime.now()
payment_transaction.transaction_start_time = datetime.now()
payment_transaction.pay_system_url = 'http://google.com'
payment_transaction.client_system_url = 'http://google.com'
payment_transaction.payment_id = payment.id
payment_transaction = payment_transaction.save()
transaction = PaymentTransactionService.find_by_id(payment.id, payment_transaction.id)
assert transaction is not None
assert transaction.id is not None
assert transaction.status_code is not None
assert transaction.payment_id is not None
assert transaction.client_system_url is not None
assert transaction.pay_system_url is not None
assert transaction.transaction_start_time is not None
assert transaction.transaction_end_time is not None
def test_transaction_create_from_new(session):
"""Assert that the payment is saved to the table."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = PaymentTransactionService.create(payment.id, 'http://google.com/', skip_auth_check=True)
assert transaction is not None
assert transaction.id is not None
assert transaction.status_code is not None
assert transaction.payment_id is not None
assert transaction.client_system_url is not None
assert transaction.pay_system_url is not None
assert transaction.transaction_start_time is not None
assert transaction.asdict() is not None
def test_transaction_create_from_invalid_payment(session):
"""Assert that the payment is saved to the table."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
with pytest.raises(BusinessException) as excinfo:
PaymentTransactionService.create(999, 'http://google.com/', skip_auth_check=True)
assert excinfo.value.status == Error.PAY005.status
assert excinfo.value.message == Error.PAY005.message
assert excinfo.value.code == Error.PAY005.name
@skip_in_pod
def test_transaction_update(session, stan_server):
"""Assert that the payment is saved to the table."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = PaymentTransactionService.create(payment.id, 'http://google.com/', skip_auth_check=True)
transaction = PaymentTransactionService.update_transaction(payment.id, transaction.id, '123451',
skip_auth_check=True)
assert transaction is not None
assert transaction.id is not None
assert transaction.status_code is not None
assert transaction.payment_id is not None
assert transaction.client_system_url is not None
assert transaction.pay_system_url is not None
assert transaction.transaction_start_time is not None
assert transaction.transaction_end_time is not None
assert transaction.status_code == Status.COMPLETED.value
@skip_in_pod
def test_transaction_update_with_no_receipt(session, stan_server):
"""Assert that the payment is saved to the table."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = PaymentTransactionService.create(payment.id, 'http://google.com/', skip_auth_check=True)
transaction = PaymentTransactionService.update_transaction(payment.id, transaction.id, None, skip_auth_check=True)
assert transaction is not None
assert transaction.id is not None
assert transaction.status_code is not None
assert transaction.payment_id is not None
assert transaction.client_system_url is not None
assert transaction.pay_system_url is not None
assert transaction.transaction_start_time is not None
assert transaction.transaction_end_time is not None
assert transaction.status_code == Status.FAILED.value
assert transaction.asdict() is not None
@skip_in_pod
def test_transaction_update_completed(session, stan_server):
"""Assert that the payment is saved to the table."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = PaymentTransactionService.create(payment.id, 'http://google.com/', skip_auth_check=True)
transaction = PaymentTransactionService.update_transaction(payment.id, transaction.id, '123451',
skip_auth_check=True)
with pytest.raises(BusinessException) as excinfo:
PaymentTransactionService.update_transaction(payment.id, transaction.id, '123451', skip_auth_check=True)
assert excinfo.value.status == Error.PAY006.status
assert excinfo.value.message == Error.PAY006.message
assert excinfo.value.code == Error.PAY006.name
def test_transaction_create_new_on_completed_payment(session):
"""Assert that the payment is saved to the table."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = PaymentTransactionService.create(payment.id, 'http://google.com/', skip_auth_check=True)
PaymentTransactionService.update_transaction(payment.id, transaction.id, '123451', skip_auth_check=True)
with pytest.raises(BusinessException) as excinfo:
PaymentTransactionService.create(payment.id, 'http://google.com/', skip_auth_check=True)
assert excinfo.value.status == Error.PAY006.status
assert excinfo.value.message == Error.PAY006.message
assert excinfo.value.code == Error.PAY006.name
def test_multiple_transactions_for_single_payment(session):
"""Assert that the payment is saved to the table."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
PaymentTransactionService.create(payment.id, 'http://google.com/', skip_auth_check=True)
PaymentTransactionService.create(payment.id, 'http://google.com/', skip_auth_check=True)
transaction = PaymentTransactionService.create(payment.id, 'http://google.com/', skip_auth_check=True)
assert transaction is not None
assert transaction.id is not None
assert transaction.status_code is not None
assert transaction.payment_id is not None
assert transaction.client_system_url is not None
assert transaction.pay_system_url is not None
assert transaction.transaction_start_time is not None
assert transaction.status_code == Status.CREATED.value
def test_transaction_invalid_lookup(session):
"""Invalid lookup.."""
with pytest.raises(BusinessException) as excinfo:
PaymentTransactionService.find_by_id(1, uuid.uuid4())
assert excinfo.value.status == Error.PAY008.status
assert excinfo.value.message == Error.PAY008.message
assert excinfo.value.code == Error.PAY008.name
def test_transaction_invalid_update(session):
"""Invalid update.."""
with pytest.raises(BusinessException) as excinfo:
PaymentTransactionService.update_transaction(1, uuid.uuid4(), None, skip_auth_check=True)
assert excinfo.value.status == Error.PAY008.status
assert excinfo.value.message == Error.PAY008.message
assert excinfo.value.code == Error.PAY008.name
def test_transaction_find_active_lookup(session):
"""Invalid lookup.."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = factory_payment_transaction(payment.id, Status.CREATED.value)
transaction.save()
transaction = PaymentTransactionService.find_active_by_payment_id(payment.id)
assert transaction is not None
assert transaction.id is not None
assert transaction.status_code is not None
assert transaction.payment_id is not None
assert transaction.client_system_url is not None
assert transaction.pay_system_url is not None
assert transaction.transaction_start_time is not None
assert transaction.status_code == Status.CREATED.value
def test_transaction_find_active_none_lookup(session):
"""Invalid lookup.."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = factory_payment_transaction(payment.id, Status.COMPLETED.value)
transaction.save()
transaction = PaymentTransactionService.find_active_by_payment_id(payment.id)
assert transaction is None
def test_transaction_find_by_payment_id(session):
"""Find all transactions by payment id.."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = factory_payment_transaction(payment.id, Status.CREATED.value)
transaction.save()
transaction = PaymentTransactionService.find_by_payment_id(payment.id)
assert transaction is not None
assert transaction.get('items') is not None
assert transaction.get('items')[0].get('_links') is not None
def test_no_existing_transaction(session):
"""Assert that the payment is saved to the table."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = PaymentTransactionService.find_active_by_payment_id(payment.id)
assert transaction is None
@skip_in_pod
def test_transaction_update_on_paybc_connection_error(session, stan_server):
"""Assert that the payment is saved to the table."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = PaymentTransactionService.create(payment.id, 'http://google.com/', skip_auth_check=True)
from unittest.mock import patch
from requests.exceptions import ConnectTimeout, ConnectionError
# Mock here that the invoice update fails here to test the rollback scenario
with patch('pay_api.services.oauth_service.requests.post', side_effect=ConnectionError('mocked error')):
transaction = PaymentTransactionService.update_transaction(payment.id, transaction.id, '123451',
skip_auth_check=True)
assert transaction.pay_system_reason_code == 'SERVICE_UNAVAILABLE'
with patch('pay_api.services.oauth_service.requests.post', side_effect=ConnectTimeout('mocked error')):
transaction = PaymentTransactionService.update_transaction(payment.id, transaction.id, '123451',
skip_auth_check=True)
assert transaction.pay_system_reason_code == 'SERVICE_UNAVAILABLE'
assert transaction is not None
assert transaction.id is not None
assert transaction.status_code is not None
assert transaction.payment_id is not None
assert transaction.client_system_url is not None
assert transaction.pay_system_url is not None
assert transaction.transaction_start_time is not None
assert transaction.transaction_end_time is not None
assert transaction.status_code == Status.FAILED.value
|
StarcoderdataPython
|
1648204
|
<reponame>amackillop/DockerWorkshop<filename>simple_app/src/app.py<gh_stars>0
from flask import Flask, request
import requests
import tensorflow as tf
import numpy as np
import imghdr
from io import BytesIO
app = Flask(__name__)
MODEL = tf.keras.applications.MobileNetV2(
input_shape=None,
alpha=1.0,
include_top=True,
weights="imagenet",
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
)
@app.route("/predict", methods=["POST"])
def predict():
url = request.json["url"]
return handle_request(url)
def handle_request(url: str):
img_bytes = download_image(url)
img = parse_image(img_bytes)
img_array = preprocess(img)
predictions = predict_top5(img_array)
response = "\n".join(str(pred) for pred in predictions)
return response
def download_image(url: str) -> bytes:
"""Download and verify image from given URL."""
res = requests.get(url)
res.raise_for_status()
content = res.content
# Weak check that the page content is actually an image.
if imghdr.what(BytesIO(content)) is None:
msg = f"Not a valid image at {url}."
raise IOError(msg)
return content
def parse_image(img_bytes: bytes) -> np.array:
img = tf.image.decode_image(img_bytes, channels=3, dtype=tf.uint8)
img = tf.image.resize_with_pad(img, target_width=224, target_height=224)
return img
def preprocess(img: np.array) -> np.array:
img = np.array([img])
return tf.keras.applications.mobilenet.preprocess_input(img, data_format=None)
def predict_top5(img_array: np.ndarray):
    # Run MobileNetV2 and return the five most likely ImageNet classes;
    # the distinct name avoids shadowing the Flask view function predict() above.
    prediction = MODEL.predict(img_array)
    return tf.keras.applications.mobilenet.decode_predictions(prediction, top=5)[0]
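# A hedged sketch of how this app might be served and queried; port 5000 and the
# example image URL are assumptions, not part of the original snippet.
if __name__ == "__main__":
    # e.g. query the running server with:
    #   curl -X POST -H "Content-Type: application/json" \
    #        -d '{"url": "https://example.com/cat.jpg"}' http://localhost:5000/predict
    app.run(host="0.0.0.0", port=5000)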
|
StarcoderdataPython
|
1698552
|
<gh_stars>0
from django.conf.urls import url
from .views import SearchView
urlpatterns = [
url('^$', SearchView.as_view(), name='search'),
]
|
StarcoderdataPython
|