max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
assgn3/python/project_cad.py | gray0018/CMU-16-385-Spring2020 | 0 | 12799151 |
import numpy as np
# write your implementation here
| 1.148438 | 1 |
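The stub above only imports numpy and leaves the implementation to the student. As a generic illustration of projecting 3-D points through a camera matrix (not the assignment's required solution; the function name and shapes are assumptions), a minimal sketch:

import numpy as np

def project_points(P, X):
    """Project Nx3 world points X through a 3x4 camera matrix P (hypothetical helper)."""
    X_h = np.hstack([X, np.ones((X.shape[0], 1))])  # lift to homogeneous coordinates
    x_h = (P @ X_h.T).T                             # apply the projection
    return x_h[:, :2] / x_h[:, 2:3]                 # dehomogenize to pixel coordinates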
examples/use-cases/vuln_sync_csv_upload/vuln_upload.py | AutomoxCommunity/automox-console-sdk-python | 1 | 12799152 |
"""Use case for automating the ingestion of CVE reports"""
import glob
import os
import sys
import time
from getpass import getpass
from io import FileIO
import requests
def upload_cve(file: FileIO) -> dict:
""" Uploads vulnerability list to Automox Vulnerability Sync endpoint.
Args:
file (FileIO): A CSV file containing vulnerability data.
Returns:
response_data (dict): API response from Automox Vulnerability Sync
https://developer.automox.com/openapi/vulnsync/operation/UploadCSVBatch/
"""
response_data = {}
task = "patch"
url = f"https://console.automox.com/api/orgs/{organization}/tasks/{task}/batches/upload"
filename = os.path.basename(file.name)
try:
headers = {
"Authorization": f"Bearer {api_secret}",
}
files = [
('file', (filename, file, 'text/csv'))
]
response = requests.request("POST", url, headers=headers, files=files)
response_data = response.json()
if "errors" in response_data and len(response_data['errors']) > 0:
msg = ""
msg = msg.join(response_data['errors'])
raise Exception(msg)
except (requests.RequestException, Exception) as error:
print(f"Error: Unable to complete CSV upload request. ({error})")
return response_data
def get_unprocessed_cves(directory: str) -> list:
"""Returns a list of CSV files to upload and process.
Args:
directory (str): Directory to look in for CSVs.
Returns:
cve_files (list): List of files to be processed and uploaded.
"""
cve_files = []
paths = glob.glob(f"{directory}/*.csv")
for path in paths:
try:
cve_file = open(path, "rb")
cve_files.append(cve_file)
except (OSError, IOError) as error:
print(f"Error: Could not open a CSV. {error}")
print(f"Found {len(cve_files)} file(s) to upload.")
return cve_files
def process_cves(unprocessed_cve_list: list) -> dict:
"""Handles uploading and moving the CSV file to the processed directory.
Args:
unprocessed_cve_list (list): List of files to process.
Returns:
uploaded_batches (dict): Dictionary of batch ids correlated to API batch upload responses.
"""
uploaded_batches = {}
for file in unprocessed_cve_list:
try:
# Make the request to upload the batch file
print(f"Sending {os.path.basename(file.name)} to Automox Vulnerability Sync...")
response = upload_cve(file)
            # upload_cve() returns an empty dict on failure, so use .get() to avoid a KeyError
            if response.get('id'):
uploaded_batches[response['id']] = response
upload_output = (
"==============================\n"
f"BATCH ID: {response['id']}\n"
f"{response['source']} has been uploaded.\n"
"=============================="
)
print(upload_output)
path = os.path.realpath(file.name)
directory = os.path.dirname(path)
filename = os.path.basename(file.name)
new_path = f"{directory}/processed/{filename}"
print(f"Moving {filename} to {new_path}\n")
os.rename(path, new_path)
except OSError as error:
print(f"Error processing CVE: {error}")
return uploaded_batches
def update_batches(uploaded_batches: dict) -> dict:
"""Polls the Automox API for the status of batches contained in this dictionary.
    When CSV files containing CVE information are uploaded to the Automox Vulnerability Sync API, the resulting task batches are built asynchronously, so each batch's status must be polled until it is ready.
Args:
uploaded_batches (dict): A dictionary of the latest responses from the Automox API about the status of a batch.
Returns:
uploaded_batches (dict): An updated dictionary of the latest responses from the Automox API about the status of a batch.
"""
for batch_id, batch in uploaded_batches.items():
try:
if batch['status'] != "awaiting_approval":
headers = {
"Authorization": f"Bearer {api_secret}",
}
response = requests.get(f"https://console.automox.com/api/orgs/{organization}/tasks/batches/{batch['id']}", headers=headers)
response_data = response.json()
if "errors" in response_data and len(response_data['errors']) > 0:
msg = ""
msg = msg.join(response_data['errors'])
raise Exception(msg)
uploaded_batches[batch_id] = response_data
except (requests.RequestException, Exception) as error:
print(f"Error: Unable to update batch {batch_id} status. ({error})")
return uploaded_batches
try:
# Directory to watch for new CVE CSVs
WATCH_DIR = os.getenv("WATCH_DIR") or "./cve_queue"
# Prompt for inputs
api_secret = os.getenv('AUTOMOX_API_KEY') or getpass('Enter your API Key: ')
organization = os.getenv('AUTOMOX_ORGANIZATION_ID') or input("Enter your Organization ID: ")
cve_list = get_unprocessed_cves(WATCH_DIR)
if len(cve_list) == 0:
sys.exit()
batches = process_cves(cve_list)
# Assumes the batches have not been built upon receipt.
batches_complete = len(batches) == 0
while not batches_complete:
print("Batches are still building... Checking for updates...")
batches = update_batches(batches)
        # The run is only complete once every batch has reached "awaiting_approval";
        # set the flag before the loop so one finished batch can't mask an unfinished one.
        batches_complete = True
        for batch_id, batch in batches.items():
            if batches[batch_id]['status'] != "awaiting_approval":
                batches_complete = False
time.sleep(10)
print("Batches are done processing!")
for batch_id, batch in batches.items():
output = (
"==============================\n"
f"BATCH ID: {batch['id']}\n"
f"{batch['source']} has been processed.\n"
f"Total Vulnerabilities: {batch['cve_count']}\n"
f"Devices Impacted: {batch['impacted_device_count']}\n"
f"Tasks Pending Creation: {batch['task_count']}\n"
f"Batch Issues: {batch['issue_count']}\n"
f"Unknown Hosts: {batch['unknown_host_count']}\n"
"=============================="
)
print(output)
except Exception as e:
print(f"Error: {e}\n")
raise
except KeyboardInterrupt:
print ("Ctrl+C Pressed. Shutting down.")
| 2.578125 | 3 |
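For reference, these are the batch-status fields the script above reads from the Automox API response, shown as an illustrative Python dict (the field names come from the code above; the values are made up):

sample_batch = {
    "id": 123,                         # batch id returned by the upload endpoint
    "source": "vulns-2021-06-01.csv",  # name of the uploaded CSV
    "status": "awaiting_approval",     # polled until every batch reaches this state
    "cve_count": 42,
    "impacted_device_count": 7,
    "task_count": 3,
    "issue_count": 0,
    "unknown_host_count": 1,
}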
08_testing/test_mean.py | nachrisman/PHY494 | 0 | 12799153 |
from mean import mean
import pytest
def test_ints():
num_list = [1, 2, 3, 4, 5]
obs = mean(num_list)
assert obs == 3
def test_not_numbers():
values = [2, "lolcats"]
with pytest.raises(TypeError):
out = mean(values)
def test_zero():
num_list = [0, 2, 4, 6]
assert mean(num_list) == 3
def test_empty():
assert mean([]) == 0
def test_single_int():
with pytest.raises(TypeError):
mean(1)
| 3.15625 | 3 |
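The tests above pin down the behaviour they expect from mean(): the average of a numeric list, 0 for an empty list, and a TypeError for non-numeric or non-list input. A minimal sketch consistent with those tests (an illustration, not the course's actual mean.py):

def mean(num_list):
    """Return the arithmetic mean of a list of numbers, or 0 for an empty list."""
    if len(num_list) == 0:  # len() also raises TypeError for a bare int, as test_single_int expects
        return 0
    return sum(num_list) / len(num_list)  # sum() raises TypeError for non-numeric entries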
gitlab_migration/cli.py | inhumantsar/gitlab-migration | 0 | 12799154 |
# -*- coding: utf-8 -*-
"""Console script for gitlab_migration."""
import os
import sys
import click
from gitlab_migration import gitlab_migration as glm
@click.group()
def cli():
pass
@cli.group()
def projects():
"""Commands for migrating projects."""
return 0
@projects.command()
@click.argument('csv', type=click.File('r'))
@click.argument('target_base_url', type=click.STRING)
@click.argument('target_token', type=click.STRING)
def from_csv(csv, target_base_url, target_token):
'''
    Read repos to migrate from a CSV and migrate them to target_base_url.
    The CSV must contain two columns: the source URL in the first and the target group in the second.
    target_base_url MUST be fully specified, e.g. `https://gitlab.example.com/` or an SSH prefix such as `git@gitlab.example.com:`.
    target_token must be an API-level private token valid on the target server.
'''
for line in csv.readlines():
old_url, target_group = [string.strip() for string in line.split(',')]
click.echo(f"working on {old_url}...")
glm.migrate_repo(old_url, target_base_url, target_group, target_token)
@projects.command()
@click.argument('csv', type=click.File('w'))
@click.argument('gitlab_url', type=click.STRING)
@click.argument('token', type=click.STRING)
def to_csv(csv, gitlab_url, token):
'''
    Get the SSH URL for every project (except archived projects) and write them to a CSV, one URL per row with the second column left blank (to be filled in with a target group for from_csv).
WARNING: this will silently overwrite the specified file if it already exists
'''
click.echo(f"Fetching all project SSH URLs from {gitlab_url}...")
csv.writelines([f"{url},\n" for url in glm.get_project_urls(gitlab_url, token)])
click.echo("Done.")
@projects.command()
@click.argument('path', type=click.STRING)
@click.argument('new_base_url', type=click.STRING)
@click.argument('old_base_url', type=click.STRING)
@click.argument('target_group', type=click.STRING)
@click.option('set_as_origin', '--set-as-origin/--set-as-new', default=True)
def update_local(path, new_base_url, old_base_url, target_group, set_as_origin):
    for child_name in os.listdir(path):
        # os.listdir() returns bare names, so join with the parent path before checking
        child_path = os.path.join(path, child_name)
        if os.path.isdir(child_path) and os.path.isdir(f"{child_path}/.git"):
            glm.update_local_repo(child_path, old_base_url, new_base_url, target_group, set_as_origin)
@cli.group()
def variables():
"""Commands for migrating group variables."""
return 0
@variables.command()
@click.option('src_group', '--source-group', default=None, type=click.STRING, help="Leave blank to migrate vars from all groups")
@click.argument('target_group', type=click.STRING)
@click.argument('src_gitlab_url', type=click.STRING)
@click.argument('target_gitlab_url', type=click.STRING)
@click.argument('src_token', type=click.STRING)
@click.argument('target_token', type=click.STRING)
def migrate(src_group, target_group, src_gitlab_url, target_gitlab_url, src_token, target_token):
'''
migrate group variables from 1+ groups on one host to a single group on another host
'''
if src_group:
src_group_id = glm._get_namespace_id(src_gitlab_url, src_group, src_token)
else:
src_group_id = None
target_group_id = glm._get_namespace_id(target_gitlab_url, target_group, target_token)
for var in glm.get_group_vars(src_gitlab_url, src_token, src_group_id):
glm.create_group_var(target_gitlab_url, target_token, var, target_group_id)
if __name__ == "__main__":
    sys.exit(cli())  # pragma: no cover
| 3.03125 | 3 |
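A hedged usage sketch of the `from_csv` command above using click's test runner (the URLs, group name, and token are placeholders, and the CSV layout follows the docstring above):

from click.testing import CliRunner

from gitlab_migration.cli import from_csv

runner = CliRunner()
with runner.isolated_filesystem():
    with open("repos.csv", "w") as fh:
        # column 1: source repo URL, column 2: target group
        fh.write("git@gitlab.old.example.com:group/project.git,new-group\n")
    result = runner.invoke(from_csv, ["repos.csv", "git@gitlab.new.example.com:", "example-token"])
    print(result.output)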
juju/relation.py | radiator-software/python-libjuju | 0 | 12799155 |
import logging
from . import model
log = logging.getLogger(__name__)
class Relation(model.ModelEntity):
async def destroy(self):
raise NotImplementedError()
# TODO: destroy a relation
| 2.109375 | 2 |
Tensorflow_Basics/tf16_classification/for_you_to_practice.py | LiJingkang/Python_Tensorflw_Learn | 0 | 12799156 |
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
import tensorflow as tf
def add_layer(inputs, in_size, out_size, activation_function=None, ):
# add one more layer and return the output of this layer
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, )
Wx_plus_b = tf.matmul(inputs, Weights) + biases
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b, )
return outputs
# define placeholder for inputs to network
# add output layer
# the error between prediction and real data
sess = tf.Session()
# important step
sess.run(tf.initialize_all_variables())
for i in range(1000):
pass
if i % 50 == 0:
pass
| 3.390625 | 3 |
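A sketch of one way the placeholders in the practice file above are typically filled in for a classification network (the input/output sizes and loss are assumptions, not the tutorial's exact values; it assumes `import tensorflow as tf` and the add_layer() defined in the file above):

# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 784])   # e.g. flattened 28x28 images
ys = tf.placeholder(tf.float32, [None, 10])    # one-hot class labels

# add output layer
prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)

# the error between prediction and real data (cross-entropy)
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)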
test/integration/test_jsonrpc.py | agnicoin/sentinel | 0 | 12799157 |
import pytest
import sys
import os
import re
os.environ['SENTINEL_ENV'] = 'test'
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
import config
from agnid import AgniDaemon
from agni_config import AgniConfig
def test_agnid():
config_text = AgniConfig.slurp_config_file(config.agni_conf)
network = 'mainnet'
is_testnet = False
genesis_hash = u'00000054f31397c25fd692a80391aa238da20c5638171b0e89a8bbf6bf7e4288'
for line in config_text.split("\n"):
if line.startswith('testnet=1'):
network = 'testnet'
is_testnet = True
genesis_hash = u'0000085e2ddad54c7313a28c2e96f7437e2ba9df1d5b2f3cdc3c554bf60dbe76'
creds = AgniConfig.get_rpc_creds(config_text, network)
agnid = AgniDaemon(**creds)
assert agnid.rpc_command is not None
assert hasattr(agnid, 'rpc_connection')
# Agni testnet block 0 hash == 0000085e2ddad54c7313a28c2e96f7437e2ba9df1d5b2f3cdc3c554bf60dbe76
# test commands without arguments
info = agnid.rpc_command('getinfo')
info_keys = [
'blocks',
'connections',
'difficulty',
'errors',
'protocolversion',
'proxy',
'testnet',
'timeoffset',
'version',
]
for key in info_keys:
assert key in info
assert info['testnet'] is is_testnet
# test commands with args
assert agnid.rpc_command('getblockhash', 0) == genesis_hash
| 2.03125 | 2 |
Tests/RelationshipTest.py | mucsci-students/2021sp-420-team1 | 2 | 12799158 |
import unittest
from ClassCollection import ClassCollection
# Todo
# Check if the classes exist in the classCollection (helper?)
# Check if relationship already exists (helper?)
# if it does, error
# if not, add parameter pair to the relationshipCollection
class RelationshipTest(unittest.TestCase):
def testAddRelationshipNoFirstClass(self):
collection = ClassCollection()
collection.addClass("foo")
self.assertRaises(KeyError, collection.addRelationship, "bar", "foo", "aggregation")
def testAddRelationshipNoSecondClass(self):
collection = ClassCollection()
collection.addClass("bar")
self.assertRaises(KeyError, collection.addRelationship, "bar", "foo", "aggregation")
def testAddRelationshipNeitherClassExist(self):
collection = ClassCollection()
self.assertRaises(KeyError, collection.addRelationship, "bar", "foo", "aggregation")
# Adding a relationship that already exists
def testAddRelationshipAlreadyExists(self):
collection = ClassCollection()
collection.addClass("foo")
collection.addClass("bar")
collection.addRelationship("bar", "foo", "aggregation")
self.assertRaises(KeyError, collection.addRelationship, "bar", "foo", "aggregation")
def testRelationshipAddedSuccesfully(self):
collection = ClassCollection()
collection.addClass("foo")
collection.addClass("bar")
collection.addRelationship("foo", "bar", "realization")
self.assertIsNotNone(collection.getRelationship("foo", "bar"))
def testDeleteRelationshipNoFirstClass(self):
collection = ClassCollection()
collection.addClass("foo")
self.assertRaises(KeyError, collection.deleteRelationship, "bar", "foo")
def testDeleteRelationshipNoSecondClass(self):
collection = ClassCollection()
collection.addClass("bar")
self.assertRaises(KeyError, collection.deleteRelationship, "bar", "foo")
def testDeleteRelationshipNeitherClassExist(self):
collection = ClassCollection()
self.assertRaises(KeyError, collection.deleteRelationship, "bar", "foo")
def testRelationshipDeletedSuccesfully(self):
collection = ClassCollection()
collection.addClass("foo")
collection.addClass("bar")
collection.addRelationship("foo", "bar", "inheritance")
collection.deleteRelationship("foo", "bar")
self.assertNotIn(("foo", "bar"), collection.relationshipDict)
self.assertRaises(KeyError, collection.deleteRelationship, "foo", "bar")
def testRenameRelationship(self):
collection = ClassCollection()
collection.addClass("foo")
collection.addClass("bar")
collection.addRelationship("foo", "bar", "inheritance")
collection.renameRelationship("foo", "bar", "composition")
self.assertEquals("composition",collection.relationshipDict[("foo", "bar")].typ)
if __name__ == '__main__':
unittest.main()
| 3.296875 | 3 |
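The Todo notes and tests above pin down the expected ClassCollection behaviour: both classes must exist, duplicate or missing relationships raise KeyError, and relationships live in relationshipDict keyed by the (source, destination) pair. A minimal sketch consistent with those tests (names and error messages here are assumptions, not the project's actual implementation):

class Relationship:
    def __init__(self, source, destination, typ):
        self.source = source
        self.destination = destination
        self.typ = typ

class ClassCollection:
    def __init__(self):
        self.classDict = {}          # the real project stores class objects; a name is enough here
        self.relationshipDict = {}   # keyed by (source, destination)

    def addClass(self, name):
        self.classDict[name] = name

    def addRelationship(self, source, destination, typ):
        # both classes must exist and the pair must not already be related
        if source not in self.classDict or destination not in self.classDict:
            raise KeyError(f"Class '{source}' or '{destination}' does not exist")
        if (source, destination) in self.relationshipDict:
            raise KeyError(f"Relationship ({source}, {destination}) already exists")
        self.relationshipDict[(source, destination)] = Relationship(source, destination, typ)

    def getRelationship(self, source, destination):
        return self.relationshipDict.get((source, destination))

    def deleteRelationship(self, source, destination):
        if (source, destination) not in self.relationshipDict:
            raise KeyError(f"Relationship ({source}, {destination}) does not exist")
        del self.relationshipDict[(source, destination)]

    def renameRelationship(self, source, destination, typ):
        self.relationshipDict[(source, destination)].typ = typ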
greenbyteapi/http/auth/custom_header_auth.py | charlie9578/greenbyte-api-sdk | 0 | 12799159 |
# -*- coding: utf-8 -*-
"""
greenbyteapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
from greenbyteapi.configuration import Configuration
class CustomHeaderAuth:
@staticmethod
def apply(http_request):
""" Add custom authentication to the request.
Args:
http_request (HttpRequest): The HttpRequest object to which
authentication will be added.
"""
http_request.add_header("X-Api-Key", Configuration.x_api_key)
| 2.171875 | 2 |
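A hedged usage sketch of the header auth above; DummyRequest stands in for the SDK's HttpRequest (whose constructor is not shown here), and the key value is a placeholder:

from greenbyteapi.configuration import Configuration
from greenbyteapi.http.auth.custom_header_auth import CustomHeaderAuth

class DummyRequest:
    """Minimal stand-in exposing the add_header() interface used by apply()."""
    def __init__(self):
        self.headers = {}
    def add_header(self, name, value):
        self.headers[name] = value

Configuration.x_api_key = "example-api-key"  # placeholder
request = DummyRequest()
CustomHeaderAuth.apply(request)
assert request.headers["X-Api-Key"] == "example-api-key"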
ale/drivers/hyb2_drivers.py | tthatcher95/ale | 0 | 12799160 |
import spiceypy as spice
import ale
from ale.base.data_naif import NaifSpice
from ale.base.label_isis import IsisLabel
from ale.base.type_sensor import Framer
from ale.base.base import Driver
class Hayabusa2IsisLabelNaifSpiceDriver(Framer, IsisLabel, NaifSpice, Driver):
@property
def sensor_model_version(self):
return 1
@property
def ikid(self):
return self.label['IsisCube']['Kernels']['NaifFrameCode']
@property
def spacecraft_name(self):
return super().spacecraft_name.replace('_', ' ')
@property
def pixel_size(self):
return spice.gdpool('INS{}_PIXEL_PITCH'.format(self.ikid), 0, 1)[0]
@property
def detector_center_sample(self):
return 499.5
@property
def detector_center_line(self):
return 499.5
@property
def ephemeris_start_time(self):
        initial_time = spice.utc2et(self.utc_start_time.isoformat())
        # To get shutter end (close) time, subtract 2 seconds from the start time
        updated_time = initial_time - 2
# To get shutter start (open) time, take off the exposure duration from the end time.
start_time = updated_time - self.exposure_duration
return start_time
@property
def ephemeris_stop_time(self):
return self.ephemeris_start_time + self.exposure_duration
| 2.21875 | 2 |
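A small numeric illustration of the timing arithmetic in the driver above (the numbers are made up; spice.utc2et is replaced by a constant):

et_from_label = 100.0                          # stand-in for spice.utc2et(utc_start_time)
exposure_duration = 0.5
shutter_close = et_from_label - 2              # the driver's fixed 2-second offset
start_time = shutter_close - exposure_duration  # ephemeris_start_time -> 97.5
stop_time = start_time + exposure_duration      # ephemeris_stop_time  -> 98.0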
Project Euler (HackerRank)/016. Power digit sum.py | XitizVerma/Data-Structures-and-Algorithms-Advanced | 1 | 12799161 |
lookup = []
lookup.append(1)
for i in range((10**4)+1):
lookup.append(lookup[i]*2)
answer = []
for i in lookup:
anslist = [int(char) for char in str(i)]
answer.append(sum(anslist))
t = int(input())
while t:
t -= 1
n = int(input())
print(answer[n])
| 3.203125 | 3 |
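A quick self-contained check of the digit-sum approach used above, for n = 15 (2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26):

n = 15
assert sum(int(digit) for digit in str(2 ** n)) == 26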
tests/integration/CbfSubarray_test.py | jamesjiang52/mid-cbf-mcs | 0 | 12799162 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the csp-lmc-prototype project
#
#
#
# Distributed under the terms of the BSD-3-Clause license.
# See LICENSE.txt for more info.
"""Contain the tests for the CbfSubarray."""
# Standard imports
import sys
import os
import time
from datetime import datetime
import json
import logging
# Path
file_path = os.path.dirname(os.path.abspath(__file__))
# Tango imports
import tango
from tango import DevState
import pytest
# SKA specific imports
from ska_mid_cbf_mcs.commons.global_enum import freq_band_dict
from ska_tango_base.control_model import LoggingLevel, HealthState
from ska_tango_base.control_model import AdminMode, ObsState
from ska_tango_base.base_device import _DEBUGGER_PORT
@pytest.mark.usefixtures("proxies", "input_test_data")
class TestCbfSubarray:
def test_AddRemoveReceptors_valid(self, proxies):
"""
Test valid AddReceptors and RemoveReceptors commands
"""
timeout_millis = proxies.subarray[1].get_timeout_millis()
log_msg = "timeout_millis = {} ".format(timeout_millis)
#logging.info(log_msg)
#logging.info("start_time = {}".format(time.time()))
logging.info("start datetime = {}".format(datetime.now()))
if proxies.debug_device_is_on:
port = proxies.subarray[1].DebugDevice()
try:
proxies.clean_proxies()
if proxies.controller.State() == DevState.OFF:
proxies.controller.Init()
proxies.wait_timeout_dev([proxies.controller], DevState.STANDBY, 3, 1)
proxies.controller.On()
proxies.wait_timeout_dev([proxies.controller], DevState.ON, 3, 1)
proxies.clean_proxies()
# turn on Subarray
if proxies.subarray[1].State() != DevState.ON:
proxies.subarray[1].On()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
assert proxies.subarray[1].State() == DevState.ON
assert proxies.subarray[1].obsState == ObsState.EMPTY
# receptor list should be empty right after initialization
assert len(proxies.subarray[1].receptors) == 0
assert all([proxies.vcc[i + 1].subarrayMembership == 0 for i in range(4)])
input_receptors = [1, 3, 4]
# add some receptors
proxies.subarray[1].AddReceptors(input_receptors)
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert [proxies.subarray[1].receptors[i] for i in range(3)] == input_receptors
assert all([proxies.vcc[proxies.receptor_to_vcc[i]].subarrayMembership == 1 for i in input_receptors])
assert proxies.subarray[1].obsState == ObsState.IDLE
# add more receptors...
proxies.subarray[1].AddReceptors([2])
time.sleep(1)
assert [proxies.subarray[1].receptors[i] for i in range(4)] == [1, 3, 4, 2]
assert proxies.vcc[proxies.receptor_to_vcc[2]].subarrayMembership == 1
# remove some receptors
proxies.subarray[1].RemoveReceptors([2, 1, 4])
time.sleep(1)
assert proxies.subarray[1].receptors == ([3])
assert all([proxies.vcc[proxies.receptor_to_vcc[i]].subarrayMembership == 0 for i in [1, 2, 4]])
assert proxies.vcc[proxies.receptor_to_vcc[3]].subarrayMembership == 1
# remove remaining receptors
proxies.subarray[1].RemoveReceptors([3])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.EMPTY, 1, 1)
assert len(proxies.subarray[1].receptors) == 0
assert proxies.vcc[proxies.receptor_to_vcc[3]].subarrayMembership == 0
assert proxies.subarray[1].obsState == ObsState.EMPTY
proxies.subarray[1].Off()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.OFF, 3, 1)
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
def test_AddRemoveReceptors_invalid_single(self, proxies):
"""
Test invalid AddReceptors commands involving a single subarray:
- when a receptor ID is invalid (e.g. out of range)
- when a receptor to be removed is not assigned to the subarray
"""
try:
# turn on Subarray
if proxies.subarray[1].State() != DevState.ON:
proxies.subarray[1].On()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
assert proxies.subarray[1].State() == DevState.ON
assert proxies.subarray[1].obsState == ObsState.EMPTY
# receptor list should be empty right after initialization
assert len(proxies.subarray[1].receptors) == 0
assert all([proxies.vcc[i + 1].subarrayMembership == 0 for i in range(4)])
# add some receptors to subarray 1
proxies.subarray[1].AddReceptors([1, 3])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert proxies.subarray[1].receptors[0] == 1
assert proxies.subarray[1].receptors[1] == 3
assert all([proxies.vcc[proxies.receptor_to_vcc[i]].subarrayMembership == 1 for i in [1, 3]])
assert proxies.subarray[1].obsState == ObsState.IDLE
# TODO: fix this
# try adding an invalid receptor ID
# with pytest.raises(tango.DevFailed) as df:
# proxies.subarray[1].AddReceptors([5])
# time.sleep(1)
# assert "Invalid receptor ID" in str(df.value.args[0].desc)
# try removing a receptor not assigned to subarray 1
# doing this doesn't actually throw an error
proxies.subarray[1].RemoveReceptors([2])
assert proxies.subarray[1].receptors[0] == 1
assert proxies.subarray[1].receptors[1] == 3
proxies.subarray[1].RemoveAllReceptors()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.EMPTY, 1, 1)
proxies.subarray[1].Off()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.OFF, 3, 1)
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
@pytest.mark.skip(reason="Since there's only a single subarray, this test is currently broken.")
def test_AddRemoveReceptors_invalid_multiple(self, proxies):
"""
Test invalid AddReceptors commands involving multiple subarrays:
- when a receptor to be added is already in use by a different subarray
"""
# for proxy in vcc_proxies:
# proxy.Init()
# proxies.subarray[1].set_timeout_millis(60000)
# subarray_2_proxy.set_timeout_millis(60000)
# proxies.subarray[1].Init()
# subarray_2_proxy.Init()
# time.sleep(3)
# cbf_controller_proxy.set_timeout_millis(60000)
# cbf_controller_proxy.Init()
# time.sleep(60) # takes pretty long for CBF controller to initialize
# receptor_to_vcc = dict([*map(int, pair.split(":"))] for pair in
# cbf_controller_proxy.receptorToVcc)
# cbf_controller_proxy.On()
# time.sleep(3)
# # receptor list should be empty right after initialization
# assert proxies.subarray[1].receptors == ()
# assert subarray_2_proxy.receptors == ()
# assert all([proxy.subarrayMembership == 0 for proxy in vcc_proxies])
# assert proxies.subarray[1].State() == DevState.OFF
# assert subarray_2_proxy.State() == DevState.OFF
# # add some receptors to subarray 1
# proxies.subarray[1].AddReceptors([1, 3])
# time.sleep(1)
# assert proxies.subarray[1].receptors == (1, 3)
# assert all([vcc_proxies[receptor_to_vcc[i] - 1].subarrayMembership == 1 for i in [1, 3]])
# assert proxies.subarray[1].State() == DevState.ON
# # try adding some receptors (including an invalid one) to subarray 2
# with pytest.raises(tango.DevFailed) as df:
# subarray_2_proxy.AddReceptors([1, 2, 4])
# time.sleep(1)
# assert "already in use" in str(df.value.args[0].desc)
# assert subarray_2_proxy.receptors == (2, 4)
# assert all([vcc_proxies[receptor_to_vcc[i] - 1].subarrayMembership == 1 for i in [1, 3]])
# assert all([vcc_proxies[receptor_to_vcc[i] - 1].subarrayMembership == 2 for i in [2, 4]])
# assert subarray_2_proxy.State() == DevState.ON
def test_RemoveAllReceptors(self, proxies):
"""
Test RemoveAllReceptors command
"""
try:
# turn on Subarray
if proxies.subarray[1].State() != DevState.ON:
proxies.subarray[1].On()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
assert proxies.subarray[1].State() == DevState.ON
assert proxies.subarray[1].obsState == ObsState.EMPTY
# receptor list should be empty right after initialization
assert len(proxies.subarray[1].receptors) == 0
assert all([proxies.vcc[i + 1].subarrayMembership == 0 for i in range(4)])
# add some receptors
proxies.subarray[1].AddReceptors([1, 3, 4])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert all([proxies.subarray[1].receptors[i] == j for i, j in zip(range(3), [1, 3, 4])])
assert all([proxies.vcc[proxies.receptor_to_vcc[i]].subarrayMembership == 1 for i in [1, 3, 4]])
assert proxies.subarray[1].obsState == ObsState.IDLE
# remove all receptors
proxies.subarray[1].RemoveAllReceptors()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.EMPTY, 1, 1)
assert len(proxies.subarray[1].receptors) == 0
assert all([proxies.vcc[proxies.receptor_to_vcc[i]].subarrayMembership == 0 for i in [1, 3, 4]])
assert proxies.subarray[1].obsState == ObsState.EMPTY
proxies.subarray[1].Off()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.OFF, 3, 1)
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
#TODO: fix; currently tests break if multiple scan configurations are tested
def test_ConfigureScan_basic(self, proxies):
"""
Test a successful scan configuration
"""
proxies.subarray[1].loggingLevel = LoggingLevel.DEBUG
try:
# turn on Subarray
if proxies.subarray[1].State() != DevState.ON:
proxies.subarray[1].On()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
# check initial value of attributes of CBF subarray
vcc_index = proxies.receptor_to_vcc[4]
logging.info("vcc_index = {}".format( vcc_index ))
assert len(proxies.subarray[1].receptors) == 0
assert proxies.subarray[1].configID == ''
# TODO in CbfSubarray, at end of scan, clear all private data
#assert proxies.subarray[1].frequencyBand == 0
assert proxies.subarray[1].obsState == ObsState.EMPTY
# add receptors
proxies.subarray[1].AddReceptors([1, 3, 4, 2])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert all([proxies.subarray[1].receptors[i] == j for i, j in zip(range(3), [1, 3, 4])])
# configure scan
config_file_name = "/../data/ConfigureScan_basic.json"
f = open(file_path + config_file_name)
proxies.subarray[1].ConfigureScan(f.read().replace("\n", ""))
f.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 15, 1)
# check configured attributes of CBF subarray
assert proxies.subarray[1].configID == "band:5a, fsp1, 744 channels average factor 8"
assert proxies.subarray[1].frequencyBand == 4 # means 5a
assert proxies.subarray[1].obsState == ObsState.READY
proxies.wait_timeout_obs([proxies.vcc[i + 1] for i in range(4)], ObsState.READY, 1, 1)
# check frequency band of VCCs, including states of
# frequency band capabilities
logging.info( ("proxies.vcc[vcc_index].frequencyBand = {}".
format( proxies.vcc[vcc_index].frequencyBand)) )
vcc_band_proxies = proxies.vccBand[vcc_index - 1]
assert proxies.vcc[proxies.receptor_to_vcc[4]].frequencyBand == 4
assert proxies.vcc[proxies.receptor_to_vcc[1]].frequencyBand == 4
for proxy in proxies.vccBand[proxies.receptor_to_vcc[4] - 1]:
logging.info("VCC proxy.State() = {}".format(proxy.State()))
assert [proxy.State() for proxy in proxies.vccBand[proxies.receptor_to_vcc[4] - 1]] == [
DevState.DISABLE, DevState.DISABLE, DevState.DISABLE, DevState.ON]
assert [proxy.State() for proxy in proxies.vccBand[proxies.receptor_to_vcc[1] - 1]] == [
DevState.DISABLE, DevState.DISABLE, DevState.DISABLE, DevState.ON]
# check the rest of the configured attributes of VCCs
# first for VCC belonging to receptor 10...
assert proxies.vcc[proxies.receptor_to_vcc[4]].subarrayMembership == 1
assert proxies.vcc[proxies.receptor_to_vcc[4]].band5Tuning[0] == 5.85
assert proxies.vcc[proxies.receptor_to_vcc[4]].band5Tuning[1] == 7.25
assert proxies.vcc[proxies.receptor_to_vcc[4]].frequencyBandOffsetStream1 == 0
assert proxies.vcc[proxies.receptor_to_vcc[4]].frequencyBandOffsetStream2 == 0
assert proxies.vcc[proxies.receptor_to_vcc[4]].rfiFlaggingMask == "{}"
# then for VCC belonging to receptor 1...
assert proxies.vcc[proxies.receptor_to_vcc[1]].subarrayMembership == 1
assert proxies.vcc[proxies.receptor_to_vcc[1]].band5Tuning[0] == 5.85
assert proxies.vcc[proxies.receptor_to_vcc[1]].band5Tuning[1] == 7.25
# check configured attributes of search windows
# first for search window 1...
# TODO - SearchWidow device test is disabled since the same
# functionality is implemented by the VccSearchWindow device;
# to be decide which one to keep.
# print("proxies.sw[1].State() = {}".format(proxies.sw[1].State()))
# print("proxies.sw[2].State() = {}".format(proxies.sw[2].State()))
# assert proxies.sw[1].State() == DevState.ON
# assert proxies.sw[1].searchWindowTuning == 6000000000
# assert proxies.sw[1].tdcEnable == True
# assert proxies.sw[1].tdcNumBits == 8
# assert proxies.sw[1].tdcPeriodBeforeEpoch == 5
# assert proxies.sw[1].tdcPeriodAfterEpoch == 25
# assert "".join(proxies.sw[1].tdcDestinationAddress.split()) in [
# "[{\"receptorID\":4,\"tdcDestinationAddress\":[\"foo\",\"bar\",\"8080\"]},{\"receptorID\":1,\"tdcDestinationAddress\":[\"fizz\",\"buzz\",\"80\"]}]",
# "[{\"tdcDestinationAddress\":[\"foo\",\"bar\",\"8080\"],\"receptorID\":4},{\"receptorID\":1,\"tdcDestinationAddress\":[\"fizz\",\"buzz\",\"80\"]}]",
# "[{\"receptorID\":4,\"tdcDestinationAddress\":[\"foo\",\"bar\",\"8080\"]},{\"tdcDestinationAddress\":[\"fizz\",\"buzz\",\"80\"],\"receptorID\":1}]",
# "[{\"tdcDestinationAddress\":[\"foo\",\"bar\",\"8080\"],\"receptorID\":4},{\"tdcDestinationAddress\":[\"fizz\",\"buzz\",\"80\"],\"receptorID\":1}]",
# ]
# # then for search window 2...
# assert proxies.sw[2].State() == DevState.DISABLE
# assert proxies.sw[2].searchWindowTuning == 7000000000
# assert proxies.sw[2].tdcEnable == False
time.sleep(1)
# check configured attributes of VCC search windows
# first for search window 1 of VCC belonging to receptor 10...
assert proxies.vccTdc[proxies.receptor_to_vcc[4] - 1][0].State() == DevState.ON
assert proxies.vccTdc[proxies.receptor_to_vcc[4] - 1][0].searchWindowTuning == 6000000000
assert proxies.vccTdc[proxies.receptor_to_vcc[4] - 1][0].tdcEnable == True
assert proxies.vccTdc[proxies.receptor_to_vcc[4] - 1][0].tdcNumBits == 8
assert proxies.vccTdc[proxies.receptor_to_vcc[4] - 1][0].tdcPeriodBeforeEpoch == 5
assert proxies.vccTdc[proxies.receptor_to_vcc[4] - 1][0].tdcPeriodAfterEpoch == 25
# TODO - re-enable and debug!
# assert proxies.vccTdc[proxies.receptor_to_vcc[4] - 1][0].tdcDestinationAddress == (
# "foo", "bar", "8080"
# )
# then for search window 1 of VCC belonging to receptor 1...
assert proxies.vccTdc[proxies.receptor_to_vcc[1] - 1][0].State() == DevState.ON
assert proxies.vccTdc[proxies.receptor_to_vcc[1] - 1][0].searchWindowTuning == 6000000000
assert proxies.vccTdc[proxies.receptor_to_vcc[1] - 1][0].tdcEnable == True
assert proxies.vccTdc[proxies.receptor_to_vcc[1] - 1][0].tdcNumBits == 8
assert proxies.vccTdc[proxies.receptor_to_vcc[1] - 1][0].tdcPeriodBeforeEpoch == 5
assert proxies.vccTdc[proxies.receptor_to_vcc[1] - 1][0].tdcPeriodAfterEpoch == 25
# TODO - re-enable and debug!
# assert proxies.vccTdc[proxies.receptor_to_vcc[1] - 1][0].tdcDestinationAddress == (
# "fizz", "buzz", "80"
# )
# then for search window 2 of VCC belonging to receptor 10...
assert proxies.vccTdc[proxies.receptor_to_vcc[4] - 1][1].State() == DevState.DISABLE
assert proxies.vccTdc[proxies.receptor_to_vcc[4] - 1][1].searchWindowTuning == 7000000000
assert proxies.vccTdc[proxies.receptor_to_vcc[4] - 1][1].tdcEnable == False
# and lastly for search window 2 of VCC belonging to receptor 1...
assert proxies.vccTdc[proxies.receptor_to_vcc[1] - 1][1].State() == DevState.DISABLE
assert proxies.vccTdc[proxies.receptor_to_vcc[1] - 1][1].searchWindowTuning == 7000000000
assert proxies.vccTdc[proxies.receptor_to_vcc[1] - 1][1].tdcEnable == False
# check configured attributes of FSPs, including states of function mode capabilities
assert proxies.fsp[1].functionMode == 1
assert 1 in proxies.fsp[1].subarrayMembership
assert [proxy.State() for proxy in proxies.fsp1FunctionMode] == [
DevState.ON, DevState.DISABLE, DevState.DISABLE, DevState.DISABLE
]
# TODO -
# assert [proxy.State() for proxy in fsp_2_function_mode_proxy] == [
# DevState.ON, DevState.DISABLE, DevState.DISABLE, DevState.DISABLE
# ]
# check configured attributes of FSP subarrays
# first for FSP 3 ... (this is a PSS fsp device)
assert proxies.fspSubarray[3].receptors[0] == 3
assert proxies.fspSubarray[3].receptors[1] == 1
assert proxies.fspSubarray[3].searchWindowID == 2
assert proxies.fspSubarray[3].searchBeamID[0] == 300
assert proxies.fspSubarray[3].searchBeamID[1] == 400
# TODO: currently searchBeams is stored by the device
# as a json string ( via attribute 'searchBeams');
# this has to be updated in FspPssSubarray
# to read/write individual members
searchBeam = proxies.fspSubarray[3].searchBeams
searchBeam0 = json.loads(searchBeam[0])
searchBeam1 = json.loads(searchBeam[1])
assert searchBeam0["search_beam_id"] == 300
assert searchBeam0["receptor_ids"][0] == 3
assert searchBeam0["enable_output"] == True
assert searchBeam0["averaging_interval"] == 4
# TODO - this does not pass - to debug & fix
#assert searchBeam0["searchBeamDestinationAddress"] == "10.05.1.1"
assert searchBeam1["search_beam_id"] == 400
assert searchBeam1["receptor_ids"][0] == 1
assert searchBeam1["enable_output"] == True
assert searchBeam1["averaging_interval"] == 2
# TODO - this does not pass - to debug & fix
#assert searchBeam1["searchBeamDestinationAddress"] == "10.05.2.1"
# check configured attributes of FSP subarrays
# first for FSP 1... (this is a CORR fsp device)
assert proxies.fspSubarray[1].obsState == ObsState.READY
assert proxies.fspSubarray[1].receptors == 4
assert proxies.fspSubarray[1].frequencyBand == 4
assert proxies.fspSubarray[1].band5Tuning[0] == 5.85
assert proxies.fspSubarray[1].band5Tuning[1] == 7.25
assert proxies.fspSubarray[1].frequencyBandOffsetStream1 == 0
assert proxies.fspSubarray[1].frequencyBandOffsetStream2 == 0
assert proxies.fspSubarray[1].frequencySliceID == 1
assert proxies.fspSubarray[1].corrBandwidth == 1
assert proxies.fspSubarray[1].zoomWindowTuning == 4700000
assert proxies.fspSubarray[1].integrationTime == 1
assert proxies.fspSubarray[1].fspChannelOffset == 14880
assert proxies.fspSubarray[1].channelAveragingMap[0][0] == 0
assert proxies.fspSubarray[1].channelAveragingMap[0][1] == 8
assert proxies.fspSubarray[1].channelAveragingMap[1][0] == 744
assert proxies.fspSubarray[1].channelAveragingMap[1][1] == 8
assert proxies.fspSubarray[1].channelAveragingMap[2][0] == 1488
assert proxies.fspSubarray[1].channelAveragingMap[2][1] == 8
assert proxies.fspSubarray[1].channelAveragingMap[3][0] == 2232
assert proxies.fspSubarray[1].channelAveragingMap[3][1] == 8
assert proxies.fspSubarray[1].channelAveragingMap[4][0] == 2976
assert proxies.fspSubarray[1].outputLinkMap[0][0] == 0
assert proxies.fspSubarray[1].outputLinkMap[0][1] == 4
assert proxies.fspSubarray[1].outputLinkMap[1][0] == 744
assert proxies.fspSubarray[1].outputLinkMap[1][1] == 8
assert proxies.fspSubarray[1].outputLinkMap[2][0] == 1488
assert proxies.fspSubarray[1].outputLinkMap[2][1] == 12
assert proxies.fspSubarray[1].outputLinkMap[3][0] == 2232
assert proxies.fspSubarray[1].outputLinkMap[3][1] == 16
assert str(proxies.fspSubarray[1].visDestinationAddress).replace('"',"'") == \
str({"outputHost": [[0, "192.168.0.1"], [8184, "192.168.0.2"]],
"outputMac": [[0, "06-00-00-00-00-01"]],
"outputPort": [[0, 9000, 1], [8184, 9000, 1]]}).replace('"',"'")
# Clean Up
proxies.clean_proxies()
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
@pytest.mark.skip(reason="pst not currently supported")
def test_ConfigureScan_onlyPst_basic(self, proxies):
"""
Test a successful PST-BF scan configuration
"""
try:
# turn on Subarray
if proxies.subarray[1].State() != DevState.ON:
proxies.subarray[1].On()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
proxy.loggingLevel = "DEBUG"
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
# check initial value of attributes of CBF subarray
assert len(proxies.subarray[1].receptors) == 0
assert proxies.subarray[1].configID == ''
assert proxies.subarray[1].frequencyBand == 0
assert proxies.subarray[1].obsState == ObsState.EMPTY
# add receptors
proxies.subarray[1].AddReceptors([4, 1, 3, 2])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert all([proxies.subarray[1].receptors[i] == j for i, j in zip(range(4), [4, 1, 3, 2])])
# configure scan
f = open(file_path + "/../data/ConfigureScan_basic.json")
proxies.subarray[1].ConfigureScan(f.read().replace("\n", ""))
f.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 15, 1)
# check configured attributes of CBF subarray
assert proxies.subarray[1].configID == "band:5a, fsp1, 744 channels average factor 8"
assert proxies.subarray[1].frequencyBand == 4
assert proxies.subarray[1].obsState == ObsState.READY
proxies.wait_timeout_obs([proxies.vcc[i + 1] for i in range(4)], ObsState.READY, 1, 1)
# check frequency band of VCCs, including states of frequency band capabilities
assert proxies.vcc[proxies.receptor_to_vcc[2]].frequencyBand == 4
assert [proxy.State() for proxy in proxies.vccBand[proxies.receptor_to_vcc[2] - 1]] == [
DevState.DISABLE, DevState.DISABLE, DevState.DISABLE, DevState.ON]
# check the rest of the configured attributes of VCCs
# first for VCC belonging to receptor 2...
assert proxies.vcc[proxies.receptor_to_vcc[2]].subarrayMembership == 1
assert proxies.vcc[proxies.receptor_to_vcc[2]].frequencyBandOffsetStream1 == 0
assert proxies.vcc[proxies.receptor_to_vcc[2]].frequencyBandOffsetStream2 == 0
assert proxies.vcc[proxies.receptor_to_vcc[2]].rfiFlaggingMask == "{}"
# check configured attributes of FSPs, including states of function mode capabilities
assert proxies.fsp[2].State() == DevState.ON
assert proxies.fsp[2].functionMode == 3
assert 1 in proxies.fsp[2].subarrayMembership
assert [proxy.State() for proxy in proxies.fsp2FunctionMode] == [
DevState.DISABLE, DevState.DISABLE, DevState.ON, DevState.DISABLE
]
# check configured attributes of FSP subarrays
# FSP 2
assert proxies.fspSubarray[6].obsState == ObsState.READY
assert all([proxies.fspSubarray[6].receptors[i] == j for i, j in zip(range(1), [2])])
assert all([proxies.fspSubarray[6].timingBeamID[i] == j for i, j in zip(range(1), [10])])
# Clean Up
proxies.clean_proxies()
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
@pytest.mark.skip(reason="pst not currently supported")
def test_ConfigureScan_onlyPst_basic_FSP_scan_parameters(self, proxies):
"""
Test a successful transmission of PST-BF parameters to FSP
"""
try:
# turn on Subarray
if proxies.subarray[1].State() != DevState.ON:
proxies.subarray[1].On()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
# check initial value of attributes of CBF subarray
assert len(proxies.subarray[1].receptors) == 0
assert proxies.subarray[1].configID == ''
assert proxies.subarray[1].frequencyBand == 0
assert proxies.subarray[1].obsState == ObsState.EMPTY
# add receptors
proxies.subarray[1].AddReceptors([4, 1, 3, 2])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert all([proxies.subarray[1].receptors[i] == j for i, j in zip(range(4), [4, 1, 3, 2])])
# configure scan
f = open(file_path + "/../data/ConfigureScan_basic.json")
proxies.subarray[1].ConfigureScan(f.read().replace("\n", ""))
f.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 15, 1)
# update jones matrices from tm emulator
f = open(file_path + "/../data/jonesmatrix_fsp.json")
jones_matrix = json.loads(f.read().replace("\n", ""))
epoch = str(int(time.time()))
for matrix in jones_matrix["jonesMatrix"]:
matrix["epoch"] = epoch
if matrix["destinationType"] == "fsp":
epoch = str(int(epoch) + 10)
# update Jones Matrix
proxies.tm.jonesMatrix = json.dumps(jones_matrix)
time.sleep(1)
for matrix in jones_matrix["jonesMatrix"]:
if matrix["destinationType"] == "fsp":
for receptor in matrix["matrixDetails"]:
rec_id = int(receptor["receptor"])
fs_id = receptor["receptorMatrix"][0]["fsid"]
for index, value in enumerate(receptor["receptorMatrix"][0]["matrix"]):
try:
assert proxies.fsp[fs_id].jonesMatrix[rec_id - 1][index] == value
except AssertionError as ae:
raise ae
except Exception as e:
raise e
time.sleep(10)
# update delay models from tm emulator
f = open(file_path + "/../data/delaymodel_fsp.json")
delay_model = json.loads(f.read().replace("\n", ""))
epoch = str(int(time.time()))
for model in delay_model["delayModel"]:
model["epoch"] = epoch
if model["destinationType"] == "fsp":
epoch = str(int(epoch) + 10)
# update delay model
proxies.tm.delayModel = json.dumps(delay_model)
time.sleep(1)
for model in delay_model["delayModel"]:
if model["destinationType"] == "fsp":
for receptor in model["delayDetails"]:
rec_id = int(receptor["receptor"])
fs_id = receptor["receptorDelayDetails"][0]["fsid"]
for index, value in enumerate(receptor["receptorDelayDetails"][0]["delayCoeff"]):
try:
assert proxies.fsp[fs_id].delayModel[rec_id - 1][index] == value
except AssertionError as ae:
raise ae
except Exception as e:
raise e
time.sleep(10)
# update timing beam weights from tm emulator
f = open(file_path + "/../data/timingbeamweights.json")
timing_beam_weights = json.loads(f.read().replace("\n", ""))
epoch = str(int(time.time()))
for weights in timing_beam_weights["beamWeights"]:
weights["epoch"] = epoch
epoch = str(int(epoch) + 10)
# update delay model
proxies.tm.beamWeights = json.dumps(timing_beam_weights)
time.sleep(1)
for weights in timing_beam_weights["beamWeights"]:
for receptor in weights["beamWeightsDetails"]:
rec_id = int(receptor["receptor"])
fs_id = receptor["receptorWeightsDetails"][0]["fsid"]
for index, value in enumerate(receptor["receptorWeightsDetails"][0]["weights"]):
try:
assert proxies.fsp[fs_id].timingBeamWeights[rec_id - 1][index] == value
except AssertionError as ae:
raise ae
except Exception as e:
raise e
time.sleep(10)
# Clean Up
proxies.clean_proxies()
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
def test_EndScan(self, proxies, input_test_data):
"""
Test the EndScan command
"""
try:
# turn on Subarray
if proxies.subarray[1].State() != DevState.ON:
proxies.subarray[1].On()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
assert proxies.subarray[1].obsState == ObsState.EMPTY
# Input test data:
input_receptors = input_test_data[0]
config_file_name = input_test_data[1]
            subarr_index = 1
logging.info( "input_receptors = {}".format(input_receptors) )
logging.info( "config_file_name = {}".format(config_file_name) )
num_receptors = len(input_receptors)
vcc_ids = [None for _ in range(num_receptors)]
for receptor_id, ii in zip(input_receptors, range(num_receptors)):
vcc_ids[ii] = proxies.receptor_to_vcc[receptor_id]
proxies.subarray[subarr_index].AddReceptors(input_receptors)
proxies.wait_timeout_obs([proxies.subarray[subarr_index]], ObsState.IDLE, 1, 1)
assert all([proxies.subarray[subarr_index].receptors[i] == j for i, j in zip(range(num_receptors), input_receptors)])
assert proxies.subarray[subarr_index].obsState == ObsState.IDLE
# Check fsp obsState BEFORE scan configuration:
assert proxies.fspCorrSubarray[subarr_index-1].obsState == ObsState.IDLE
assert proxies.fspPssSubarray[subarr_index-1].obsState == ObsState.IDLE
assert proxies.fspPstSubarray[subarr_index-1].obsState == ObsState.IDLE
logging.info( "First vcc obsState BEFORE ConfigureScan = {}".
format(proxies.vcc[vcc_ids[0]].obsState) )
f = open(file_path + config_file_name)
json_string = f.read().replace("\n", "")
input_config_dict = json.loads(json_string)
proxies.subarray[subarr_index].ConfigureScan(json_string)
f.close()
proxies.wait_timeout_obs([proxies.subarray[subarr_index]], ObsState.READY, 15, 1)
logging.info( "First vcc obsState AFTER ConfigureScan = {}".
format(proxies.vcc[vcc_ids[0]].obsState) )
# check some configured attributes of CBF subarray
frequency_band = input_config_dict["common"]["frequency_band"]
input_band_index = freq_band_dict()[frequency_band]
assert proxies.subarray[subarr_index].configID == input_config_dict["common"]["config_id"]
assert proxies.subarray[subarr_index].frequencyBand == input_band_index
assert proxies.subarray[subarr_index].obsState == ObsState.READY
# Send the Scan command
f2 = open(file_path + "/../data/Scan1_basic.json")
json_string = f2.read().replace("\n", "")
input_scan_dict = json.loads(json_string)
proxies.subarray[subarr_index].Scan(json_string)
f2.close()
proxies.wait_timeout_obs([proxies.subarray[subarr_index]], ObsState.SCANNING, 1, 1)
# Note: scan_id is 1-based and of 'string' type
# scan_index is an index into an array, therefore 0-based
scan_index = int(input_scan_dict["scan_id"]) - 1
logging.info( "proxies.fspCorrSubarray[subarr_index-1].obsState = {}".
format(proxies.fspCorrSubarray[subarr_index-1].obsState) )
logging.info( "proxies.fspPssSubarray[subarr_index-1].obsState = {}".
format(proxies.fspPssSubarray[subarr_index-1].obsState) )
logging.info( "proxies.fspPstSubarray[subarr_index-1].obsState = {}".
format(proxies.fspPstSubarray[subarr_index-1].obsState) )
# Check obsStates BEFORE the EndScan() command
assert proxies.subarray[subarr_index].obsState == ObsState.SCANNING
assert proxies.vcc[vcc_ids[0]].obsState == ObsState.SCANNING
assert proxies.vcc[vcc_ids[num_receptors-1]].obsState == ObsState.SCANNING
for fsp in input_config_dict["cbf"]["fsp"]:
if fsp["function_mode"] == "CORR":
assert proxies.fspCorrSubarray[subarr_index-1].obsState == ObsState.SCANNING
elif fsp["function_mode"] == "PSS-BF":
assert proxies.fspPssSubarray[subarr_index-1].obsState == ObsState.SCANNING
# TODO: this check does not pass, to fix
#elif fsp["function_mode"] == "PST-BF":
# assert proxies.fspPstSubarray[subarr_index-1].obsState == ObsState.SCANNING
proxies.subarray[subarr_index].EndScan()
proxies.wait_timeout_obs([proxies.subarray[subarr_index]], ObsState.READY, 1, 1)
# Check obsStates AFTER the EndScan() command
assert proxies.subarray[subarr_index].obsState == ObsState.READY
assert proxies.vcc[vcc_ids[0]].obsState == ObsState.READY
assert proxies.vcc[vcc_ids[num_receptors -1]].obsState == ObsState.READY
assert proxies.fspCorrSubarray[subarr_index-1].obsState == ObsState.READY
for fsp in input_config_dict["cbf"]["fsp"]:
if fsp["function_mode"] == "CORR":
assert proxies.fspCorrSubarray[subarr_index-1].obsState == ObsState.READY
elif fsp["function_mode"] == "PSS-BF":
assert proxies.fspPssSubarray[subarr_index-1].obsState == ObsState.READY
# TODO: this check does not pass, to fix
#elif fsp["function_mode"] == "PST-BF":
# assert proxies.fspPstSubarray[subarr_index-1].obsState == ObsState.READY
proxies.clean_proxies()
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
#TODO refactor to verify delay model values against input json
@pytest.mark.skip(reason="test needs to be refactored")
def test_ConfigureScan_delayModel(self, proxies):
"""
Test the reception of delay models
"""
# Read delay model data from file
f = open(file_path + "/../data/delaymodel.json")
delay_model = json.loads(f.read().replace("\n", ""))
f.close()
aa = delay_model["delayModel"][0]["delayDetails"][0]["receptorDelayDetails"]
num_fsp_IDs = len(aa)
for jj in range(num_fsp_IDs):
logging.info( "delayCoeff = {}".format( aa[jj]["delayCoeff"]) )
try:
# turn on Subarray
if proxies.subarray[1].State() != DevState.ON:
proxies.subarray[1].On()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
assert proxies.subarray[1].obsState == ObsState.EMPTY
# add receptors
proxies.subarray[1].AddReceptors([1, 3, 4, 2])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert all([proxies.subarray[1].receptors[i] == j for i, j in zip(range(3), [1, 3, 4])])
# configure scan
f = open(file_path + "/../data/ConfigureScan_basic.json")
proxies.subarray[1].ConfigureScan(f.read().replace("\n", ""))
f.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 30, 1)
assert proxies.subarray[1].obsState == ObsState.READY
# create a delay model
# Insert the epoch
delay_model["delayModel"][0]["epoch"] = str(int(time.time()) + 20)
delay_model["delayModel"][1]["epoch"] = "0"
delay_model["delayModel"][2]["epoch"] = str(int(time.time()) + 10)
# update delay model
proxies.tm.delayModel = json.dumps(delay_model)
time.sleep(1)
for jj in range(4):
logging.info((" proxies.vcc[{}].receptorID = {}".
format(jj+1, proxies.vcc[jj+1].receptorID)))
logging.info( ("Vcc, receptor 1, ObsState = {}".
format(proxies.vcc[proxies.receptor_to_vcc[1]].ObsState)) )
#proxies.vcc[0].receptorID
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][0] == 1.1
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][1] == 1.2
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][2] == 1.3
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][3] == 1.4
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][4] == 1.5
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][5] == 1.6
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][0] == 1.7
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][1] == 1.8
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][2] == 1.9
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][3] == 2.0
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][4] == 2.1
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][5] == 2.2
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][0] == 2.3
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][1] == 2.4
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][2] == 2.5
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][3] == 2.6
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][4] == 2.7
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][5] == 2.8
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][0] == 2.9
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][1] == 3.0
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][2] == 3.1
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][3] == 3.2
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][4] == 3.3
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][5] == 3.4
# transition to obsState=SCANNING
f2 = open(file_path + "/../data/Scan1_basic.json")
proxies.subarray[1].Scan(f2.read().replace("\n", ""))
f2.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.SCANNING, 1, 1)
assert proxies.subarray[1].obsState == ObsState.SCANNING
time.sleep(10)
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][0] == 2.1
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][1] == 2.2
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][2] == 2.3
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][3] == 2.4
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][4] == 2.5
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][5] == 2.6
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][0] == 2.7
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][1] == 2.8
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][2] == 2.9
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][3] == 3.0
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][4] == 3.1
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][5] == 3.2
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][0] == 3.3
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][1] == 3.4
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][2] == 3.5
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][3] == 3.6
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][4] == 3.7
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][5] == 3.8
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][0] == 3.9
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][1] == 4.0
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][2] == 4.1
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][3] == 4.2
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][4] == 4.3
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][5] == 4.4
time.sleep(10)
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][0] == 0.1
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][1] == 0.2
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][2] == 0.3
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][3] == 0.4
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][4] == 0.5
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[0][5] == 0.6
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][0] == 0.7
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][1] == 0.8
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][2] == 0.9
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][3] == 1.0
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][4] == 1.1
assert proxies.vcc[proxies.receptor_to_vcc[1]].delayModel[1][5] == 1.2
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][0] == 1.3
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][1] == 1.4
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][2] == 1.5
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][3] == 1.6
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][4] == 1.7
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[0][5] == 1.8
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][0] == 1.9
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][1] == 2.0
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][2] == 2.1
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][3] == 2.2
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][4] == 2.3
assert proxies.vcc[proxies.receptor_to_vcc[4]].delayModel[1][5] == 2.4
proxies.subarray[1].EndScan()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 1, 1)
proxies.clean_proxies()
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
def test_ConfigureScan_jonesMatrix(self, proxies):
"""
Test the reception of Jones matrices
"""
try:
# turn on Subarray
if proxies.subarray[1].State() != DevState.ON:
proxies.subarray[1].On()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
assert proxies.subarray[1].obsState == ObsState.EMPTY
# add receptors
proxies.subarray[1].AddReceptors([1, 3, 4, 2])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert all([proxies.subarray[1].receptors[i] == j for i, j in zip(range(3), [1, 3, 4])])
# configure scan
f = open(file_path + "/../data/ConfigureScan_basic.json")
proxies.subarray[1].ConfigureScan(f.read().replace("\n", ""))
f.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 30, 1)
assert proxies.subarray[1].obsState == ObsState.READY
#create a Jones matrix
f = open(file_path + "/../data/jonesmatrix.json")
jones_matrix = json.loads(f.read().replace("\n", ""))
f.close()
jones_matrix["jonesMatrix"][0]["epoch"] = str(int(time.time()) + 20)
jones_matrix["jonesMatrix"][1]["epoch"] = "0"
jones_matrix["jonesMatrix"][2]["epoch"] = str(int(time.time()) + 10)
# update Jones Matrix
proxies.tm.jonesMatrix = json.dumps(jones_matrix)
time.sleep(5)
for receptor in jones_matrix["jonesMatrix"][1]["matrixDetails"]:
for frequency_slice in receptor["receptorMatrix"]:
for index, value in enumerate(frequency_slice["matrix"]):
vcc_id = proxies.receptor_to_vcc[receptor["receptor"]]
fs_id = frequency_slice["fsid"]
try:
assert proxies.vcc[vcc_id].jonesMatrix[fs_id-1][index] == value
except AssertionError as ae:
logging.error("AssertionError; incorrect Jones matrix entry: epoch {}, VCC {}, i = {}, jonesMatrix[{}] = {}".format(
jones_matrix["jonesMatrix"][1]["epoch"], vcc_id, index, fs_id-1, proxies.vcc[vcc_id].jonesMatrix[fs_id-1])
)
raise ae
except Exception as e:
raise e
# transition to obsState == SCANNING
f2 = open(file_path + "/../data/Scan1_basic.json")
proxies.subarray[1].Scan(f2.read().replace("\n", ""))
f2.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.SCANNING, 1, 1)
assert proxies.subarray[1].obsState == ObsState.SCANNING
time.sleep(10)
for receptor in jones_matrix["jonesMatrix"][2]["matrixDetails"]:
for frequency_slice in receptor["receptorMatrix"]:
for index, value in enumerate(frequency_slice["matrix"]):
vcc_id = proxies.receptor_to_vcc[receptor["receptor"]]
fs_id = frequency_slice["fsid"]
try:
assert proxies.vcc[vcc_id].jonesMatrix[fs_id-1][index] == value
except AssertionError as ae:
logging.error("AssertionError; incorrect Jones matrix entry: epoch {}, VCC {}, i = {}, jonesMatrix[{}] = {}".format(
jones_matrix["jonesMatrix"][1]["epoch"], vcc_id, index, fs_id-1, proxies.vcc[vcc_id].jonesMatrix[fs_id-1])
)
raise ae
except Exception as e:
raise e
time.sleep(10)
for receptor in jones_matrix["jonesMatrix"][0]["matrixDetails"]:
for frequency_slice in receptor["receptorMatrix"]:
for index, value in enumerate(frequency_slice["matrix"]):
vcc_id = proxies.receptor_to_vcc[receptor["receptor"]]
fs_id = frequency_slice["fsid"]
try:
assert proxies.vcc[vcc_id].jonesMatrix[fs_id-1][index] == value
except AssertionError as ae:
logging.error("AssertionError; incorrect Jones matrix entry: epoch {}, VCC {}, i = {}, jonesMatrix[{}] = {}".format(
jones_matrix["jonesMatrix"][1]["epoch"], vcc_id, index, fs_id-1, proxies.vcc[vcc_id].jonesMatrix[fs_id-1])
)
raise ae
except Exception as e:
raise e
proxies.subarray[1].EndScan()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 1, 1)
proxies.clean_proxies()
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
def test_Scan(self, proxies):
"""
Test the Scan command
"""
try:
# turn on Subarray
if proxies.subarray[1].State() != DevState.ON:
proxies.subarray[1].On()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
assert proxies.subarray[1].obsState == ObsState.EMPTY
# add receptors
proxies.subarray[1].AddReceptors([1, 3, 4, 2])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert all([proxies.subarray[1].receptors[i] == j for i, j in zip(range(3), [1, 3, 4])])
# configure scan
f1 = open(file_path + "/../data/ConfigureScan_basic.json")
proxies.subarray[1].ConfigureScan(f1.read().replace("\n", ""))
f1.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 30, 1)
# check initial states
assert proxies.subarray[1].obsState == ObsState.READY
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.READY
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.READY
assert proxies.fspSubarray[1].obsState == ObsState.READY
assert proxies.fspSubarray[3].obsState == ObsState.READY
# send the Scan command
f2 = open(file_path + "/../data/Scan1_basic.json")
proxies.subarray[1].Scan(f2.read().replace("\n", ""))
f2.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.SCANNING, 1, 1)
# check scanID on VCC and FSP
assert proxies.fspSubarray[1].scanID == 1
assert proxies.vcc[proxies.receptor_to_vcc[4]].scanID ==1
# check states
assert proxies.subarray[1].obsState == ObsState.SCANNING
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.SCANNING
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.SCANNING
assert proxies.fspSubarray[1].obsState == ObsState.SCANNING
assert proxies.fspSubarray[3].obsState == ObsState.SCANNING
proxies.subarray[1].EndScan()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 1, 1)
assert proxies.subarray[1].obsState == ObsState.READY
# Clean Up
proxies.clean_proxies()
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
def test_Abort_Reset(self, proxies):
"""
Test abort reset
"""
try:
# turn on Subarray
if proxies.subarray[1].State() != DevState.ON:
proxies.subarray[1].On()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
assert proxies.subarray[1].obsState == ObsState.EMPTY
############################# abort from READY ###########################
# add receptors
proxies.subarray[1].AddReceptors([1, 3, 4, 2])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
# configure scan
f = open(file_path + "/../data/ConfigureScan_basic.json")
proxies.subarray[1].ConfigureScan(f.read().replace("\n", ""))
f.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 30, 1)
assert proxies.subarray[1].obsState == ObsState.READY
assert proxies.fspSubarray[1].obsState == ObsState.READY
assert proxies.fspSubarray[3].obsState == ObsState.READY
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.READY
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.READY
# abort
proxies.subarray[1].Abort()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.ABORTED, 1, 1)
assert proxies.subarray[1].obsState == ObsState.ABORTED
# ObsReset
proxies.subarray[1].ObsReset()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert proxies.subarray[1].obsState == ObsState.IDLE
assert all([proxies.subarray[1].receptors[i] == j for i, j in zip(range(3), [1, 3, 4])])
assert proxies.fspSubarray[1].obsState == ObsState.IDLE
assert proxies.fspSubarray[3].obsState == ObsState.IDLE
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.IDLE
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.IDLE
############################# abort from SCANNING ###########################
# add receptors
proxies.subarray[1].AddReceptors([1, 3, 4, 2])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
# configure scan
f = open(file_path + "/../data/ConfigureScan_basic.json")
proxies.subarray[1].ConfigureScan(f.read().replace("\n", ""))
f.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 30, 1)
# scan
f2 = open(file_path + "/../data/Scan2_basic.json")
proxies.subarray[1].Scan(f2.read().replace("\n", ""))
f2.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.SCANNING, 1, 1)
assert proxies.subarray[1].obsState == ObsState.SCANNING
assert proxies.subarray[1].scanID == 2
assert proxies.fspSubarray[1].obsState == ObsState.SCANNING
assert proxies.fspSubarray[3].obsState == ObsState.SCANNING
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.SCANNING
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.SCANNING
# abort
proxies.subarray[1].Abort()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.ABORTED, 1, 1)
assert proxies.subarray[1].obsState == ObsState.ABORTED
assert proxies.fspSubarray[1].obsState == ObsState.READY
assert proxies.fspSubarray[3].obsState == ObsState.READY
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.READY
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.READY
# ObsReset
proxies.subarray[1].ObsReset()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert proxies.subarray[1].obsState == ObsState.IDLE
assert proxies.subarray[1].scanID == 0
assert proxies.fspSubarray[1].obsState == ObsState.IDLE
assert proxies.fspSubarray[3].obsState == ObsState.IDLE
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.IDLE
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.IDLE
# Clean Up
proxies.clean_proxies()
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
def test_Abort_Restart(self, proxies):
"""
Test abort restart
"""
try:
# turn on Subarray
if proxies.subarray[1].State() != DevState.ON:
proxies.subarray[1].On()
proxies.wait_timeout_dev([proxies.subarray[1]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
assert proxies.subarray[1].obsState == ObsState.EMPTY
############################# abort from IDLE ###########################
# add receptors
proxies.subarray[1].AddReceptors([1, 3, 4, 2])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert proxies.subarray[1].obsState == ObsState.IDLE
# abort
proxies.subarray[1].Abort()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.ABORTED, 1, 1)
assert proxies.subarray[1].obsState == ObsState.ABORTED
# Restart: receptors should be empty
proxies.subarray[1].Restart()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.EMPTY, 1, 1)
assert proxies.subarray[1].obsState == ObsState.EMPTY
assert len(proxies.subarray[1].receptors) == 0
assert proxies.fspSubarray[1].obsState == ObsState.IDLE
assert proxies.fspSubarray[3].obsState == ObsState.IDLE
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.IDLE
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.IDLE
############################# abort from READY ###########################
# add receptors
proxies.subarray[1].AddReceptors([1, 3, 4, 2])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
# configure scan
f = open(file_path + "/../data/ConfigureScan_basic.json")
proxies.subarray[1].ConfigureScan(f.read().replace("\n", ""))
f.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 30, 1)
assert proxies.subarray[1].obsState == ObsState.READY
assert proxies.fspSubarray[1].obsState == ObsState.READY
assert proxies.fspSubarray[3].obsState == ObsState.READY
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.READY
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.READY
# abort
proxies.subarray[1].Abort()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.ABORTED, 1, 1)
assert proxies.subarray[1].obsState == ObsState.ABORTED
# ObsReset
proxies.subarray[1].Restart()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.EMPTY, 1, 1)
assert proxies.subarray[1].obsState == ObsState.EMPTY
assert len(proxies.subarray[1].receptors) == 0
assert proxies.fspSubarray[1].obsState == ObsState.IDLE
assert proxies.fspSubarray[3].obsState == ObsState.IDLE
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.IDLE
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.IDLE
############################# abort from SCANNING ###########################
# add receptors
proxies.subarray[1].AddReceptors([1, 3, 4, 2])
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
# configure scan
f = open(file_path + "/../data/ConfigureScan_basic.json")
proxies.subarray[1].ConfigureScan(f.read().replace("\n", ""))
f.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.READY, 30, 1)
# scan
f2 = open(file_path + "/../data/Scan2_basic.json")
proxies.subarray[1].Scan(f2.read().replace("\n", ""))
f2.close()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.SCANNING, 1, 1)
assert proxies.subarray[1].obsState == ObsState.SCANNING
assert proxies.subarray[1].scanID == 2
assert proxies.fspSubarray[1].obsState == ObsState.SCANNING
assert proxies.fspSubarray[3].obsState == ObsState.SCANNING
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.SCANNING
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.SCANNING
# abort
proxies.subarray[1].Abort()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.ABORTED, 1, 1)
assert proxies.subarray[1].obsState == ObsState.ABORTED
assert proxies.fspSubarray[1].obsState == ObsState.READY
assert proxies.fspSubarray[3].obsState == ObsState.READY
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.READY
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.READY
# ObsReset
proxies.subarray[1].Restart()
proxies.wait_timeout_obs([proxies.subarray[1]], ObsState.IDLE, 1, 1)
assert len(proxies.subarray[1].receptors) == 0
assert proxies.fspSubarray[1].obsState == ObsState.IDLE
assert proxies.fspSubarray[3].obsState == ObsState.IDLE
assert proxies.vcc[proxies.receptor_to_vcc[1]].obsState == ObsState.IDLE
assert proxies.vcc[proxies.receptor_to_vcc[4]].obsState == ObsState.IDLE
proxies.clean_proxies()
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
def test_ConfigureScan_minimal(self, proxies):
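        # Configure a scan from the TM-CSP v2 JSON file and verify the configured
        # attributes on the CBF subarray, on the VCC of the first test receptor,
        # and on the FSP subarrays (function mode, frequency slice, integration
        # factor, channel averaging and output link maps).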
try:
sub_id = 1
#TODO currently only support for 1 receptor per fsp
test_receptor_ids = [4, 1]
#test_receptor_ids = [1]
vcc_index = proxies.receptor_to_vcc[test_receptor_ids[0]]
logging.info("vcc_index = {}".format(vcc_index))
vcc_band_proxies = proxies.vccBand[vcc_index - 1]
# turn on Subarray
if proxies.subarray[sub_id].State() != DevState.ON:
proxies.subarray[sub_id].On()
proxies.wait_timeout_dev([proxies.subarray[sub_id]], DevState.ON, 3, 1)
for proxy in [proxies.vcc[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
for proxy in [proxies.fsp[i + 1] for i in range(4)]:
if proxy.State() == DevState.OFF:
proxy.On()
proxies.wait_timeout_dev([proxy], DevState.ON, 1, 1)
# check initial value of attributes of CBF subarray
assert len(proxies.subarray[sub_id].receptors) == 0
assert proxies.subarray[sub_id].configID == ''
# TODO in CbfSubarray, at end of scan, clear all private data
#assert proxies.subarray[sub_id].frequencyBand == 0
assert proxies.subarray[sub_id].obsState == ObsState.EMPTY
# add receptors
proxies.subarray[sub_id].AddReceptors(test_receptor_ids)
proxies.wait_timeout_obs([proxies.subarray[sub_id]], ObsState.IDLE, 1, 1)
assert all([proxies.subarray[sub_id].receptors[i] == j
for i, j in zip(range(len(test_receptor_ids)), test_receptor_ids)])
# configure scan
f = open(file_path + "/../data/Configure_TM-CSP_v2.json")
configuration = f.read().replace("\n", "")
f.close()
proxies.subarray[sub_id].ConfigureScan(configuration)
proxies.wait_timeout_obs([proxies.subarray[sub_id]], ObsState.READY, 15, 1)
configuration = json.loads(configuration)
band_index = freq_band_dict()[configuration["common"]["frequency_band"]]
# check configured attributes of CBF subarray
assert sub_id == int(configuration["common"]["subarray_id"])
assert proxies.subarray[sub_id].configID == configuration["common"]["config_id"]
assert proxies.subarray[sub_id].frequencyBand == band_index
assert proxies.subarray[sub_id].obsState == ObsState.READY
proxies.wait_timeout_obs([proxies.vcc[i + 1] for i in range(4)], ObsState.READY, 1, 1)
# check frequency band of VCCs, including states of
# frequency band capabilities
logging.info( ("proxies.vcc[vcc_index].frequencyBand = {}".
format( proxies.vcc[vcc_index].frequencyBand)) )
assert proxies.vcc[vcc_index].configID == configuration["common"]["config_id"]
assert proxies.vcc[vcc_index].frequencyBand == band_index
assert proxies.vcc[vcc_index].subarrayMembership == sub_id
#TODO fix these tests; issue with VccBand devices either not reconfiguring in between
# configurations or causing a fault within the Vcc device
# for proxy in vcc_band_proxies:
# logging.info("VCC proxy.State() = {}".format(proxy.State()))
# for i in range(4):
# if (i == 0 and band_index == 0) or (i == (band_index - 1)):
# assert vcc_band_proxies[i].State() == DevState.ON
# else:
# assert vcc_band_proxies[i].State() == DevState.DISABLE
# check configured attributes of FSPs, including states of function mode capabilities
fsp_function_mode_proxies = [proxies.fsp1FunctionMode, proxies.fsp2FunctionMode,
proxies.fsp3FunctionMode, proxies.fsp4FunctionMode]
for fsp in configuration["cbf"]["fsp"]:
fsp_id = fsp["fsp_id"]
logging.info("{}".format(fsp_id))
#TODO add function mode to enum or edit attribute to accept string in FSP
if fsp["function_mode"] == "CORR": function_mode = 1
elif fsp["function_mode"] == "PSS-BF": function_mode = 2
elif fsp["function_mode"] == "PST-BF": function_mode = 3
elif fsp["function_mode"] == "VLBI": function_mode = 4
assert proxies.fsp[fsp_id].functionMode == function_mode
assert sub_id in proxies.fsp[fsp_id].subarrayMembership
assert [proxy.State() for proxy in fsp_function_mode_proxies[fsp_id-1]] == [
DevState.ON, DevState.DISABLE, DevState.DISABLE, DevState.DISABLE
]
# check configured attributes of FSP subarray
#TODO align IDs of fspSubarrays to fsp_id in conftest; currently works for fsps 1 and 2
assert proxies.fspSubarray[fsp_id].obsState == ObsState.READY
assert proxies.fspSubarray[fsp_id].receptors == test_receptor_ids[0]
assert proxies.fspSubarray[fsp_id].frequencyBand == band_index
assert proxies.fspSubarray[fsp_id].frequencySliceID == fsp["frequency_slice_id"]
assert proxies.fspSubarray[fsp_id].integrationTime == fsp["integration_factor"]
assert proxies.fspSubarray[fsp_id].corrBandwidth == fsp["zoom_factor"]
if fsp["zoom_factor"] > 0:
assert proxies.fspSubarray[fsp_id].zoomWindowTuning == fsp["zoom_window_tuning"]
assert proxies.fspSubarray[fsp_id].fspChannelOffset == fsp["channel_offset"]
for i in range(len(fsp["channel_averaging_map"])):
for j in range(len(fsp["channel_averaging_map"][i])):
assert proxies.fspSubarray[fsp_id].channelAveragingMap[i][j] == fsp["channel_averaging_map"][i][j]
for i in range(len(fsp["output_link_map"])):
for j in range(len(fsp["output_link_map"][i])):
assert proxies.fspSubarray[fsp_id].outputLinkMap[i][j] == fsp["output_link_map"][i][j]
proxies.clean_proxies()
except AssertionError as ae:
proxies.clean_proxies()
raise ae
except Exception as e:
proxies.clean_proxies()
raise e
'''
def test_ConfigureScan_onlyPss_basic(
self,
cbf_master_proxy,
proxies.subarray[1],
sw_1_proxy,
sw_2_proxy,
vcc_proxies,
vcc_band_proxies,
vcc_tdc_proxies,
fsp_1_proxy,
fsp_2_proxy,
fsp_1_function_mode_proxy,
fsp_2_function_mode_proxy,
fsp_3_proxies.subarray[1],
tm_telstate_proxy
):
"""
Test a minimal successful configuration
"""
for proxy in vcc_proxies:
proxy.Init()
fsp_3_proxies.subarray[1].Init()
fsp_1_proxy.Init()
fsp_2_proxy.Init()
proxies.subarray[1].set_timeout_millis(60000) # since the command takes a while
proxies.subarray[1].Init()
time.sleep(3)
cbf_master_proxy.set_timeout_millis(60000)
cbf_master_proxy.Init()
time.sleep(60) # takes pretty long for CBF Master to initialize
tm_telstate_proxy.Init()
time.sleep(1)
receptor_to_vcc = dict([*map(int, pair.split(":"))] for pair in
cbf_master_proxy.receptorToVcc)
cbf_master_proxy.On()
time.sleep(3)
# check initial value of attributes of CBF subarray
# assert proxies.subarray[1].receptors == ()
# assert proxies.subarray[1].configID == 0
assert proxies.subarray[1].frequencyBand == 0
assert proxies.subarray[1].obsState.value == ObsState.IDLE.value
# assert tm_telstate_proxy.visDestinationAddress == "{}"
assert tm_telstate_proxy.receivedOutputLinks == False
# add receptors
proxies.subarray[1].RemoveAllReceptors()
proxies.subarray[1].AddReceptors([1, 3, 4])
time.sleep(1)
assert proxies.subarray[1].receptors[0] == 1
assert proxies.subarray[1].receptors[1] == 3
assert proxies.subarray[1].receptors[2] == 4
# configure scan
f = open(file_path + "/test_json/test_ConfigureScan_onlyPss_basic.json")
proxies.subarray[1].ConfigureScan(f.read().replace("\n", ""))
f.close()
time.sleep(15)
# check configured attributes of CBF subarray
# def test_ConfigureScan_basic(
# self,
# cbf_master_proxy,
# proxies.subarray[1],
# sw_1_proxy,
# sw_2_proxy,
# vcc_proxies,
# vcc_band_proxies,
# vcc_tdc_proxies,
# fsp_1_proxy,
# fsp_2_proxy,
# fsp_1_function_mode_proxy,
# fsp_2_function_mode_proxy,
# fsp_1_proxies.subarray[1],
# fsp_2_proxies.subarray[1],
# fsp_3_proxies.subarray[1],
# tm_telstate_proxy
# ):
# """
# Test a minimal successful configuration
# """
# for proxy in vcc_proxies:
# proxy.Init()
# fsp_1_proxies.subarray[1].Init()
# fsp_2_proxies.subarray[1].Init()
# fsp_3_proxies.subarray[1].Init()
# fsp_1_proxy.Init()
# fsp_2_proxy.Init()
# proxies.subarray[1].set_timeout_millis(60000) # since the command takes a while
# proxies.subarray[1].Init()
# time.sleep(3)
# cbf_master_proxy.set_timeout_millis(60000)
# cbf_master_proxy.Init()
# time.sleep(60) # takes pretty long for CBF Master to initialize
# tm_telstate_proxy.Init()
# time.sleep(1)
# receptor_to_vcc = dict([*map(int, pair.split(":"))] for pair in
# cbf_master_proxy.receptorToVcc)
# cbf_master_proxy.On()
# time.sleep(60)
# # turn on Subarray
# assert proxies.subarray[1].state()==DevState.OFF
# proxies.subarray[1].On()
# time.sleep(10)
# # check initial value of attributes of CBF subarray
# assert len(proxies.subarray[1].receptors) == 0
# assert proxies.subarray[1].configID == 0
# assert proxies.subarray[1].frequencyBand == 0
# assert proxies.subarray[1].State() == DevState.ON
# assert proxies.subarray[1].ObsState == ObsState.EMPTY
# # assert tm_telstate_proxy.visDestinationAddress == "{}"
# assert tm_telstate_proxy.receivedOutputLinks == False
# # add receptors
# proxies.subarray[1].RemoveAllReceptors()
# proxies.subarray[1].AddReceptors([1, 3, 4])
# time.sleep(1)
# assert proxies.subarray[1].receptors[0] == 1
# assert proxies.subarray[1].receptors[1] == 3
# assert proxies.subarray[1].receptors[2] == 4
# # configure scan
# f = open(file_path + "/test_json/test_ConfigureScan_basic.json")
# proxies.subarray[1].ConfigureScan(f.read().replace("\n", ""))
# f.close()
# time.sleep(15)
# # check configured attributes of CBF subarray
# assert proxies.subarray[1].configID == "band:5a, fsp1, 744 channels average factor 8"
# assert proxies.subarray[1].frequencyBand == 4 # means 5a?
# assert proxies.subarray[1].obsState.value == ObsState.READY.value
# # check frequency band of VCCs, including states of frequency band capabilities
# assert vcc_proxies[receptor_to_vcc[4] - 1].frequencyBand == 4
# assert vcc_proxies[receptor_to_vcc[1] - 1].frequencyBand == 4
# assert [proxy.State() for proxy in vcc_band_proxies[receptor_to_vcc[4] - 1]] == [
# DevState.DISABLE, DevState.DISABLE, DevState.DISABLE, DevState.ON]
# assert [proxy.State() for proxy in vcc_band_proxies[receptor_to_vcc[1] - 1]] == [
# DevState.DISABLE, DevState.DISABLE, DevState.DISABLE, DevState.ON]
# # check the rest of the configured attributes of VCCs
# # first for VCC belonging to receptor 10...
# assert vcc_proxies[receptor_to_vcc[4] - 1].subarrayMembership == 1
# assert vcc_proxies[receptor_to_vcc[4] - 1].band5Tuning[0] == 5.85
# assert vcc_proxies[receptor_to_vcc[4] - 1].band5Tuning[1] == 7.25
# assert vcc_proxies[receptor_to_vcc[4] - 1].frequencyBandOffsetStream1 == 0
# assert vcc_proxies[receptor_to_vcc[4] - 1].frequencyBandOffsetStream2 == 0
# assert vcc_proxies[receptor_to_vcc[4] - 1].rfiFlaggingMask == "{}"
# # then for VCC belonging to receptor 1...
# assert vcc_proxies[receptor_to_vcc[1] - 1].subarrayMembership == 1
# assert vcc_proxies[receptor_to_vcc[1] - 1].band5Tuning[0] == 5.85
# assert vcc_proxies[receptor_to_vcc[1] - 1].band5Tuning[1] == 7.25
# assert vcc_proxies[receptor_to_vcc[1] - 1].frequencyBandOffsetStream1 == 0
# assert vcc_proxies[receptor_to_vcc[1] - 1].frequencyBandOffsetStream2 == 0
# assert vcc_proxies[receptor_to_vcc[1] - 1].rfiFlaggingMask == "{}"
# # check configured attributes of search windows
# # first for search window 1...
# assert sw_1_proxy.State() == DevState.ON
# assert sw_1_proxy.searchWindowTuning == 6000000000
# assert sw_1_proxy.tdcEnable == True
# assert sw_1_proxy.tdcNumBits == 8
# assert sw_1_proxy.tdcPeriodBeforeEpoch == 5
# assert sw_1_proxy.tdcPeriodAfterEpoch == 25
# assert "".join(sw_1_proxy.tdcDestinationAddress.split()) in [
# "[{\"receptorID\":4,\"tdcDestinationAddress\":[\"foo\",\"bar\",\"8080\"]},{\"receptorID\":1,\"tdcDestinationAddress\":[\"fizz\",\"buzz\",\"80\"]}]",
# "[{\"tdcDestinationAddress\":[\"foo\",\"bar\",\"8080\"],\"receptorID\":4},{\"receptorID\":1,\"tdcDestinationAddress\":[\"fizz\",\"buzz\",\"80\"]}]",
# "[{\"receptorID\":4,\"tdcDestinationAddress\":[\"foo\",\"bar\",\"8080\"]},{\"tdcDestinationAddress\":[\"fizz\",\"buzz\",\"80\"],\"receptorID\":1}]",
# "[{\"tdcDestinationAddress\":[\"foo\",\"bar\",\"8080\"],\"receptorID\":4},{\"tdcDestinationAddress\":[\"fizz\",\"buzz\",\"80\"],\"receptorID\":1}]",
# ]
# # then for search window 2...
# assert sw_2_proxy.State() == DevState.DISABLE
# assert sw_2_proxy.searchWindowTuning == 7000000000
# assert sw_2_proxy.tdcEnable == False
# # check configured attributes of VCC search windows
# # first for search window 1 of VCC belonging to receptor 10...
# assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].State() == DevState.ON
# assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].searchWindowTuning == 6000000000
# assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].tdcEnable == True
# assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].tdcNumBits == 8
# assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].tdcPeriodBeforeEpoch == 5
# assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].tdcPeriodAfterEpoch == 25
# assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].tdcDestinationAddress == (
# "foo", "bar", "8080"
# )
# # then for search window 1 of VCC belonging to receptor 1...
# assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].State() == DevState.ON
# assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].searchWindowTuning == 6000000000
# assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].tdcEnable == True
# assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].tdcNumBits == 8
# assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].tdcPeriodBeforeEpoch == 5
# assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].tdcPeriodAfterEpoch == 25
# assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].tdcDestinationAddress == (
# "fizz", "buzz", "80"
# )
# # then for search window 2 of VCC belonging to receptor 10...
# assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][1].State() == DevState.DISABLE
# assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][1].searchWindowTuning == 7000000000
# assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][1].tdcEnable == False
# # and lastly for search window 2 of VCC belonging to receptor 1...
# assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][1].State() == DevState.DISABLE
# assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][1].searchWindowTuning == 7000000000
# assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][1].tdcEnable == False
# # check configured attributes of FSPs, including states of function mode capabilities
# assert fsp_1_proxy.functionMode == 1
# assert 1 in fsp_1_proxy.subarrayMembership
# # assert 1 in fsp_2_proxy.subarrayMembership
# assert [proxy.State() for proxy in fsp_1_function_mode_proxy] == [
# DevState.ON, DevState.DISABLE, DevState.DISABLE, DevState.DISABLE
# ]
# # assert [proxy.State() for proxy in fsp_2_function_mode_proxy] == [
# # DevState.ON, DevState.DISABLE, DevState.DISABLE, DevState.DISABLE
# # ]
# # check configured attributes of FSP subarrays
# # first for FSP 1...
# assert fsp_1_proxies.subarray[1].obsState == ObsState.EMPTY
# assert fsp_1_proxies.subarray[1].receptors == 4
# assert fsp_1_proxies.subarray[1].frequencyBand == 4
# assert fsp_1_proxies.subarray[1].band5Tuning[0] == 5.85
# assert fsp_1_proxies.subarray[1].band5Tuning[1] == 7.25
# assert fsp_1_proxies.subarray[1].frequencyBandOffsetStream1 == 0
# assert fsp_1_proxies.subarray[1].frequencyBandOffsetStream2 == 0
# assert fsp_1_proxies.subarray[1].frequencySliceID == 1
# assert fsp_1_proxies.subarray[1].corrBandwidth == 1
# assert fsp_1_proxies.subarray[1].zoomWindowTuning == 4700000
# assert fsp_1_proxies.subarray[1].integrationTime == 140
# assert fsp_1_proxies.subarray[1].fspChannelOffset == 14880
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[0][0] == 0
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[0][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[1][0] == 744
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[1][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[2][0] == 1488
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[2][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[3][0] == 2232
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[3][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[4][0] == 2976
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[4][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[5][0] == 3720
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[5][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[6][0] == 4464
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[6][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[7][0] == 5208
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[7][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[8][0] == 5952
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[8][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[9][0] == 6696
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[9][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[10][0] == 7440
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[10][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[11][0] == 8184
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[11][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[12][0] == 8928
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[12][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[13][0] == 9672
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[13][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[14][0] == 10416
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[14][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[15][0] == 11160
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[15][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[16][0] == 11904
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[16][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[17][0] == 12648
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[17][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[18][0] == 13392
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[18][1] == 8
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[19][0] == 14136
# # assert fsp_1_proxies.subarray[1].channelAveragingMap[19][1] == 8
# assert fsp_1_proxies.subarray[1].outputLinkMap[0][0] == 0
# assert fsp_1_proxies.subarray[1].outputLinkMap[0][1] == 4
# assert fsp_1_proxies.subarray[1].outputLinkMap[1][0] == 744
# assert fsp_1_proxies.subarray[1].outputLinkMap[1][1] == 8
# assert fsp_1_proxies.subarray[1].outputLinkMap[2][0] == 1488
# assert fsp_1_proxies.subarray[1].outputLinkMap[2][1] == 12
# assert fsp_1_proxies.subarray[1].outputLinkMap[3][0] == 2232
# assert fsp_1_proxies.subarray[1].receptors[2] == 4
# # assert fsp_2_proxies.subarray[1].frequencyBand == 4
# # assert fsp_2_proxies.subarray[1].band5Tuning[0] == 5.85
# # assert fsp_2_proxies.subarray[1].band5Tuning[1] == 7.25
# # assert fsp_2_proxies.subarray[1].frequencyBandOffsetStream1 == 0
# # assert fsp_2_proxies.subarray[1].frequencyBandOffsetStream2 == 0
# # assert fsp_2_proxies.subarray[1].frequencySliceID == 20
# # assert fsp_2_proxies.subarray[1].corrBandwidth == 0
# # assert fsp_2_proxies.subarray[1].integrationTime == 1400
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[0][0] == 1
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[0][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[1][0] == 745
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[1][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[2][0] == 1489
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[2][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[3][0] == 2233
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[3][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[4][0] == 2977
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[4][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[5][0] == 3721
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[5][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[6][0] == 4465
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[6][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[7][0] == 5209
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[7][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[8][0] == 5953
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[8][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[9][0] == 6697
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[9][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[10][0] == 7441
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[10][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[11][0] == 8185
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[11][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[12][0] == 8929
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[12][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[13][0] == 9673
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[13][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[14][0] == 10417
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[14][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[15][0] == 11161
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[15][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[16][0] == 11905
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[16][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[17][0] == 12649
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[17][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[18][0] == 13393
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[18][1] == 0
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[19][0] == 14137
# # assert fsp_2_proxies.subarray[1].channelAveragingMap[19][1] == 0
# # then for FSP 3...
# assert fsp_3_proxies.subarray[1].receptors[0] == 3
# assert fsp_3_proxies.subarray[1].receptors[1] == 1
# assert fsp_3_proxies.subarray[1].searchWindowID == 2
# assert fsp_3_proxies.subarray[1].searchBeamID[0] == 300
# assert fsp_3_proxies.subarray[1].searchBeamID[1] == 400
# searchBeam = fsp_3_proxies.subarray[1].searchBeams
# searchBeam300 = json.loads(searchBeam[0])
# searchBeam400 = json.loads(searchBeam[1])
# assert searchBeam300["searchBeamID"] == 300
# assert searchBeam300["receptors"][0] == 3
# assert searchBeam300["outputEnable"] == True
# assert searchBeam300["averagingInterval"] == 4
# assert searchBeam300["searchBeamDestinationAddress"] == "10.05.1.1"
# assert searchBeam400["searchBeamID"] == 400
# assert searchBeam400["receptors"][0] == 1
# assert searchBeam400["outputEnable"] == True
# assert searchBeam400["averagingInterval"] == 2
# assert searchBeam400["searchBeamDestinationAddress"] == "10.05.2.1"
# proxies.subarray[1].GoToIdle()
# time.sleep(3)
# assert proxies.subarray[1].obsState == ObsState.IDLE
# proxies.subarray[1].RemoveAllReceptors()
# time.sleep(3)
# assert proxies.subarray[1].state() == tango.DevState.OFF
# check frequency band of VCCs, including states of frequency band capabilities
assert vcc_proxies[receptor_to_vcc[4] - 1].frequencyBand == 4
assert vcc_proxies[receptor_to_vcc[1] - 1].frequencyBand == 4
assert [proxy.State() for proxy in vcc_band_proxies[receptor_to_vcc[4] - 1]] == [
DevState.DISABLE, DevState.DISABLE, DevState.DISABLE, DevState.ON]
assert [proxy.State() for proxy in vcc_band_proxies[receptor_to_vcc[1] - 1]] == [
DevState.DISABLE, DevState.DISABLE, DevState.DISABLE, DevState.ON]
# check the rest of the configured attributes of VCCs
# first for VCC belonging to receptor 10...
assert vcc_proxies[receptor_to_vcc[4] - 1].subarrayMembership == 1
assert vcc_proxies[receptor_to_vcc[4] - 1].band5Tuning[0] == 5.85
assert vcc_proxies[receptor_to_vcc[4] - 1].band5Tuning[1] == 7.25
assert vcc_proxies[receptor_to_vcc[4] - 1].frequencyBandOffsetStream1 == 0
assert vcc_proxies[receptor_to_vcc[4] - 1].frequencyBandOffsetStream2 == 0
assert vcc_proxies[receptor_to_vcc[4] - 1].rfiFlaggingMask == "{}"
# then for VCC belonging to receptor 1...
assert vcc_proxies[receptor_to_vcc[1] - 1].subarrayMembership == 1
assert vcc_proxies[receptor_to_vcc[1] - 1].band5Tuning[0] == 5.85
assert vcc_proxies[receptor_to_vcc[1] - 1].band5Tuning[1] == 7.25
assert vcc_proxies[receptor_to_vcc[1] - 1].frequencyBandOffsetStream1 == 0
assert vcc_proxies[receptor_to_vcc[1] - 1].frequencyBandOffsetStream2 == 0
assert vcc_proxies[receptor_to_vcc[1] - 1].rfiFlaggingMask == "{}"
# check configured attributes of search windows
# first for search window 1...
assert sw_1_proxy.State() == DevState.ON
assert sw_1_proxy.searchWindowTuning == 6000000000
assert sw_1_proxy.tdcEnable == True
assert sw_1_proxy.tdcNumBits == 8
assert sw_1_proxy.tdcPeriodBeforeEpoch == 5
assert sw_1_proxy.tdcPeriodAfterEpoch == 25
assert "".join(sw_1_proxy.tdcDestinationAddress.split()) in [
"[{\"receptorID\":4,\"tdcDestinationAddress\":[\"foo\",\"bar\",\"8080\"]},{\"receptorID\":1,\"tdcDestinationAddress\":[\"fizz\",\"buzz\",\"80\"]}]",
"[{\"tdcDestinationAddress\":[\"foo\",\"bar\",\"8080\"],\"receptorID\":4},{\"receptorID\":1,\"tdcDestinationAddress\":[\"fizz\",\"buzz\",\"80\"]}]",
"[{\"receptorID\":4,\"tdcDestinationAddress\":[\"foo\",\"bar\",\"8080\"]},{\"tdcDestinationAddress\":[\"fizz\",\"buzz\",\"80\"],\"receptorID\":1}]",
"[{\"tdcDestinationAddress\":[\"foo\",\"bar\",\"8080\"],\"receptorID\":4},{\"tdcDestinationAddress\":[\"fizz\",\"buzz\",\"80\"],\"receptorID\":1}]",
]
# then for search window 2...
assert sw_2_proxy.State() == DevState.DISABLE
assert sw_2_proxy.searchWindowTuning == 7000000000
assert sw_2_proxy.tdcEnable == False
# check configured attributes of VCC search windows
# first for search window 1 of VCC belonging to receptor 10...
assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].State() == DevState.ON
assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].searchWindowTuning == 6000000000
assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].tdcEnable == True
assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].tdcNumBits == 8
assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].tdcPeriodBeforeEpoch == 5
assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].tdcPeriodAfterEpoch == 25
assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][0].tdcDestinationAddress == (
"foo", "bar", "8080"
)
# then for search window 1 of VCC belonging to receptor 1...
assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].State() == DevState.ON
assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].searchWindowTuning == 6000000000
assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].tdcEnable == True
assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].tdcNumBits == 8
assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].tdcPeriodBeforeEpoch == 5
assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].tdcPeriodAfterEpoch == 25
assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][0].tdcDestinationAddress == (
"fizz", "buzz", "80"
)
# then for search window 2 of VCC belonging to receptor 10...
assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][1].State() == DevState.DISABLE
assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][1].searchWindowTuning == 7000000000
assert vcc_tdc_proxies[receptor_to_vcc[4] - 1][1].tdcEnable == False
# and lastly for search window 2 of VCC belonging to receptor 1...
assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][1].State() == DevState.DISABLE
assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][1].searchWindowTuning == 7000000000
assert vcc_tdc_proxies[receptor_to_vcc[1] - 1][1].tdcEnable == False
assert fsp_3_proxies.subarray[1].receptors[0] == 3
assert fsp_3_proxies.subarray[1].receptors[1] == 1
assert fsp_3_proxies.subarray[1].searchWindowID == 2
assert fsp_3_proxies.subarray[1].searchBeamID[0] == 300
assert fsp_3_proxies.subarray[1].searchBeamID[1] == 400
searchBeam = fsp_3_proxies.subarray[1].searchBeams
searchBeam300 = json.loads(searchBeam[0])
searchBeam400 = json.loads(searchBeam[1])
assert searchBeam300["searchBeamID"] == 300
assert searchBeam300["receptors"][0] == 3
assert searchBeam300["outputEnable"] == True
assert searchBeam300["averagingInterval"] == 4
assert searchBeam300["searchBeamDestinationAddress"] == "10.05.1.1"
assert searchBeam400["searchBeamID"] == 400
assert searchBeam400["receptors"][0] == 1
assert searchBeam400["outputEnable"] == True
assert searchBeam400["averagingInterval"] == 2
assert searchBeam400["searchBeamDestinationAddress"] == "10.05.2.1"
proxies.subarray[1].GoToIdle()
time.sleep(3)
assert proxies.subarray[1].obsState == ObsState.IDLE
proxies.subarray[1].RemoveAllReceptors()
time.sleep(3)
assert proxies.subarray[1].state() == tango.DevState.OFF
def test_band1(
self,
cbf_master_proxy,
proxies.subarray[1],
sw_1_proxy,
sw_2_proxy,
vcc_proxies,
vcc_band_proxies,
vcc_tdc_proxies,
fsp_1_proxy,
fsp_2_proxy,
fsp_1_function_mode_proxy,
fsp_2_function_mode_proxy,
fsp_1_proxies.subarray[1],
fsp_2_proxies.subarray[1],
fsp_3_proxies.subarray[1],
tm_telstate_proxy
):
"""
Test a minimal successful configuration
"""
for proxy in vcc_proxies:
proxy.Init()
fsp_1_proxies.subarray[1].Init()
fsp_2_proxies.subarray[1].Init()
fsp_3_proxies.subarray[1].Init()
fsp_1_proxy.Init()
fsp_2_proxy.Init()
time.sleep(3)
cbf_master_proxy.set_timeout_millis(60000)
cbf_master_proxy.Init()
time.sleep(60) # takes pretty long for CBF Master to initialize
tm_telstate_proxy.Init()
time.sleep(1)
receptor_to_vcc = dict([*map(int, pair.split(":"))] for pair in
cbf_master_proxy.receptorToVcc)
cbf_master_proxy.On()
time.sleep(3)
# check initial value of attributes of CBF subarray
assert len(proxies.subarray[1].receptors) == 0
assert proxies.subarray[1].configID == ''
assert proxies.subarray[1].frequencyBand == 0
assert proxies.subarray[1].obsState.value == ObsState.IDLE.value
# assert tm_telstate_proxy.visDestinationAddress == "{}"
assert tm_telstate_proxy.receivedOutputLinks == False
# add receptors
proxies.subarray[1].AddReceptors([1, 3, 4])
time.sleep(1)
assert proxies.subarray[1].receptors[0] == 1
assert proxies.subarray[1].receptors[1] == 3
assert proxies.subarray[1].receptors[2] == 4
# configure scan
f = open(file_path + "/test_json/data_model_confluence.json")
proxies.subarray[1].ConfigureScan(f.read().replace("\n", ""))
f.close()
time.sleep(15)
# check configured attributes of CBF subarray
assert proxies.subarray[1].configID == "sbi-mvp01-20200325-00001-science_A"
assert proxies.subarray[1].frequencyBand == 0 # means 1
assert proxies.subarray[1].obsState.value == ObsState.READY.value
# check frequency band of VCCs, including states of frequency band capabilities
assert vcc_proxies[receptor_to_vcc[4] - 1].frequencyBand == 0
assert vcc_proxies[receptor_to_vcc[1] - 1].frequencyBand == 0
# check the rest of the configured attributes of VCCs
# first for VCC belonging to receptor 10...
assert vcc_proxies[receptor_to_vcc[4] - 1].subarrayMembership == 1
# then for VCC belonging to receptor 1...
assert vcc_proxies[receptor_to_vcc[1] - 1].subarrayMembership == 1
# check configured attributes of FSPs, including states of function mode capabilities
assert fsp_1_proxy.functionMode == 1
assert 1 in fsp_1_proxy.subarrayMembership
# assert 1 in fsp_2_proxy.subarrayMembership
assert [proxy.State() for proxy in fsp_1_function_mode_proxy] == [
DevState.ON, DevState.DISABLE, DevState.DISABLE, DevState.DISABLE
]
# assert [proxy.State() for proxy in fsp_2_function_mode_proxy] == [
# DevState.ON, DevState.DISABLE, DevState.DISABLE, DevState.DISABLE
# ]
# check configured attributes of FSP subarrays
# first for FSP 1...
assert fsp_1_proxies.subarray[1].obsState == ObsState.READY
assert fsp_1_proxies.subarray[1].frequencyBand == 0
assert fsp_1_proxies.subarray[1].frequencySliceID == 1
assert fsp_1_proxies.subarray[1].corrBandwidth == 0
assert fsp_1_proxies.subarray[1].integrationTime == 1400
assert fsp_1_proxies.subarray[1].outputLinkMap[0][0] == 1
assert fsp_1_proxies.subarray[1].outputLinkMap[0][1] == 0
assert fsp_1_proxies.subarray[1].outputLinkMap[1][0] == 201
assert fsp_1_proxies.subarray[1].outputLinkMap[1][1] == 1
proxies.subarray[1].GoToIdle()
time.sleep(3)
assert proxies.subarray[1].obsState == ObsState.IDLE
proxies.subarray[1].RemoveAllReceptors()
time.sleep(1)
proxies.subarray[1].Off()
assert proxies.subarray[1].state() == tango.DevState.OFF
'''
| 1.898438 | 2 |
src/mmgroup/tests/spaces/spaces.py | Martin-Seysen/mmgroup | 14 | 12799163 | from mmgroup.mm_space import MMSpace, MMV, MMVector
from mmgroup.mm_space import characteristics
from mmgroup.structures.mm0_group import MM0Group
from mmgroup.tests.spaces.sparse_mm_space import SparseMmSpace
from mmgroup.tests.spaces.sparse_mm_space import SparseMmV
from mmgroup.tests.groups.mgroup_n import MGroupN
#print("module mmgroup.tests.spaces.spaces is deprecated!!")
spaces_dict = {}
g = MM0Group()
ref_g = MGroupN()
class TestSpace:
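    # Bundles the fast representation space MMV(p) with the sparse reference
    # implementation and the corresponding group objects, so that tests can
    # cross-check results between the two implementations.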
def __init__(self, p):
self.p = p
self.space = MMV(p)
self.ref_space = SparseMmV(p)
self.group = g
self.ref_group = ref_g
def __call__(self, *args):
return self.space(*args)
def MMTestSpace(p):
global spaces_dict
try:
return spaces_dict[p]
except KeyError:
spaces_dict[p] = TestSpace(p)
return spaces_dict[p]
| 2.265625 | 2 |
arxiv2md.py | michamos/vim-arxivist | 2 | 12799164 | <gh_stars>1-10
#!/usr/bin/python
# arxiv2md.py: fetch the latest arXiv listings and transform them to markdown
# Copyright (C) 2014 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import feedparser
import subprocess
import time
import re
def parse_archive(archive, updates=True, link_to="pdf"):
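    # Fetch the RSS feed for the given arXiv archive, render each entry as HTML
    # (optionally stopping at the first item marked UPDATED), convert the result
    # to markdown with pandoc, and unescape the LaTeX characters pandoc escapes.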
out = u""
d = feedparser.parse("http://export.arxiv.org/rss/{}".format(archive))
day = time.strftime("%F", d.feed.updated_parsed)
if updates:
update_string=u"with replacements"
else:
update_string=u""
    out = out + u"<h1>{} arXiv of {} {}</h1>\n".format(archive, day, update_string)
for entry in d.entries:
if (not updates) and entry.title.endswith("UPDATED)"):
break
out = out + u"<h2>{}</h2>\n".format(entry.title)
out = out + u"<p>Authors: {}</p>\n".format(entry.author)
out = out + u"<a href='{}'>Link</a>\n".format(entry.link.replace("abs",link_to,1))
out = out + entry.summary+u"\n"
pandoc = subprocess.Popen("pandoc -R -f html -t markdown --atx-headers".split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(result,error) = pandoc.communicate(input=out)
pandoc.stdout.close()
# Pandoc conversion to markdown escapes LaTeX, we need to unescape it
result = re.sub(r"\\([\\$^_*<>])", r"\1", result)
if error:
result = result + u"*ERROR: Pandoc conversion failed with error {}*\n".format(error)
return result
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="fetch the latest arXiv listings and transform them to markdown")
parser.add_argument("archive", help="an archive to fetch", nargs="+")
parser.add_argument("-r", "--replacements", help="also fetch the replacements", action="store_true", default=False)
parser.add_argument("-a", "--link-to-abstract", help="make the links point to the abstracts rather than the PDFs", action="store_true", default=False)
args = parser.parse_args()
if args.link_to_abstract:
link_to = "abs"
else:
link_to = "pdf"
for a in args.archive:
print(parse_archive(a,args.replacements,link_to))
| 1.828125 | 2 |
Templates - Script/AdvancedFilter_DB.py | mitevpi/pyRevitCrucible | 8 | 12799165 | """Advanced Collection of Data: Collects all the walls of height 10"""
__author__ = '<NAME>'
import Autodesk.Revit.DB as DB
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
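# Build a rule-based parameter filter matching elements whose unconnected height
# parameter equals 10.0 (Revit internal units) within a 1e-6 tolerance, collect
# the matching walls, and select them in the active UI document.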
height_param_id = DB.ElementId(DB.BuiltInParameter.WALL_USER_HEIGHT_PARAM)
height_param_prov = DB.ParameterValueProvider(height_param_id)
param_equality = DB.FilterNumericEquals()
heigh_value_rule = DB.FilterDoubleRule(height_param_prov,
param_equality,
10.0,
1E-6)
param_filter = DB.ElementParameterFilter(heigh_value_rule)
walls = DB.FilteredElementCollector(doc) \
.WherePasses(param_filter) \
.ToElementIds()
uidoc.Selection.SetElementIds(walls) | 2.296875 | 2 |
algorithms/math/prime.py | laiseaquino/python-ds | 2 | 12799166 | def prime(limit):
    # print every prime below the given limit; 2 is handled separately because
    # the trial-division loop below only tests odd candidates
    if limit > 2:
        print(2)
    count = 3
    while count < limit:
        flag = 0
        for i in range(3, count, 2):
            if count % i == 0:
                flag = 1
                break
        if flag == 0:
            print(count)
        count += 2

prime(100)
| 3.640625 | 4 |
big_fiubrother_sampler/store_video_chunk.py | BigFiuBrother/big-fiubrother-sampler | 0 | 12799167 | from big_fiubrother_core import QueueTask
from big_fiubrother_core.db import Database, VideoChunk
from big_fiubrother_core.storage import raw_storage
from big_fiubrother_core.synchronization import ProcessSynchronizer
from os import path
import logging
class StoreVideoChunk(QueueTask):
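    # Queue task that persists an incoming video chunk: it creates a VideoChunk
    # row in the database, registers a sampling task with the process
    # synchronizer, writes the payload to a temporary .h264 file, uploads it to
    # raw storage, and forwards the chunk id and file path to the output queue.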
def __init__(self, configuration, input_queue, output_queue):
super().__init__(input_queue)
self.output_queue = output_queue
self.configuration = configuration
def init(self):
self.db = Database(self.configuration['db'])
self.storage = raw_storage(self.configuration['storage'])
self.process_synchronizer = ProcessSynchronizer(
self.configuration['synchronization'])
def execute_with(self, message):
video_chunk = VideoChunk(camera_id=message.camera_id,
timestamp=message.timestamp)
self.db.add(video_chunk)
logging.info(f"{video_chunk.id} created in DB. Sampling starting!")
self.process_synchronizer.register_video_task(str(video_chunk.id))
filepath = path.join('tmp', '{}.h264'.format(video_chunk.id))
with open(filepath, 'wb') as file:
file.write(message.payload)
self.storage.store_file(str(video_chunk.id), filepath)
self.output_queue.put({
'video_chunk_id': video_chunk.id,
'path': filepath
})
def close(self):
self.db.close()
self.process_synchronizer.close()
| 2.390625 | 2 |
python_modules/dagster/dagster/core/definitions/solid_invocation.py | drewsonne/dagster | 0 | 12799168 | <reponame>drewsonne/dagster<filename>python_modules/dagster/dagster/core/definitions/solid_invocation.py
import inspect
from typing import TYPE_CHECKING, Any, Dict, Generator, Optional, cast
from dagster import check
from dagster.core.definitions.events import AssetMaterialization, RetryRequested
from dagster.core.errors import (
DagsterInvalidConfigError,
DagsterInvalidInvocationError,
DagsterInvariantViolationError,
DagsterSolidInvocationError,
user_code_error_boundary,
)
if TYPE_CHECKING:
from dagster.core.definitions import SolidDefinition
from dagster.core.execution.context.invocation import DirectSolidExecutionContext
def solid_invocation_result(
solid_def: "SolidDefinition", context: Optional["DirectSolidExecutionContext"], *args, **kwargs
) -> Any:
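    # Validate that the provided context satisfies the solid's resource and
    # config requirements, bind the call arguments to the solid's input
    # definitions, execute the solid, and return either a single value or a
    # tuple of output values.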
context = _check_invocation_requirements(solid_def, context)
input_dict = _resolve_inputs(solid_def, args, kwargs)
outputs = _execute_and_retrieve_outputs(solid_def, context, input_dict)
if len(outputs) == 1:
return outputs[0]
return outputs
def _check_invocation_requirements(
solid_def: "SolidDefinition", context: Optional["DirectSolidExecutionContext"]
) -> "DirectSolidExecutionContext":
"""Ensure that provided context fulfills requirements of solid definition.
If no context was provided, then construct an enpty DirectSolidExecutionContext
"""
from dagster.core.execution.context.invocation import DirectSolidExecutionContext
from dagster.config.validate import validate_config
# Check resource requirements
if solid_def.required_resource_keys and context is None:
raise DagsterInvalidInvocationError(
f'Solid "{solid_def.name}" has required resources, but no context was provided. Use the'
"`build_solid_context` function to construct a context with the required "
"resources."
)
if context is not None and solid_def.required_resource_keys:
resources_dict = cast( # type: ignore[attr-defined]
"DirectSolidExecutionContext",
context,
).resources._asdict()
for resource_key in solid_def.required_resource_keys:
if resource_key not in resources_dict:
raise DagsterInvalidInvocationError(
f'Solid "{solid_def.name}" requires resource "{resource_key}", but no resource '
"with that key was found on the context."
)
# Check config requirements
if not context and solid_def.config_schema.as_field().is_required:
raise DagsterInvalidInvocationError(
f'Solid "{solid_def.name}" has required config schema, but no context was provided. '
"Use the `build_solid_context` function to create a context with config."
)
config = None
if solid_def.config_field:
solid_config = check.opt_dict_param(
context.solid_config if context else None, "solid_config"
)
config_evr = validate_config(solid_def.config_field.config_type, solid_config)
if not config_evr.success:
raise DagsterInvalidConfigError(
"Error in config for solid ", config_evr.errors, solid_config
)
mapped_config_evr = solid_def.apply_config_mapping({"config": solid_config})
if not mapped_config_evr.success:
raise DagsterInvalidConfigError(
"Error in config for solid ", mapped_config_evr.errors, solid_config
)
config = mapped_config_evr.value.get("config", {})
return (
context
if context
else DirectSolidExecutionContext(solid_config=config, resources_dict=None, instance=None)
)
def _resolve_inputs(solid_def, args, kwargs):
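    # Positional args fill input definitions in declaration order; any remaining
    # definitions must be satisfied by kwargs or fall back to their default values.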
input_defs = solid_def.input_defs
# Fail early if too many inputs were provided.
if len(input_defs) < len(args) + len(kwargs):
raise DagsterInvalidInvocationError(
f"Too many input arguments were provided for solid '{solid_def.name}'. This may be because "
"an argument was provided for the context parameter, but no context parameter was defined "
"for the solid."
)
input_dict = {
input_def.name: input_val for input_val, input_def in zip(args, input_defs[: len(args)])
}
for input_def in input_defs[len(args) :]:
if not input_def.has_default_value and input_def.name not in kwargs:
raise DagsterInvalidInvocationError(
f'No value provided for required input "{input_def.name}".'
)
input_dict[input_def.name] = (
kwargs[input_def.name] if input_def.name in kwargs else input_def.default_value
)
return input_dict
def _execute_and_retrieve_outputs(
solid_def: "SolidDefinition", context: "DirectSolidExecutionContext", input_dict: Dict[str, Any]
) -> tuple:
output_values = {}
output_names = {output_def.name for output_def in solid_def.output_defs}
for output in _core_generator(solid_def, context, input_dict):
if not isinstance(output, AssetMaterialization):
if output.output_name in output_values:
raise DagsterInvariantViolationError(
f'Solid "{solid_def.name}" returned an output "{output.output_name}" multiple '
"times"
)
elif output.output_name not in output_names:
raise DagsterInvariantViolationError(
f'Solid "{solid_def.name}" returned an output "{output.output_name}" that does '
f"not exist. The available outputs are {list(output_names)}"
)
else:
output_values[output.output_name] = output.value
else:
context.record_materialization(output)
# Check to make sure all non-optional outputs were yielded.
for output_def in solid_def.output_defs:
if output_def.name not in output_values and output_def.is_required:
raise DagsterInvariantViolationError(
f'Solid "{solid_def.name}" did not return an output for non-optional '
f'output "{output_def.name}"'
)
# Explicitly preserve the ordering of output defs
return tuple([output_values[output_def.name] for output_def in solid_def.output_defs])
def _core_generator(
solid_def: "SolidDefinition", context: "DirectSolidExecutionContext", input_dict: Dict[str, Any]
) -> Generator[Any, None, None]:
from dagster.core.execution.plan.compute import gen_from_async_gen
with user_code_error_boundary(
DagsterSolidInvocationError,
control_flow_exceptions=[RetryRequested],
msg_fn=lambda: f'Error occurred while invoking solid "{solid_def.name}":',
):
compute_iterator = solid_def.compute_fn(context, input_dict)
if inspect.isasyncgen(compute_iterator):
compute_iterator = gen_from_async_gen(compute_iterator)
yield from compute_iterator
| 2.046875 | 2 |
rl_groundup/envs/__init__.py | TristanBester/rl_groundup | 1 | 12799169 | from .k_armed_bandit import KArmedBandit
from .grid_world import GridWorld
from .race_track import RaceTrack
from .windy_grid_world import WindyGridWorld
from .maze import Maze
from .mountain_car import MountainCar
from .random_walk import RandomWalk
from .short_corridor import ShortCorridor
| 1.101563 | 1 |
Desafios/desafio074.py | josivantarcio/Desafios-em-Python | 0 | 12799170 | from random import randint
for i in range(5):
n = (randint(0,10))
if(i == 0):
m = n
M = n
if (n > M):
M = n
elif (n < m):
m = n
print(n, end=' ')
print(f'\nThe largest number was {M}\nand the smallest was {m}')
client/views/editors.py | omerk2511/dropbox | 4 | 12799171 | <reponame>omerk2511/dropbox
from Tkinter import *
from common import Codes
from ..controllers import FileController # EditorController (?)
from ..handlers.data import Data
class Editors(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.elements = {}
title_frame = Frame(self)
title_frame.pack(expand=True, fill=BOTH, padx=70, pady=(30, 20))
self.elements['title'] = Label(title_frame, text='Editors',
fg='#003399', font=('Arial', 28))
self.elements['title'].pack(side=TOP)
self.elements['editors_frame'] = Frame(self)
self.elements['editors_frame'].pack(side=TOP, padx=120, pady=30,
expand=False, fill=BOTH)
self.elements['new_editor_frame'] = Frame(self)
self.elements['new_editor_frame'].pack(side=TOP, padx=120, pady=30,
expand=False, fill=BOTH)
self.elements['editor_frames'] = []
self.current_file = None
def initialize(self):
self.current_file = Data().get_current_file()
self.editors = FileController.get_file_editors(self.current_file, Data().get_token())
for editor_frame in self.elements['editor_frames']:
editor_frame.pack_forget()
self.elements['editor_frames'] = []
self.elements['editors_frame'].pack_forget()
self.elements['editors_frame'].pack(side=TOP, padx=120, pady=30,
expand=False, fill=BOTH)
self.elements['new_editor_frame'].pack_forget()
self.elements['new_editor_frame'].pack(side=TOP, padx=120, pady=(10, 30),
expand=False, fill=BOTH)
if not self.editors:
no_editors_label = Label(self.elements['editors_frame'], bg='gray',
text='There are no editors for this file.', font=('Arial', 22), anchor='w')
no_editors_label.pack(side=LEFT, expand=True, fill=X)
self.elements['editor_frames'].append(no_editors_label)
for editor in self.editors:
editor_frame = Frame(self.elements['editors_frame'], bg='gray')
editor_frame.pack(side=TOP, expand=False, fill=X, pady=10)
editor_label = Label(editor_frame, font=('Arial', 18), bg='gray',
text='%s (%s)' % (editor['user']['username'], editor['user']['full_name']))
editor_label.pack(side=LEFT, padx=20, pady=10)
remove_editor_button = Button(editor_frame, text='Remove',
font=('Arial', 16), bg='#990000', fg='#ffffff', activebackground='#b30000',
activeforeground='#ffffff', command=self.generate_remove_editor(editor['id']))
remove_editor_button.pack(side=RIGHT, padx=20, pady=10)
self.elements['editor_frames'].append(editor_frame)
if 'editor_entry' in self.elements:
self.elements['editor_entry'].pack_forget()
self.elements['editor_entry'] = Entry(self.elements['new_editor_frame'],
font=('Arial', 18))
self.elements['editor_entry'].pack(side=LEFT, padx=(0, 10), expand=True, fill=BOTH)
if 'add_editor_button' in self.elements:
self.elements['add_editor_button'].pack_forget()
self.elements['add_editor_button'] = Button(self.elements['new_editor_frame'],
text='Add Editor', font=('Arial', 18), bg='#003399', activebackground='#002266',
fg='#ffffff', activeforeground='#ffffff', command=self.add_editor)
self.elements['add_editor_button'].pack(side=LEFT, expand=False, fill=X)
def add_editor(self):
editor_username = self.elements['editor_entry'].get()
self.elements['editor_entry'].delete(0, END)
if not editor_username:
self.parent.display_error('You have to enter an editor username.')
return
response = FileController.add_file_editor(self.current_file, editor_username,
Data().get_token())
if response.code == Codes.SUCCESS:
self.parent.display_info('Editor added successfully!')
self.initialize()
else:
self.parent.display_error(response.payload['message'])
def generate_remove_editor(self, editor_id):
return lambda: self.remove_editor(editor_id)
def remove_editor(self, editor_id):
response = FileController.remove_file_editor(editor_id, Data().get_token())
if response.code == Codes.SUCCESS:
self.parent.display_info('Editor removed successfully!')
self.initialize()
else:
self.parent.display_error(response.payload['message']) | 2.859375 | 3 |
apispec_swaggerinherit.py | timakro/apispec-swaggerinher | 3 | 12799172 | # apispec-swaggerinherit - Plugin for apispec adding support for Swagger-style
# inheritance using `allOf`
# Copyright (C) 2018 <NAME>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from apispec.ext.marshmallow import swagger
from marshmallow import Schema
std_bases = [Schema]
try:
from marshmallow_oneofschema import OneOfSchema
std_bases.append(OneOfSchema)
except ImportError:
pass
def swaggerinherit_definition_helper(spec, name, schema, definition, **kwargs):
"""Definition helper that modifies the schema definition to make use of
swagger-style inheritance using `allOf`. Uses the `schema` parameter.
"""
parents = [b for b in schema.__bases__ if b not in std_bases]
if not parents:
return
ref_path = swagger.get_ref_path(spec.openapi_version.version[0])
try:
refs = ['#/{}/{}'.format(ref_path,
spec.plugins['apispec.ext.marshmallow']['refs'][schema_cls])
for schema_cls in parents]
except KeyError:
raise ValueError("Parent schemas must be added to the spec before the "
"child schema")
child_def = definition.copy()
    for parent in parents:
        for field_name in parent._declared_fields.keys():
            del child_def['properties'][field_name]
            try:
                child_def['required'].remove(field_name)
            except ValueError:
                pass
if not child_def['required']:
del child_def['required']
definition.clear()
return {
'allOf': [{'$ref': ref} for ref in refs] + [child_def]
}
def setup(spec):
spec.register_definition_helper(swaggerinherit_definition_helper)
| 1.8125 | 2 |
migrations/versions/035_DemographicsRequestColumnDefinition_Add_UhlSystemNumber.py | LCBRU/identity | 0 | 12799173 | <gh_stars>0
from sqlalchemy import (
MetaData,
Table,
Column,
Integer,
)
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
t = Table("demographics_request_column_definition", meta, autoload=True)
uhl_system_number_column_id = Column("uhl_system_number_column_id", Integer)
uhl_system_number_column_id.create(t)
def downgrade(migrate_engine):
meta.bind = migrate_engine
t = Table("demographics_request_column_definition", meta, autoload=True)
t.c.uhl_system_number_column_id.drop()
| 2.234375 | 2 |
aao/test/spiders/test_sports.py | rkenny2/aao | 27 | 12799174 | import pytest
from aao.spiders import spiders
pytestmark = pytest.mark.sports
class TestSport():
"""Nothing to test. """
pass
class TestSoccer():
"""Test the Soccer ABC across all bookmakers. """
@pytest.fixture(scope='class', params=spiders.values())
def spider(self, request):
s = request.param()
yield s
s.quit()
competitions = {
'england': 'premier_league',
'italy': 'serie_a',
'spain': 'la_liga',
}
def test_countries(self, spider):
countries = spider.soccer.countries()
assert set(self.competitions.keys()) <= set(countries)
assert isinstance(countries, list)
def test_countries_full(self, spider):
countries = spider.soccer.countries(full=True)
assert set(self.competitions.keys()) <= set(countries.keys())
assert isinstance(countries, dict)
@pytest.mark.parametrize('country', competitions.keys())
def test_leagues(self, spider, country):
leagues = spider.soccer.leagues(country)
assert self.competitions[country] in leagues
assert isinstance(leagues, list)
@pytest.mark.parametrize('country', competitions.keys())
def test_leagues_full(self, spider, country):
leagues = spider.soccer.leagues(country, full=True)
assert self.competitions[country] in leagues.keys()
assert isinstance(leagues, dict)
def test_league_not_supported(self, spider):
country = 'foo_country'
with pytest.raises(KeyError, match=f'{country} is not supported *'):
spider.soccer.leagues(country)
@pytest.mark.parametrize('country,league', competitions.items())
def test_teams(self, spider, country, league):
teams = spider.soccer.teams(country, league)
assert isinstance(teams, list)
@pytest.mark.parametrize('country,league', competitions.items())
def test_teams_full(self, spider, country, league):
teams = spider.soccer.teams(country, league, full=True)
assert isinstance(teams, dict)
def test_teams_not_supported(self, spider):
country, league = 'serie_a', 'foo_league'
with pytest.raises(KeyError, match=f'{league} is not supported *'):
spider.soccer.teams(country, league)
@pytest.mark.parametrize('country,league', competitions.items())
def test_setattr_competiton(self, spider, country, league):
spider.soccer._setattr_competiton(country, league)
assert spider.soccer._country
assert spider.soccer.country
assert spider.soccer._league
assert spider.soccer.league
@pytest.mark.parametrize(
'country,league', [next(iter(competitions.items()))])
def test_events(self, spider, country, league):
events = spider.soccer.events(country, league)
assert isinstance(events, list)
assert events
@pytest.mark.parametrize(
'country,league', [next(iter(competitions.items()))])
def test_odds(self, spider, country, league):
events, odds = spider.soccer.odds(country, league)
assert isinstance(events, list)
assert events
assert isinstance(odds, list)
assert odds
| 2.578125 | 3 |
tests/test_easter.py | cccntu/dateutil | 0 | 12799175 | from bs_dateutil.easter import easter
from bs_dateutil.easter import EASTER_WESTERN, EASTER_ORTHODOX, EASTER_JULIAN
from datetime import date
import pytest
# List of easters between 1990 and 2050
western_easter_dates = [
date(1990, 4, 15),
date(1991, 3, 31),
date(1992, 4, 19),
date(1993, 4, 11),
date(1994, 4, 3),
date(1995, 4, 16),
date(1996, 4, 7),
date(1997, 3, 30),
date(1998, 4, 12),
date(1999, 4, 4),
date(2000, 4, 23),
date(2001, 4, 15),
date(2002, 3, 31),
date(2003, 4, 20),
date(2004, 4, 11),
date(2005, 3, 27),
date(2006, 4, 16),
date(2007, 4, 8),
date(2008, 3, 23),
date(2009, 4, 12),
date(2010, 4, 4),
date(2011, 4, 24),
date(2012, 4, 8),
date(2013, 3, 31),
date(2014, 4, 20),
date(2015, 4, 5),
date(2016, 3, 27),
date(2017, 4, 16),
date(2018, 4, 1),
date(2019, 4, 21),
date(2020, 4, 12),
date(2021, 4, 4),
date(2022, 4, 17),
date(2023, 4, 9),
date(2024, 3, 31),
date(2025, 4, 20),
date(2026, 4, 5),
date(2027, 3, 28),
date(2028, 4, 16),
date(2029, 4, 1),
date(2030, 4, 21),
date(2031, 4, 13),
date(2032, 3, 28),
date(2033, 4, 17),
date(2034, 4, 9),
date(2035, 3, 25),
date(2036, 4, 13),
date(2037, 4, 5),
date(2038, 4, 25),
date(2039, 4, 10),
date(2040, 4, 1),
date(2041, 4, 21),
date(2042, 4, 6),
date(2043, 3, 29),
date(2044, 4, 17),
date(2045, 4, 9),
date(2046, 3, 25),
date(2047, 4, 14),
date(2048, 4, 5),
date(2049, 4, 18),
date(2050, 4, 10),
]
orthodox_easter_dates = [
date(1990, 4, 15),
date(1991, 4, 7),
date(1992, 4, 26),
date(1993, 4, 18),
date(1994, 5, 1),
date(1995, 4, 23),
date(1996, 4, 14),
date(1997, 4, 27),
date(1998, 4, 19),
date(1999, 4, 11),
date(2000, 4, 30),
date(2001, 4, 15),
date(2002, 5, 5),
date(2003, 4, 27),
date(2004, 4, 11),
date(2005, 5, 1),
date(2006, 4, 23),
date(2007, 4, 8),
date(2008, 4, 27),
date(2009, 4, 19),
date(2010, 4, 4),
date(2011, 4, 24),
date(2012, 4, 15),
date(2013, 5, 5),
date(2014, 4, 20),
date(2015, 4, 12),
date(2016, 5, 1),
date(2017, 4, 16),
date(2018, 4, 8),
date(2019, 4, 28),
date(2020, 4, 19),
date(2021, 5, 2),
date(2022, 4, 24),
date(2023, 4, 16),
date(2024, 5, 5),
date(2025, 4, 20),
date(2026, 4, 12),
date(2027, 5, 2),
date(2028, 4, 16),
date(2029, 4, 8),
date(2030, 4, 28),
date(2031, 4, 13),
date(2032, 5, 2),
date(2033, 4, 24),
date(2034, 4, 9),
date(2035, 4, 29),
date(2036, 4, 20),
date(2037, 4, 5),
date(2038, 4, 25),
date(2039, 4, 17),
date(2040, 5, 6),
date(2041, 4, 21),
date(2042, 4, 13),
date(2043, 5, 3),
date(2044, 4, 24),
date(2045, 4, 9),
date(2046, 4, 29),
date(2047, 4, 21),
date(2048, 4, 5),
date(2049, 4, 25),
date(2050, 4, 17),
]
# A random smattering of Julian dates.
# Pulled values from http://www.kevinlaughery.com/east4099.html
julian_easter_dates = [
date(326, 4, 3),
date(375, 4, 5),
date(492, 4, 5),
date(552, 3, 31),
date(562, 4, 9),
date(569, 4, 21),
date(597, 4, 14),
date(621, 4, 19),
date(636, 3, 31),
date(655, 3, 29),
date(700, 4, 11),
date(725, 4, 8),
date(750, 3, 29),
date(782, 4, 7),
date(835, 4, 18),
date(849, 4, 14),
date(867, 3, 30),
date(890, 4, 12),
date(922, 4, 21),
date(934, 4, 6),
date(1049, 3, 26),
date(1058, 4, 19),
date(1113, 4, 6),
date(1119, 3, 30),
date(1242, 4, 20),
date(1255, 3, 28),
date(1257, 4, 8),
date(1258, 3, 24),
date(1261, 4, 24),
date(1278, 4, 17),
date(1333, 4, 4),
date(1351, 4, 17),
date(1371, 4, 6),
date(1391, 3, 26),
date(1402, 3, 26),
date(1412, 4, 3),
date(1439, 4, 5),
date(1445, 3, 28),
date(1531, 4, 9),
date(1555, 4, 14),
]
@pytest.mark.parametrize("easter_date", western_easter_dates)
def test_easter_western(easter_date):
assert easter_date == easter(easter_date.year, EASTER_WESTERN)
@pytest.mark.parametrize("easter_date", orthodox_easter_dates)
def test_easter_orthodox(easter_date):
assert easter_date == easter(easter_date.year, EASTER_ORTHODOX)
@pytest.mark.parametrize("easter_date", julian_easter_dates)
def test_easter_julian(easter_date):
assert easter_date == easter(easter_date.year, EASTER_JULIAN)
def test_easter_bad_method():
with pytest.raises(ValueError):
easter(1975, 4)
| 2.21875 | 2 |
crumpitmanagerapi/config.py | fossabot/crumpitmanagerAPIs | 0 | 12799176 | <gh_stars>0
#! /usr/bin/python3
# Functionality for reading and using config file
#
#Author: <NAME>
#Date: May 2019
import os
import subprocess
import pathlib
import shlex
import datetime
import yaml
import cerberus
class validateYAML:
def validate_yaml(self, schemaFile: str, yamlFile: str):
schema = eval(open(schemaFile, 'r').read())
v = cerberus.Validator(schema)
doc = yaml.safe_load(open(yamlFile, 'r').read())
r = v.validate(doc, schema)
return r, v.errors
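# Illustrative schema file contents (hypothetical -- the real configs/schema.yaml is not
# shown here): the schema file is eval()'d into a plain Python dict of cerberus rules,
# e.g. {'db': {'type': 'dict', 'required': True}, 'storage': {'type': 'dict'}}.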
class Config:
"""
Configuration parsed directly from a YAML file
"""
def __init__(self):
self.config = None
def load(self, config_file: str):
try:
validator = validateYAML()
ok, errs = validator.validate_yaml('configs/schema.yaml', config_file)
if ok:
print(config_file, "validated")
with open(config_file, 'r') as stream:
self.config = yaml.safe_load(stream)
else:
print(config_file, "validation failed")
print(errs)
except Exception as e:
print('ERROR: Couldn\'t setup config parameters')
print(e)
def load_str(self, config_str: str):
        self.config = yaml.safe_load(config_str)
def get(self, field: str):
return self.config[field]
| 2.796875 | 3 |
tests/test_patterns.py | abs-tudelft/vhdeps | 17 | 12799177 | """Tests the GHDL backend."""
from unittest import TestCase
import os
import re
from plumbum import local
from .common import run_vhdeps
DIR = os.path.realpath(os.path.dirname(__file__))
class TestPatterns(TestCase):
"""Tests the test case pattern matching logic (also used by the vsim
backend)."""
def test_no_patterns(self):
"""Test the default test case pattern (`*.tc`)"""
with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
code, out, _ = run_vhdeps('ghdl', '-i', DIR+'/simple/multiple-ok')
self.assertEqual(code, 0)
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*foo_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*bar_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*baz.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -e [^\n]*baz', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -r [^\n]*baz', out)))
def test_positive_name(self):
"""Test positive entity name test case patterns"""
with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
code, out, _ = run_vhdeps(
'ghdl', '-i', DIR+'/simple/multiple-ok', '-pfoo_tc', '-pbaz')
self.assertEqual(code, 0)
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*foo_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*bar_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*baz.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*foo_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -e [^\n]*bar_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*baz', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*foo_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -r [^\n]*bar_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*baz', out)))
def test_negative_name(self):
"""Test negative entity name test case patterns"""
with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
code, out, _ = run_vhdeps(
'ghdl', '-i', DIR+'/simple/multiple-ok', '-p*_tc', '-p!foo*')
self.assertEqual(code, 0)
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*foo_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*bar_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*baz.vhd', out)))
self.assertFalse(bool(re.search(r'ghdl -e [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -e [^\n]*baz', out)))
self.assertFalse(bool(re.search(r'ghdl -r [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -r [^\n]*baz', out)))
def test_positive_filename(self):
"""Test positive filename test case patterns"""
with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
code, out, _ = run_vhdeps(
'ghdl', '-i', DIR+'/simple/multiple-ok', '-p:*_tc.vhd', '-pbaz')
self.assertEqual(code, 0)
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*foo_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*bar_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*baz.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*bar_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*baz', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*bar_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*baz', out)))
def test_negative_filename(self):
"""Test negative filename test case patterns"""
with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
code, out, _ = run_vhdeps(
'ghdl', '-i', DIR+'/simple/multiple-ok', '-p:*.vhd', '-p:!*baz.vhd')
self.assertEqual(code, 0)
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*foo_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*bar_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*baz.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -e [^\n]*baz', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -r [^\n]*baz', out)))
def test_multi_tc_per_file(self):
"""Test multiple test cases per file"""
with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
code, out, _ = run_vhdeps('ghdl', '-i', DIR+'/complex/multi-tc-per-file')
self.assertEqual(code, 0)
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*test_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -e [^\n]*baz', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -r [^\n]*baz', out)))
| 2.515625 | 3 |
python_src/ROCstory_classification.py | Joyce-yanqiongzhang/proj2_storytelling | 0 | 12799178 |
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
import sys
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import ParameterGrid, GridSearchCV
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
#print(__doc__)
import warnings
# Apply the filter at module scope; wrapping it in `with warnings.catch_warnings():`
# would revert the filter as soon as the (empty) block exits.
warnings.filterwarnings("ignore", category=Warning)  # DeprecationWarning
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
class ItemSelector(BaseEstimator, TransformerMixin):
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class CustomFeatures(BaseEstimator):
def __init__(self):
pass
def get_feature_names(self):
return np.array(['sent_len']) #, 'lang_prob'])
def fit(self, documents, y=None):
return self
def transform(self, x_dataset):
X_num_token = list()
#X_count_nouns = list()
for sentence in x_dataset:
# takes raw text and calculates type token ratio
X_num_token.append(len(sentence))
# takes pos tag text and counts number of noun pos tags (NN, NNS etc.)
# X_count_nouns.append(count_nouns(sentence))
X = np.array([X_num_token]).T #, X_count_nouns]).T
if not hasattr(self, 'scalar'):
self.scalar = StandardScaler().fit(X)
return self.scalar.transform(X)
class FeatureExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('words', object), ('meta', object)]) #('length', object), ('condscore', object), ('score', object), ('normscore', object), ('langpred', bool)])
for i, text in enumerate(posts):
elems = text.split('\t')
words, cs, s, ns, lp = elems[:5]
#print(elems)
features['words'][i] = words
features['meta'][i] = {'length': len(words.split()),
'condscore': float(cs), 'score': float(s),
'normscore': float(ns), 'langpred': bool(lp)}
if len(elems) > 5:
ecs, es, ens, ep = elems[5:]
features['meta'][i].update({'event_condscore': float(ecs),
'event_score': float(es), 'event_normscore': float(ens), 'event_pred': bool(ep)})
return features
# #############################################################################
# Load data: paired .true/.false files provide the positive/negative examples
def load_data(filename, suffix):
contents, labels = [], []
#data = StoryData()
with open(filename+'.true.'+suffix) as tinf, open(filename+'.false.'+suffix) as finf:
for line in tinf:
elems = line.strip()#.split('\t')
contents.append(elems)
labels.append(1)
for line in finf:
elems = line.strip()#.split('\t')
contents.append(elems)
labels.append(0)
print("data size:", len(contents))
return [contents, labels]
def event_orig_mapping(orig_idx_file, event_idx_file):
orig_idx_array = []
event_idx_dict = {}
with open(orig_idx_file) as oinf, open(event_idx_file) as einf:
oinf.readline()
einf.readline()
for line in oinf:
elems = line.strip().split()
orig_idx_array.append(elems[0])
counter = 0
for line in einf:
elems = line.strip().split()
event_idx_dict[elems[0]] = counter
counter += 1
origin_to_event = {}
for i, oidx in enumerate(orig_idx_array):
if oidx in event_idx_dict:
origin_to_event[i] = event_idx_dict[oidx]
print ('map dictionary size:', len(origin_to_event))
return origin_to_event
def add_e2e_scores(original_data_array, event_data_array, origin_to_event):
assert len(event_data_array) == 2 * len(origin_to_event), (len(event_data_array), len(origin_to_event))
assert len(original_data_array) >= len(event_data_array)
half_len = len(original_data_array) / 2
for i, elems in enumerate(original_data_array):
if i in origin_to_event:
original_data_array[i] = elems + '\t' + event_data_array[origin_to_event[i]]
if i - half_len in origin_to_event:
#print(i, origin_to_event[i-half_len], len(origin_to_event))
original_data_array[i] = elems + '\t' + event_data_array[origin_to_event[i-half_len] + len(origin_to_event)]
return original_data_array
def pairwise_eval(probs):
mid = int(len(probs) / 2)
print('middle point: %d' % mid)
pos = probs[:mid]
neg = probs[mid:]
assert len(pos) == len(neg)
count = 0.0
for p, n in zip(pos, neg):
if p[1] > n[1]:
count += 1.0
# print('True')
# else:
# print('False')
acc = count/mid
print('Test result: %.3f' % acc)
return acc
train_data = load_data(sys.argv[1], sys.argv[3])
test_data = load_data(sys.argv[2], sys.argv[3])
#train_event = load_data(sys.argv[4], sys.argv[6])
#test_event = load_data(sys.argv[5], sys.argv[6])
#train_e2o = event_orig_mapping(sys.argv[7], sys.argv[8])
#test_e2o = event_orig_mapping(sys.argv[9], sys.argv[10])
# add event-to-event info
#train_data[0] = add_e2e_scores(train_data[0], train_event[0], train_e2o)
#test_data[0] = add_e2e_scores(test_data[0], test_event[0], test_e2o)
print('Finished data loading!!')
for elem in train_data[0][:10]:
print (elem)
# #############################################################################
# Define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('featextract', FeatureExtractor()),
('union', FeatureUnion(
transformer_list=[
('meta', Pipeline([
('selector', ItemSelector(key='meta')),
('vect', DictVectorizer()),
('scale', StandardScaler(with_mean=False)),
])),
('word', Pipeline([
('selector', ItemSelector(key='words')),
('vect', CountVectorizer(ngram_range=(1,5), max_df=0.9)),
('tfidf', TfidfTransformer()),
])),
('char', Pipeline([
('selector', ItemSelector(key='words')),
('vect', CountVectorizer(ngram_range=(1,5), analyzer='char', max_df=0.8)),
('tfidf', TfidfTransformer()),
])),
],
transformer_weights={
'meta': 0.3,
'word': 1.0,
'char': 1.0,
},
)),
('clf', SGDClassifier(loss='log', alpha=0.0005, tol=0.005, random_state=0)),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'union__transformer_weights': ({'meta': 0.6, 'word': 1.0, 'char': 1.0},
# {'meta': 1.0, 'word': 1.0, 'char': 0.75},
# {'meta': 1.0, 'word': 1.0, 'char': 0.5},
# {'meta': 1.0, 'word': 0.75, 'char': 1.0},
# {'meta': 1.0, 'word': 0.75, 'char': 0.75},
# {'meta': 1.0, 'word': 0.75, 'char': 0.5},
# {'meta': 1.0, 'word': 0.5, 'char': 1.0},
# {'meta': 1.0, 'word': 0.5, 'char': 0.75},
# {'meta': 1.0, 'word': 0.5, 'char': 0.5},
{'meta': 0.7, 'word': 1.0, 'char': 1.0},
{'meta': 0.5, 'word': 1.0, 'char': 1.0},
{'meta': 0.4, 'word': 1.0, 'char': 1.0},
{'meta': 0.3, 'word': 1.0, 'char': 1.0},
# {'meta': 0.75, 'word': 1.0, 'char': 0.75},
# {'meta': 0.75, 'word': 1.0, 'char': 0.5},
# {'meta': 0.75, 'word': 0.75, 'char': 1.0},
# {'meta': 0.75, 'word': 0.75, 'char': 0.75},
# {'meta': 0.75, 'word': 0.75, 'char': 0.5},
# {'meta': 0.75, 'word': 0.5, 'char': 1.0},
# {'meta': 0.75, 'word': 0.5, 'char': 0.75},
# {'meta': 0.75, 'word': 0.5, 'char': 0.5},
# {'meta': 0.5, 'word': 1.0, 'char': 1.0},
# {'meta': 0.5, 'word': 1.0, 'char': 0.75},
# {'meta': 0.5, 'word': 1.0, 'char': 0.5},
# {'meta': 0.5, 'word': 0.75, 'char': 1.0},
# {'meta': 0.5, 'word': 0.75, 'char': 0.75},
# {'meta': 0.5, 'word': 0.75, 'char': 0.5},
# {'meta': 0.5, 'word': 0.5, 'char': 1.0},
# {'meta': 0.5, 'word': 0.5, 'char': 0.75},
# {'meta': 0.5, 'word': 0.5, 'char': 0.5},
),
'union__word__vect__max_df': (0.7, 0.8, 0.9, 1.0), #0.5,
'union__char__vect__max_df': (0.7, 0.8, 0.9, 1.0), #0.5,
#'vect__max_features': (None, 5000, 10000, 50000),
#'union__word__vect__ngram_range': ((1, 4), (1, 5)), # trigram or 5-grams (1, 4),
#'union__char__vect__ngram_range': ((1, 4), (1, 5)), # trigram or 5-grams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.001, 0.0005, 0.0001),
#'clf__penalty': ('l2', 'l1'),
'clf__tol': (5e-3, 1e-3, 5e-4),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
# pipeline.fit(train_data[0], train_data[1])
# probs = pipeline.predict_proba(test_data[0])
# acc = pairwise_eval(probs)
# exit(0)
#grid_params = list(ParameterGrid(parameters))
grid_search = GridSearchCV(pipeline, parameters, cv=5, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
#pipeline.fit(train_data[0], train_data[1]) #.contents, train_data.labels)
'''for params in grid_params:
print('Current parameters:', params)
pipeline.set_params(**params)
pipeline.fit(train_data[0], train_data[1])
probs = pipeline.predict_proba(test_data[0])
acc = pairwise_eval(probs)
exit(0)
'''
grid_search.fit(train_data[0], train_data[1])
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
print('predicting on the test data...')
score = grid_search.score(test_data[0], test_data[1])
print('Test score: %.3f' % score)
probs = grid_search.predict_proba(test_data[0])
pairwise_eval(probs)
| 3.03125 | 3 |
library/comb_mod.py | harurunrunrun/python-my-library | 0 | 12799179 | <gh_stars>0
def comb_mod(n,r,mod):
if n-r<r:
r=n-r
N=n
R=r
u=1
d=1
for i in range(r):
u*=N
u%=mod
N-=1
d*=R
d%=mod
R-=1
return u*pow(d,mod-2,mod)%mod
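# Minimal usage sketch (added example; assumes mod is prime, which Fermat's little
# theorem requires for pow(d, mod - 2, mod) to be the modular inverse of d):
if __name__ == "__main__":
    print(comb_mod(10, 3, 10**9 + 7))  # C(10, 3) = 120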
| 2.953125 | 3 |
blog/forms.py | Emiliemorais/ido | 0 | 12799180 | from django import forms
from django.utils.translation import ugettext, ugettext_lazy as _
from models import Message, Questionnaire
class MessageForm(forms.ModelForm):
"""MessageForm Class. TThis class contains the treatments
of the existents forms on create message page.
"""
class Meta:
"""Meta Class. This class defines the informations
that will be used based on existent set
from Message Model.
"""
model = Message
fields = '__all__'
class QuestionnarieForm(forms.ModelForm):
"""QuestionnarieForm Class. TThis class contains the treatments
of the existents forms on create message page.
"""
class Meta:
"""Meta Class. This class defines the informations
that will be used based on existent set
from Questionnarie Model.
"""
model = Questionnaire
fields = '__all__' | 2.375 | 2 |
sign_checker.py | DineshJas/python_tasks | 0 | 12799181 | def func():
a = int(input("enter a number : "))
if a < 0:
print("Negative")
elif a > 0:
print("Positive")
else:
print("zero")
func()
| 3.984375 | 4 |
ml_layer/sentiment/server/server/res_manager.py | indranildchandra/UltimateCryptoChallenge2018 | 1 | 12799182 | <gh_stars>1-10
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.csrf import csrf_exempt
from django.apps import AppConfig
import os
import numpy as np
import json
import re
from enum import Enum
import tensorflow as tf
from tensorflow.contrib import learn
class EmailClasses(Enum):
recall = 0
status = 1
def getNoOfClasses():
return len(list(EmailClasses))
print(EmailClasses(0))
class Sentiment(AppConfig):
def __init__(self):
print("initialising module")
self.checkpoint_dir = "/home/anson/Desktop/hackathons/crypto/sentiment/runs/1528569664/checkpoints"
self.allow_soft_placement = True
self.log_device_placement = False
checkpoint_file = tf.train.latest_checkpoint(self.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=self.allow_soft_placement,
log_device_placement=self.log_device_placement)
self.sess = tf.Session(config=session_conf)
with self.sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(self.sess, checkpoint_file)
# Get the placeholders from the graph by name
self.input_x = graph.get_operation_by_name("input_x").outputs[0]
self.scores = graph.get_operation_by_name("output/scores").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
self.dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
self.predictions = graph.get_operation_by_name("output/predictions").outputs[0]
def ready(self):
""" Called by Django only once during startup
"""
# Initialize the auto reply model(should be launched only once)
# if not any(x in sys.argv for x in ['makemigrations', 'migrate']): # HACK: Avoid initialisation while migrate
#do something
print("In ready")
@csrf_exempt
def getResponse(self, request):
if request.method == "POST":
print("request")
print(request.body)
print(request.POST)
reqStr = str(request.body,'utf-8')
reqStrArr = reqStr.split()
reqStr = ' '.join(reqStrArr)
print("reqStr")
print(reqStr)
requestBody = json.loads(reqStr)
print(requestBody)
if requestBody['message'] is not None:
query = requestBody['message']
# Map data into vocabulary
vocab_path = os.path.join(self.checkpoint_dir, "..", "vocab")
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_test = np.array(list(vocab_processor.transform([query])))
# batch_predictions = self.sess.run(self.predictions, {self.input_x: x_test, self.dropout_keep_prob: 1.0})
batch_scores = self.sess.run(self.scores, {self.input_x: x_test, self.dropout_keep_prob: 1.0})
# answer = batch_predictions[0]
scores = batch_scores[0]
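                # shift scores to be non-negative, square-normalise them so they sum to 1,
                # then collapse to a single signed value in [-1, 1] (class-0 score minus class-1 score)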
scores = scores-np.min(scores)
print(scores)
scores = (scores*scores)/sum(scores*scores)
print(scores)
pred = np.argmax(scores)
score = scores[0] + scores[1]*-1
else:
score = 0
res = "Unable to classify request"
# return HttpResponse(answer)
# print(HttpResponse(res,content_type="text/plain",charset="utf-8"))
# print(HttpResponse(res))
# print(HttpResponse(res,content_type="text/plain",charset="utf-8").getvalue())
return HttpResponse(score,content_type="text/plain",charset="utf-8")
def getTemplate(self,_class, utr):
template = ""
if utr is None:
template = "No utr found"
else:
m = re.match(r'SBI', str(utr))
print("utr")
print(utr)
print("m")
print(m)
if m is None:
if _class == 1:
template = """
Dear Sir,\\n\\n
The status of your inward transaction is as follows:\\n
1. UTR No. =>XXXXXxxxxxxxxxxx\\n
2. Date =>15-03-2017\\n
3. Amount =>1234.00\\n
4. Sending IFSC =>XXXXX0176600\\n
5. Remitter A/c =>1111111111116452\\n
6. Remitter Name =>E456946\\n
7. Remitter Details =>ABC Cop DC Nagar\\n\\n\\n
Regards."""
else:
template = """
Dear Sir,\\n\\n
The amount credited to the account xxxxxx mentioned in the mail trail has been remitted back to the remitter account as requested.\\n
The details of the inward transaction are as follows:\\n
1. UTR No. =>XXXXXxxxxxxxxxxx\\n
2. Date =>15-03-2017\\n
3. Amount =>1234.00\\n
4. Sending IFSC =>XXXXX0176600\\n
5. Remitter A/c =>1111111111116452\\n
6. Remitter Name =>E456946\\n
7. Remitter Details =>ABC Cop DC Nagar\\n\\n
Regards."""
else:
if _class == 1:
template = """
Dear Sir,\\n\\n
The status of your outward transaction is as follows:\\n
1. UTR No. =>XXXXXxxxxxxxxxxx\\n
2. Date =>15-03-2017\\n
3. Amount =>1234.00\\n
4. Receiving IFSC =>XXXXX0176600\\n
5. Beneficiary A/c =>1111111111116452\\n
6. Beneficiary Name =>E456946\\n
7. Beneficiary Details =>ABC Cop DC Nagar\\n\\n
Regards."""
else:
template = """
Dear Sir,\\n\\n
The transaction to the account mentioned in the mail trail has been recalled.\\n
The details of the outward transaction are as follows:\\n
1. UTR No. =>XXXXXxxxxxxxxxxx\\n
2. Date =>15-03-2017\\n
3. Amount =>1234.00\\n
4. Receiving IFSC =>XXXXX0176600\\n
5. Beneficiary A/c =>1111111111116452\\n
6. Beneficiary Name =>E456946\\n
7. Beneficiary Details =>ABC Cop DC Nagar\\n\\n
Regards."""
return template
def saveLog(self, query, _class):
logFileQ= "server/Log/query.txt"
logFileL = "server/Log/labels.txt"
try:
with open(logFileQ,"a") as f:
f.write(query+"\n")
except Exception as e:
raise e
try:
with open(logFileL,"a") as f:
# noOfClasses = EmailClasses.getNoOfClasses()
f.write(EmailClasses(_class).name+"\n")
except Exception as e:
raise e
@csrf_exempt
def log(self, request):
if request.method == "POST":
print("request")
print(request.body)
#Java sends string encoded in this format
reqStr = str(request.body,'ISO 8859-1')
print("reqStr ISO")
print(reqStr)
# reqStr.replace(u'\xa0', u' ').encode('utf-8')
# reqStr = str(request.body,'utf-8')
reqStrArr = reqStr.split()
reqStr = ' '.join(reqStrArr)
print("reqStr")
print(reqStr)
requestBody = json.loads(reqStr)
print(requestBody)
logFile= "server/Log/log.txt"
try:
with open(logFile,"a") as f:
f.write(reqStr+"\n")
except Exception as e:
raise e
return HttpResponse("Success")
| 1.984375 | 2 |
Interactive GUI.py | Wyvernhunter345/my-python-code | 0 | 12799183 | <filename>Interactive GUI.py
from tkinter import *
from time import sleep
window = Tk()
window.title("Diamond Clicker")
back = Canvas(window, height=30, width=30)
back.pack()
diamonds = 0
def morediamonds():
global diamonds
diamonds += 1
print ("You have " + str(diamonds) + " diamonds!")
def cursorworking():
    global diamonds
    if diamonds < 15:
        print ("Not enough diamonds!")
        return
    diamonds -= 15  # pay the cursor's one-time cost of 15
    for x in range(20):
        diamonds += 1
        print ("You now have " + str(diamonds) + " diamonds!")
        sleep(1)  # note: sleep() blocks the Tk event loop while the cursor runs
def minerworking():
global diamonds
diamonds -=15
Cursor = Button(window, text="Cursor: Clicks every second (Cost: 15). Lasts 20 seconds.", command=cursorworking)
PlusOneDiamonds = Button(window, text="+1 Diamond", command=morediamonds)
PlusOneDiamonds.pack()
Cursor.pack()
window.mainloop()  # start the Tk event loop so the window is shown and stays responsive
| 3.765625 | 4 |
singleton/singleton.py | majianfei/practice | 1 | 12799184 | def Singleton(cls):
"""装饰器,依赖闭包,python3以前没有nonlocal,所以需要定义为可变对象,比如,_instance={}"""
_instance = None
def wrap(*args, **kwargs):
nonlocal _instance
if _instance is None:
_instance = cls(*args, **kwargs)
return _instance
return wrap
class Singleton2():
"""继承,依赖类变量_instance,"""
def __new__(cls, *args, **kwargs):
if not hasattr(cls, "_instance"):
cls._instance = super().__new__(cls, *args, **kwargs)
return cls._instance
class Singleton3(type):
"""MetaClass"""
def __call__(cls, *args, **kwargs):
if not hasattr(cls, "_instance"):
cls._instance = super().__call__(*args, **kwargs)
return cls._instance
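# Note on the three variants above: the decorator and the metaclass intercept the
# call itself, so __init__ runs only on the first instantiation; Singleton2's
# __new__ returns the cached instance but __init__ is still re-run on every call.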
#@Singleton
#class TestClass(Singleton2):
class TestClass(metaclass = Singleton3):
def __init__(self):
print("TestClass init")
if __name__ == "__main__":
a = TestClass()
b = TestClass()
print("id of a:",id(a))
print("id of b:",id(b))
#https://segmentfault.com/q/1010000007818814
| 3.53125 | 4 |
apps/django_auth_system/__init__.py | ipodjke/django_authorization_system | 0 | 12799185 | <reponame>ipodjke/django_authorization_system<filename>apps/django_auth_system/__init__.py
default_app_config = 'django_auth_system.apps.UsersConfig'
| 1.09375 | 1 |
preprocessing/main.py | hatttruong/feature2vec | 0 | 12799186 | """Summary
Attributes:
Configer (TYPE): Description
"""
import logging
import argparse
from src.preprocess import *
from src.item_preprocessor import *
from src.configer import *
from src import tfidf
Configer = Configer('setting.ini')
logging.basicConfig(
# filename='log.log',
format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
parser = argparse.ArgumentParser()
print(Configer.ip_address, Configer.port,
Configer.ssh_username, Configer.ssh_password)
print(Configer.db_name, Configer.db_username, Configer.db_password)
if __name__ == '__main__':
parser.add_argument(
'action',
choices=['define_concepts', 'update_chartevents',
'create_train_dataset', 'crawl_webpages',
'tfidf_medical_webpages', 'cluster', 'backup', 'restore',
'create_los_dataset'],
help='define action for preprocess'
)
parser.add_argument('-p', '--process', default=2, type=int,
help='number of process')
parser.add_argument(
'-cd', '--concept_dir', default='../data',
help='directory to store concept definition')
# options for create train data
parser.add_argument(
'-ed', '--export_dir',
help='directory to store train data (options for create train data)')
args = parser.parse_args()
if args.action == 'define_concepts':
define_concepts(output_dir=args.concept_dir,
processes=args.process)
elif args.action == 'update_chartevents':
update_chartevents_value(concept_dir=args.concept_dir)
elif args.action == 'create_train_dataset':
create_train_feature_dataset(export_dir=args.export_dir,
processes=args.process,
concept_dir=args.concept_dir)
elif args.action == 'create_los_dataset':
create_cvd_los_dataset(export_dir=args.export_dir,
concept_dir=args.concept_dir)
elif args.action == 'crawl_webpages':
# TODO: parameters
export_dir = '../data/webpages'
concept_dir = '../data'
crawl_webpages(concept_dir, export_dir)
elif args.action == 'tfidf_medical_webpages':
tfidf.train_tfidf(min_count=5, chunksize=5000, ngrams=(1, 1),
model_dir='../models')
elif args.action == 'cluster':
cluster()
elif args.action == 'backup':
backup_merge_data()
elif args.action == 'restore':
restore_merge_data()
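# Example invocations (hypothetical paths/values):
#   python main.py define_concepts -p 4 -cd ../data
#   python main.py create_train_dataset -ed ../data/train -cd ../data -p 2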
| 2.03125 | 2 |
Searching Algorithms/linear_search.py | harsh793/Algorithms | 0 | 12799187 | """This program implements linear search algorithms having a time complexity of O[n].
It compares every element of the array to the key.
"""
def linear_search(array, key):
len_array = len(array)
t = None
for i in range(len_array):
if array[i] == key:
t = i
else:
pass
    if t is not None:
print("Found {} at position {} in the array.".format(key, t))
else:
print("{} not present in the array.".format(key))
array = list(map(int, input("Enter elements of array separated by space: ").split()))
key = int(input("Enter element to find: "))  # cast to int so it can match the integer array elements
linear_search(array, key)
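# Example run (hypothetical input): for array "3 7 9" and key "7" this prints
# "Found 7 at position 1 in the array." after at most len(array) comparisons.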
| 4.21875 | 4 |
tacotron2_gst/data_utils.py | tilde-nlp/pip2-expressive-speech-synthesis-for-dialogs | 0 | 12799188 | <reponame>tilde-nlp/pip2-expressive-speech-synthesis-for-dialogs
"""
Adapted from:
- https://github.com/NVIDIA/tacotron2
- https://github.com/mozilla/TTS
"""
import random
from typing import List, Tuple
import torch
import numpy as np
import torch.utils.data
from tacotron2_gst import layers
from tacotron2_gst.text import text_to_sequence
from tacotron2_gst.utils import load_filepaths_and_text
from tacotron2_gst.audio_processing import load_wav_to_torch
class TextMelLoader(torch.utils.data.Dataset):
"""
1) loads audio,text pairs
2) normalizes text and converts them to sequences of one-hot vectors
3) computes mel-spectrograms from audio files.
"""
def __init__(self, audiopaths_and_text: str, hparams):
self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
self.text_cleaners = hparams.data.text_cleaners
self.max_wav_value = hparams.data.max_wav_value
self.sampling_rate = hparams.data.sampling_rate
self.load_mel_from_disk = hparams.data.load_mel_from_disk
self.stft = layers.TacotronSTFT(
hparams.data.filter_length, hparams.data.hop_length, hparams.data.win_length,
hparams.data.n_mel_channels, hparams.data.sampling_rate, hparams.data.mel_fmin,
hparams.data.mel_fmax)
random.seed(1234)
random.shuffle(self.audiopaths_and_text)
self.use_speaker_embedding = hparams.use_speaker_embedding
if self.use_speaker_embedding:
self.speaker_ids = self.create_speaker_lookup_table(self.audiopaths_and_text)
def get_data_sample(self, audiopath_and_text: List) -> Tuple[torch.IntTensor, torch.Tensor, torch.Tensor]:
# separate filename and text
audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
speaker_id = None
if self.use_speaker_embedding:
speaker_id = audiopath_and_text[2]
text = self.get_text(text)
mel = self.get_mel(audiopath)
if speaker_id is not None:
speaker_id = self.get_speaker_id(speaker_id)
return text, mel, speaker_id
def create_speaker_lookup_table(self, audiopaths_and_text):
speaker_ids = np.sort(np.unique([x[2] for x in audiopaths_and_text]))
d = {int(speaker_ids[i]): i for i in range(len(speaker_ids))}
print("Number of speakers :", len(d))
return d
def get_mel(self, filename: str) -> torch.Tensor:
if not self.load_mel_from_disk:
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
audio_norm = audio / self.max_wav_value
audio_norm = audio_norm.unsqueeze(0)
audio_norm = audio_norm.clone().detach()
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
else:
melspec = torch.from_numpy(np.load(filename))
assert melspec.size(0) == self.stft.n_mel_channels, (
'Mel dimension mismatch: given {}, expected {}'.format(
melspec.size(0), self.stft.n_mel_channels))
return melspec
def get_text(self, text: str) -> torch.IntTensor:
text_norm = torch.IntTensor(text_to_sequence(text, self.text_cleaners))
return text_norm
def get_speaker_id(self, speaker_id) -> torch.Tensor:
return torch.LongTensor([self.speaker_ids[int(speaker_id)]])
def __getitem__(self, index: int):
return self.get_data_sample(self.audiopaths_and_text[index])
def __len__(self):
return len(self.audiopaths_and_text)
class TextMelCollate():
""" Zero-pads model inputs and targets based on number of frames per setep
"""
def __init__(self, n_frames_per_step: int):
self.n_frames_per_step = n_frames_per_step
def __call__(self, batch):
"""Collate's training batch from normalized text and mel-spectrogram
PARAMS
------
batch: [text_normalized, mel_normalized]
"""
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch])
if max_target_len % self.n_frames_per_step != 0:
max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step
assert max_target_len % self.n_frames_per_step == 0
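        # e.g. with n_frames_per_step = 3 and a longest mel of 100 frames,
        # max_target_len is rounded up to 102 so the decoder emits whole frame groups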
# include mel padded and gate padded
mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)
mel_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
use_speaker_embedding = batch[0][2] is not None
if use_speaker_embedding:
speaker_ids = torch.LongTensor(len(batch))
else:
speaker_ids = None
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_padded[i, :, :mel.size(1)] = mel
gate_padded[i, mel.size(1) - 1:] = 1
output_lengths[i] = mel.size(1)
if use_speaker_embedding:
speaker_ids[i] = batch[ids_sorted_decreasing[i]][2]
return text_padded, input_lengths, mel_padded, speaker_ids, gate_padded, output_lengths
| 2.671875 | 3 |
type_conversion.py | Amber-Pittman/python-practice | 0 | 12799189 | <filename>type_conversion.py
birth_year = input("What year were you born?")
age = 2019 - int(birth_year)
print(f"Your age is: {age}") | 3.875 | 4 |
method.py | swiftops/JUNIT_RESULT_AGGREGATION | 1 | 12799190 | <reponame>swiftops/JUNIT_RESULT_AGGREGATION<filename>method.py<gh_stars>1-10
import map
from pymongo import MongoClient
import requests
from flask import jsonify
import json
import logging
logging.basicConfig(level=logging.DEBUG)
remotevalue = map.remotevalue
jenkinsdata = {}
build_id = ''
giturl = map.giturl
headers = {map.token_name: map.token_value}
headers1 = {'content-type': 'application/json'}
def insertintomnogo(Xmldata, build_id):
try:
# insert parsed xml data into mongodb
CLIENT = MongoClient(map.DB_IP, map.DB_PORT)
MONGO_PERF_DB = CLIENT.perf_db
MONGO_PERF_DB.authenticate(map.DB_USERNAME, map.DB_PASSWORD)
MONGO_PERF_COLLECTION = MONGO_PERF_DB.junit_test_suite
# get commit id from jenkins server
jenkinsdata = getjenkinsdata(build_id)
logging.debug(" Jenkinsdata" + json.dumps(jenkinsdata))
# get commit detials from git server
gitdata = getgitcommitdata(jenkinsdata['commitid'])
logging.debug(" GitData" + json.dumps(gitdata))
CommitMessage = gitdata['message'].split('<')[1].split(':')[0]
query = {'CommitID': gitdata['id'], 'SHA': gitdata['short_id'], 'CommitMessage': gitdata['message'],
'AuthorName': gitdata['author_name'], 'Author_Email': gitdata['author_email'], 'BuildNumber': build_id,
'Branchname': jenkinsdata['branchname'], 'Ownercode': CommitMessage,
'URL': map.jenkins_public_url_prefix + build_id + map.jenkins_url_result,
'Junit_test': Xmldata}
MONGO_PERF_COLLECTION.insert_one(query)
# call defect creation service
resp = requests.post(map.defect_service_url, data=json.dumps(gitdata),
headers=headers1)
return resp.text
except Exception as e:
response = {
"success": "false",
"data": {
"Result": "Build Failed"
},
"error": {"Message": str(e)}
}
return jsonify(response)
def getjenkinsdata(build_id):
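    # Pull the branch name from the build's parameters action, then look up the commit
    # SHA recorded for that branch in the git action's buildsByBranchName map.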
r = requests.get(map.jenkins_url_prefix + build_id + map.jenkins_url_postfix,
auth=(map.jenkins_username, map.jenkins_password))
data = r.json()
for item in data['actions']:
if 'parameters' in item:
jenkinsdata['branchname'] = item['parameters'][0]['value']
searchremotevalue = remotevalue + jenkinsdata['branchname']
for item in data['actions']:
if 'buildsByBranchName' in item:
if searchremotevalue in item['buildsByBranchName']:
jenkinsdata['commitid'] = item['buildsByBranchName'][searchremotevalue]['marked']['SHA1']
return jenkinsdata
def getgitcommitdata(commit_id):
response = requests.get(giturl+commit_id, headers = headers, proxies={'http': '10.0.10.251:<proxy_url>'},timeout=5)
return response.json()
def junit_nightlybuild_data(Xmldata, rel_no, build_no, junit_url, branch_name):
try:
# insert parsed xml data into mongodb for nightly build
CLIENT = MongoClient(map.DB_IP, map.DB_PORT)
MONGO_PERF_DB = CLIENT.perf_db
MONGO_PERF_DB.authenticate(map.DB_USERNAME, map.DB_PASSWORD)
MONGO_PERF_COLLECTION = MONGO_PERF_DB.junit_nightly_build
data = {'BranchName': branch_name, 'Release No': rel_no, 'Build No': build_no,
'JunitURL': junit_url, 'JunitData': Xmldata}
MONGO_PERF_COLLECTION.insert_one(data)
return 'SUCCESS'
except Exception as e:
response = {
"success": "false",
"data": {
"Result": "Build Failed To get data for nightly build"
},
"error": {"Message": str(e)}
}
return jsonify(response) | 2.453125 | 2 |
src/utilities/plot_utilities.py | m-rubik/Grow-Space | 2 | 12799191 | """!
All functions providing plotting functionalities.
"""
import matplotlib.pylab as plt
import matplotlib.dates as mdates
import matplotlib.image as image
import pandas as pd
import re
import argparse
import datetime as dt
import numpy as np
from pandas.plotting import register_matplotlib_converters
from datetime import datetime
register_matplotlib_converters()
plt.rcParams.update({'font.size': 22})
environment_sensor_pattern = re.compile(r"([0-9-]+)\s([0-9:.]+):\stemperature:\s([0-9.]+),\sgas:\s([0-9]+),\shumidity:\s([0-9.]+),\spressure:\s([0-9.]+),\saltitude:\s([0-9.]+)", re.MULTILINE)
soil_moisture_pattern = re.compile(r"([0-9-]+)\s([0-9.:]+):\s\[([0-9]+),\s([0-9.]+),\s([0-9.]+)\]", re.MULTILINE)
def plot_soil_moisture(dict, past24):
"""!
    Plots soil moisture data in a simple line chart.

    @param dict: Dictionary containing timestamps and associated readings.
    @param past24: If True, restrict the x-axis to the most recent 24 hours and save an extra *_24H.png chart.
"""
lists = sorted(dict.items())
x, y = zip(*lists)
fig, ax = plt.subplots()
ax.plot(x, y, 'k', linewidth=2)
fig.autofmt_xdate()
hours6 = mdates.HourLocator(interval=6)
hours3 = mdates.HourLocator(interval=3)
# im = image.imread('./icons/Grow_Space_Logo.png')
# fig.figimage(im, 300, 0, zorder=3, alpha=0.2)
ax.xaxis.set_minor_locator(hours3)
ax.tick_params(which='major', length=7, width=2, color='black')
ax.tick_params(which='minor', length=4, width=2, color='black')
ax.xaxis.set_major_locator(hours6)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d - %H'))
ax.grid()
plt.xlabel("Day - Hour")
plt.ylabel("Moisture Percentage (%)")
plt.title("Soil Moisture % vs Time")
DPI = fig.get_dpi()
fig.set_size_inches(2400.0/float(DPI),1220.0/float(DPI))
if past24:
datemin = np.datetime64(x[-1], 'h') - np.timedelta64(24, 'h')
datemax = np.datetime64(x[-1], 'h')
ax.set_xlim(datemin, datemax)
plt.xlabel("Hour")
plt.title("Soil Moisture % Past 24 Hrs")
ax.xaxis.set_major_locator(hours3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.savefig('Moisture_vs_Time_24H.png', dpi=500)
plt.savefig('Moisture_vs_Time.png', dpi=500)
# plt.show()
def plot_temperature(dict, past24):
"""!
    Plots temperature data in a simple line chart
    @param dict: Dictionary containing timestamps and associated readings.
"""
lists = sorted(dict.items())
x, y = zip(*lists)
fig, ax = plt.subplots()
ax.plot(x, y, 'k', linewidth=2)
fig.autofmt_xdate()
hours6 = mdates.HourLocator(interval=6)
hours3 = mdates.HourLocator(interval=3)
# im = image.imread('./icons/Grow_Space_Logo.png')
# fig.figimage(im, 650, 0, zorder=3, alpha=0.2)
ax.xaxis.set_major_locator(hours6)
ax.xaxis.set_minor_locator(hours3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d - %H'))
ax.tick_params(which='major', length=7, width=2, color='black')
ax.tick_params(which='minor', length=4, width=2, color='black')
ax.grid()
plt.title("Temperature Over Time")
plt.xlabel("Time (Month-Day Hour)")
plt.ylabel("Temperature (°C)")
DPI = fig.get_dpi()
fig.set_size_inches(2400.0/float(DPI),1220.0/float(DPI))
if past24:
datemin = np.datetime64(x[-1], 'h') - np.timedelta64(24, 'h')
datemax = np.datetime64(x[-1], 'h')
ax.set_xlim(datemin, datemax)
plt.xlabel("Hour")
plt.title('Temperature Past 24 Hrs')
ax.xaxis.set_major_locator(hours3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.savefig('Temperature_vs_Time_24H.png', dpi=500)
plt.savefig('Temperature_vs_Time.png', dpi=500)
# plt.show()
def boxplot_environment(df):
"""!
Creates a boxplot of all the relevant environment sensor data.
What is a boxplot?
Text from https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.boxplot.html:
The box extends from the Q1 to Q3 quartile values of the data, with a line at the median (Q2).
The whiskers extend from the edges of box to show the range of the data.
The position of the whiskers is set by default to 1.5 * IQR (IQR = Q3 - Q1) from the edges of the box.
Outlier points are those past the end of the whiskers.
@param df: dataframe object from which we generate a boxplot.
"""
df['VOC'] = df['VOC'].div(1000)
# with plt.style.context("seaborn"):
fig, ax = plt.subplots(1, 3)
fig.suptitle('Environment Sensor Data')
df.boxplot('Temperature', ax=ax[0])
df.boxplot('VOC', ax=ax[1], fontsize=12)
df.boxplot('Humidity', ax=ax[2])
ax[0].set_ylabel("Temperature (°C)")
ax[1].set_ylabel("VOC (kΩ)")
ax[2].set_ylabel("Humidity (%)")
plt.subplots_adjust(top=0.95)
DPI = fig.get_dpi()
fig.set_size_inches(2400.0/float(DPI),1220.0/float(DPI))
plt.savefig('Environment_Boxplot.png', dpi=500)
# plt.show()
def extract_data_from_log(data, pattern):
"""!
Function for extracting data out of a log file using regex matching.
Returns all regex match objects.
@param data: Raw data from the log file.
@param pattern: Regex pattern to use for matching.
"""
matches = list()
for line in data:
matches.append(re.match(pattern, line))
return matches
def generate_plots(root="./logs/", soil_sensor_log="soil_moisture_sensor_1.txt", environment_sensor_log="environment_sensor.txt"):
# Plot soil moisture data
with open(root+soil_sensor_log, "r") as myfile:
data = myfile.readlines()
matches = extract_data_from_log(data, soil_moisture_pattern)
data_dict = dict()
for match in matches:
# current_val = float(match.group(4)) # Raw voltage reading
current_val = float(match.group(5)) # Percentage reading
index_time = match.group(1) + " " + match.group(2)
index_dt = dt.datetime.strptime(index_time, "%Y-%m-%d %H:%M:%S.%f")
data_dict[index_dt] = current_val
plot_soil_moisture(data_dict, True)
plot_soil_moisture(data_dict, False)
# Plot temperature data
with open(root+environment_sensor_log, "r") as myfile:
data = myfile.readlines()
matches = extract_data_from_log(data, environment_sensor_pattern)
data_dict = dict()
temperature_dict = dict()
data_dict['Temperature'] = {}
data_dict['VOC'] = {}
data_dict['Humidity'] = {}
for match in matches:
index_time = match.group(1) + " " + match.group(2)
index_dt = dt.datetime.strptime(index_time, "%Y-%m-%d %H:%M:%S.%f")
data_dict['Temperature'][index_dt] = float(match.group(3))
data_dict['VOC'][index_dt] = float(match.group(4))
data_dict['Humidity'][index_dt] = float(match.group(5))
plot_temperature(data_dict['Temperature'], True)
plot_temperature(data_dict['Temperature'], False)
# Plot environment sensor data
df = pd.DataFrame.from_dict(data_dict, orient='columns')
df.reset_index(inplace=True)
boxplot_environment(df)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-r', '--root', type=str, default="", help='Root filepath of the log data')
parser.add_argument('-s', '--soil', type=str, default="soil_moisture_sensor_1.txt", help='Name of soil moisture sensor log file')
    parser.add_argument('-e', '--environment', type=str, default="environment_sensor.txt", help='Name of the environment sensor log file')
args = parser.parse_args()
if args.root:
root_folder = "./logs/"+args.root+"/"
else:
root_folder = "./logs/"
generate_plots(root_folder, args.soil, args.environment)
| 2.8125 | 3 |
lib/GenericsUtil/GenericsUtilClient.py | jsfillman/GenericsUtil | 0 | 12799192 | <reponame>jsfillman/GenericsUtil
# -*- coding: utf-8 -*-
############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
############################################################
from __future__ import print_function
# the following is a hack to get the baseclient to import whether we're in a
# package or not. This makes pep8 unhappy hence the annotations.
try:
# baseclient and this client are in a package
from .baseclient import BaseClient as _BaseClient # @UnusedImport
except:
# no they aren't
from baseclient import BaseClient as _BaseClient # @Reimport
class GenericsUtil(object):
def __init__(
self, url=None, timeout=30 * 60, user_id=None,
password=<PASSWORD>, token=<PASSWORD>, ignore_authrc=False,
trust_all_ssl_certificates=False,
auth_svc='https://kbase.us/services/authorization/Sessions/Login'):
if url is None:
raise ValueError('A url is required')
self._service_ver = None
self._client = _BaseClient(
url, timeout=timeout, user_id=user_id, password=password,
token=token, ignore_authrc=ignore_authrc,
trust_all_ssl_certificates=trust_all_ssl_certificates,
auth_svc=auth_svc)
def import_csv(self, params, context=None):
"""
:param params: instance of type "ImportCSVParams" -> structure:
parameter "file" of type "File" (Import a CSV file into a NDArray
or HNDArray. "File" and "usermeta" are common to all import
methods.) -> structure: parameter "path" of String, parameter
"shock_id" of String, parameter "workspace_name" of String,
parameter "object_name" of String, parameter "object_type" of
String, parameter "metadata" of type "usermeta" -> mapping from
String to String
:returns: instance of type "ImportResult" -> structure: parameter
"object_ref" of String
"""
return self._client.call_method(
'GenericsUtil.import_csv',
[params], self._service_ver, context)
def import_obo(self, params, context=None):
"""
:param params: instance of type "ImportOBOParams" (Import an OBO file
into an OntologyDictionary) -> structure: parameter "file" of type
"File" (Import a CSV file into a NDArray or HNDArray. "File" and
"usermeta" are common to all import methods.) -> structure:
parameter "path" of String, parameter "shock_id" of String,
parameter "workspace_name" of String, parameter "object_name" of
String, parameter "metadata" of type "usermeta" -> mapping from
String to String
:returns: instance of type "ImportResult" -> structure: parameter
"object_ref" of String
"""
return self._client.call_method(
'GenericsUtil.import_obo',
[params], self._service_ver, context)
def export_csv(self, params, context=None):
"""
:param params: instance of type "ExportParams" (Exporter for generic
objects as CSV files) -> structure: parameter "input_ref" of String
:returns: instance of type "ExportResult" -> structure: parameter
"shock_id" of String
"""
return self._client.call_method(
'GenericsUtil.export_csv',
[params], self._service_ver, context)
def list_generic_objects(self, params, context=None):
"""
:param params: instance of type "ListGenericObjectsParams" (List
generic objects in one or more workspaces optional parameters:
allowed_object_types - limits to specific types of object, e.g.,
KBaseGenerics.NDArray (version number is optional)
allowed_data_types - limits to specific data types, e.g.,
microbial growth allowed_scalar_types - limits to specific scalar
types, e.g., object_ref, int, float (see KBaseGenerics.spec for
valid types). HNDArrays must have at least one dimension that
passes. min_dimensions - limits to generics with minimum number of
dimensions max_dimensions - limits to generics with max number of
dimensions limit_mapped - if 0 (or unset) returns all objects. if
1, returns only mapped objects. if 2, returns only umapped
objects) -> structure: parameter "workspace_names" of list of
String, parameter "allowed_object_types" of list of String,
parameter "allowed_data_types" of list of String, parameter
"allowed_scalar_types" of list of String, parameter
"min_dimensions" of Long, parameter "max_dimensions" of Long,
parameter "limit_mapped" of Long
:returns: instance of type "ListGenericObjectsResult" -> structure:
parameter "object_ids" of list of String
"""
return self._client.call_method(
'GenericsUtil.list_generic_objects',
[params], self._service_ver, context)
def get_generic_metadata(self, params, context=None):
"""
:param params: instance of type "GetGenericMetadataParams" (Get
metadata describing the dimensions of one or more generic objects)
-> structure: parameter "object_ids" of list of String
:returns: instance of type "GetGenericMetadataResult" (maps object
ids to structure with metadata) -> structure: parameter
"object_info" of mapping from String to type "GenericMetadata"
(Basic metadata about an object: object_type - e.g.,
KBaseGenerics.HNDArray‑4.0 data_type - e.g., microbial growth
n_dimensions - number of dimensions is_mapped - 0 or 1 indicating
mapped status value_types - list of value types in the object
(there will only be 1 for NDArray objects), e.g., "specific
activity" scalar_types - list of scalar types in the object (there
will only be 1 for NDArray objects), e.g., "float" dimension_types
- a string describing each dimension (e.g., "media name")
dimension_sizes - size (length) of each dimension
dimension_value_types - a string describing each context of each
dimension (e.g., "media name") dimension_scalar_types - type of
values in each context of each dimension (e.g., "int")) ->
structure: parameter "object_type" of String, parameter
"data_type" of String, parameter "n_dimensions" of Long, parameter
"is_mapped" of type "boolean", parameter "value_types" of list of
String, parameter "scalar_types" of list of String, parameter
"dimension_types" of list of String, parameter "dimension_sizes"
of list of Long, parameter "has_unique_subindices" of list of type
"boolean", parameter "dimension_value_types" of list of list of
String, parameter "dimension_scalar_types" of list of list of
String
"""
return self._client.call_method(
'GenericsUtil.get_generic_metadata',
[params], self._service_ver, context)
def get_generic_dimension_labels(self, params, context=None):
"""
:param params: instance of type "GetGenericDimensionLabelsParams"
(gets labels for list of dimension axes for a generic object. User
will pass in the numeric indices of all dimensions they care about
(e.g., 1/1 will mean 1st dimension, 1st data type, 2/1 = 2nd
dimension, 1st data type), and an optional flag,
convert_to_string. The API will return a hash mapping each of the
dimension indices to a Values object. The Values will either
contain the scalar type in the original format, or if the
convert_to_string flag is set, will convert the scalar type to
strings. If unique_values is set, the API will only return the
unique values in each dimension (these will also be re-indexed,
but not resorted, so the Values array may be a different length).)
-> structure: parameter "object_id" of String, parameter
"dimension_ids" of list of String, parameter "convert_to_string"
of type "boolean", parameter "unique_values" of type "boolean"
:returns: instance of type "GetGenericDimensionLabelsResult" ->
structure: parameter "dimension_labels" of mapping from String to
type "Values" (@optional object_refs oterm_refs int_values
float_values string_values boolean_values) -> structure: parameter
"scalar_type" of type "data_type", parameter "object_refs" of list
of type "object_ref", parameter "oterm_refs" of list of type
"oterm_ref", parameter "int_values" of list of Long, parameter
"float_values" of list of Double, parameter "boolean_values" of
list of type "boolean", parameter "string_values" of list of String
"""
return self._client.call_method(
'GenericsUtil.get_generic_dimension_labels',
[params], self._service_ver, context)
def get_generic_data(self, params, context=None):
"""
:param params: instance of type "GetGenericDataParams" (gets subset
of generic data as a 2D matrix Users passes in the dimension
indices to use as variables (1st one must be X axis; additional
variables will lead to additional series being returned). User
selects which dimension indices to fix to particular constants.
This can be done one of two ways: either by fixing an entire
dimension (e.g., "2" for the 2nd dimension) to an index in the
complete list of labels, or by fixing a dimension index (e.g.,
"2/3" for the 3rd type of values in the 2nd dimension) to an index
in the list of unique labels for that dimension index. returns:
series_labels will show which variable index values correspond to
which series values_x will contain 1 list of of x-axis values per
series. The number of series depends on the number of variable
dimensions. values_y will contain 1 list of of y-axis values per
series. The number of series depends on the number of variable
dimensions. In each series, values where either the X and Y data
are null are removed.) -> structure: parameter "object_id" of
String, parameter "variable_dimension_ids" of list of String,
parameter "constant_dimension_ids" of mapping from String to Long
:returns: instance of type "GetGenericDataResult" -> structure:
parameter "series_labels" of list of String, parameter "values_x"
of list of type "Values" (@optional object_refs oterm_refs
int_values float_values string_values boolean_values) ->
structure: parameter "scalar_type" of type "data_type", parameter
"object_refs" of list of type "object_ref", parameter "oterm_refs"
of list of type "oterm_ref", parameter "int_values" of list of
Long, parameter "float_values" of list of Double, parameter
"boolean_values" of list of type "boolean", parameter
"string_values" of list of String, parameter "values_y" of list of
type "Values" (@optional object_refs oterm_refs int_values
float_values string_values boolean_values) -> structure: parameter
"scalar_type" of type "data_type", parameter "object_refs" of list
of type "object_ref", parameter "oterm_refs" of list of type
"oterm_ref", parameter "int_values" of list of Long, parameter
"float_values" of list of Double, parameter "boolean_values" of
list of type "boolean", parameter "string_values" of list of String
"""
return self._client.call_method(
'GenericsUtil.get_generic_data',
[params], self._service_ver, context)
def status(self, context=None):
return self._client.call_method('GenericsUtil.status',
[], self._service_ver, context)
| 1.976563 | 2 |
dispatcher.py | IakovTaranenko/theia | 0 | 12799193 | <reponame>IakovTaranenko/theia<filename>dispatcher.py
import discord, aiohttp, os, asyncio, colorama
from colorama import Fore
import video, image, audio, config
colorama.init()
async def dispatch(msg: discord.Message, ctx):
msgLower = msg.content.lower()
if msg.attachments:
        for attachment in msg.attachments:
header = header_type(attachment.url)
print(f'{Fore.WHITE}[ATTACHMENT FOUND] type: {attachment.content_type}')
if header == 'VIDEO':
await video.process_file(await save_attachment(attachment, msg), msg, ctx, attachment)
elif header == 'GIF':
await video.process_file(await save_attachment(attachment, msg), msg, ctx)
elif header == 'PICTURE':
await image.process_file(await save_attachment(attachment, msg), msg, ctx)
elif header == 'AUDIO':
await audio.process_file(await save_attachment(attachment, msg), msg, ctx)
else:
print(f'{Fore.RED}[DISPATCHER] failed to get file type from attachment{Fore.WHITE}')
elif msg.embeds or (msgLower.__contains__('cdn.') or msgLower.__contains__('media.')):
pass
''' for embed in msg.embeds:
if header_type(msgLower) is not None:
header = header_type(msgLower)
if header == 'VIDEO':
print(await save_embed(embed, msg))
video.process_file(embed, True)
elif header == 'GIF':
print(await save_embed(embed, msg))
video.process_file(embed, False)
elif header == 'PICTURE':
print(await save_embed(embed, msg))
image.process_file(embed)
elif header == 'AUDIO':
print(await save_embed(embed, msg))
audio.process_file(embed)
else:
print('failed to get file type from attachment')
else:
await msg.reply('good embed')
print(msgLower) '''
def header_type(attachment_url):
for key in config.FILE_HEADERS.keys():
for header in config.FILE_HEADERS[key]:
if attachment_url.__contains__(header):
return key
return None
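# Hypothetical shape of config.FILE_HEADERS assumed by header_type() above;
# the real config module is not shown here, so treat this as a sketch only:
#     FILE_HEADERS = {
#         'VIDEO':   ['.mp4', '.mov'],
#         'GIF':     ['.gif'],
#         'PICTURE': ['.png', '.jpg'],
#         'AUDIO':   ['.mp3', '.wav'],
#     }
# header_type() returns the key whose substring list matches the attachment
# URL, or None when nothing matches.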
async def save_attachment(attachment, msg):
if attachment.url and attachment.id:
await attachment.save(f'temp/{msg.id}{attachment.filename}')
return(f'temp/{msg.id}{attachment.filename}')
else:
print(f'{Fore.RED}[ATTACHMENT SAVING] attachment save requested did not have a .url or a .id{Fore.WHITE}')
async def save_embed(embed, msg):
if embed.url:
async with aiohttp.ClientSession() as session:
async with session.get(embed.url) as resp:
if resp.status == 200:
attachment_name = os.path.basename(embed.url)
with open(f'temp/{msg.id}', "wb") as file:
file.write(await resp.read())
else:
print('[EMBED SAVING] embed save requested did not have any content attached to it') | 2.3125 | 2 |
utils/feature_map.py | ankyhe/coursera-quiz-assignment | 0 | 12799194 | from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(6)
def map(x):
return poly.fit_transform(x)
| 1.960938 | 2 |
intpy/__init__.py | claytonchagas/intpyplus | 0 | 12799195 | from .intpy import deterministic
from .data_access import get_cache_data, create_entry
name="int_py" | 1.21875 | 1 |
liberaction/sales/models.py | Computeiros-Estonia/liberaction-api | 0 | 12799196 | <gh_stars>0
from django.db import models
from liberaction.users.models import User
from liberaction.core.models import BaseProduct
class Cart(models.Model):
is_open = models.BooleanField(default=True, verbose_name='Carrinho em aberto', help_text='Determina se a compra do carrinho está em aberto.')
class Meta:
verbose_name = 'carrinho de compras'
verbose_name_plural = 'carrinhos de compras'
def __str__(self):
return f'Carrinho #{self.id}'
def get_items(self):
return CartItem.objects.filter(cart=self)
class CartItem(models.Model):
cart = models.ForeignKey(Cart, on_delete=models.CASCADE, verbose_name='carrinho de compras')
product = models.ForeignKey(BaseProduct, on_delete=models.SET_NULL, null=True, verbose_name='produto')
product_count = models.IntegerField('unidades')
class Meta:
verbose_name = 'item do carrinho'
verbose_name_plural = 'itens dos carrinhos'
def __str__(self):
return f'Cart #{self.cart.id} - {self.product}'
class Sale(models.Model):
buyer = models.ForeignKey(User,on_delete=models.SET_NULL, null=True)
cart = models.OneToOneField(Cart, on_delete=models.CASCADE, verbose_name='carrinho de compras')
freight = models.FloatField('frete')
class Meta:
verbose_name = 'venda'
verbose_name_plural = 'vendas'
def __str__(self):
return f'Sale #{self.id} - {self.buyer}'
def get_items(self):
return self.cart.get_items()
def get_subtotal(self):
subtotal = 0
for i in self.get_items():
subtotal += i.product.price
return subtotal
def get_total(self):
return self.get_subtotal() + self.freight
| 2.015625 | 2 |
christelle/migrations/0005_rename_nom_contact_name.py | OBAMARIE13/portfolios | 0 | 12799197 | <reponame>OBAMARIE13/portfolios<filename>christelle/migrations/0005_rename_nom_contact_name.py<gh_stars>0
# Generated by Django 3.2.7 on 2021-10-22 10:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('christelle', '0004_remove_about_fonction'),
]
operations = [
migrations.RenameField(
model_name='contact',
old_name='nom',
new_name='name',
),
]
| 1.65625 | 2 |
Linear-Programing-Optimizing/Car-Factory-Problem.py | aminzayer/Amin-University-Data-Science | 2 | 12799198 | <reponame>aminzayer/Amin-University-Data-Science<filename>Linear-Programing-Optimizing/Car-Factory-Problem.py
# Import pulp
from pulp import *
# Create an Instance of LpProblem
problem = LpProblem('Car Factory', LpMaximize)
# Create Decision Variables
A = LpVariable('Car A', lowBound=0, cat=LpInteger)
B = LpVariable('Car B', lowBound=0, cat=LpInteger)
#Objective Function
problem += 20000*A + 45000*B, 'Objective Function'
#Constraints
problem += 4*A + 5*B <= 30, 'Designer Constraint'
problem += 3*A + 6*B <= 30, 'Engineer Constraint'
problem += 2*A + 7*B <= 30, 'Machine Constraint'
# Car_Profit:
# MAXIMIZE
# 20000*Car_A + 45000*Car_B + 0
# SUBJECT TO
# Designer_Constraint: 4 Car_A + 5 Car_B <= 30
# Engineer_Constraint: 3 Car_A + 6 Car_B <= 30
# Machine_Constraint: 2 Car_A + 7 Car_B <= 30
# VARIABLES
# 0 <= Car_A Integer
# 0 <= Car_B Integer
print(problem)
print("Current Status: ", LpStatus[problem.status])
problem.solve()
print("Number of Car A Made: ", A.varValue)
print("Number of Car B Made: ", B.varValue)
print("Total Profit: ", value(problem.objective))
| 3.65625 | 4 |
education/constants.py | photoboard/photoboard-django | 0 | 12799199 | from django.utils.translation import gettext as _
DAY_CHOICES = (
(1, _('Monday')),
(2, _('Tuesday')),
(3, _('Wednesday')),
(4, _('Thursday')),
(5, _('Friday')),
(6, _('Saturday')),
(7, _('Sunday')),
) | 1.890625 | 2 |
plugins/polio/migrations/0017_campaign_gpei_email.py | BLSQ/iaso-copy | 29 | 12799200 | # Generated by Django 3.1.12 on 2021-07-12 09:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("polio", "0016_config"),
]
operations = [
migrations.AddField(
model_name="campaign",
name="gpei_email",
field=models.EmailField(blank=True, max_length=254, null=True),
),
]
| 1.609375 | 2 |
tools/checkerBoard.py | valette/FROG | 15 | 12799201 | <filename>tools/checkerBoard.py
import SimpleITK as sitk
import sys
gridSpacing = 30;
file = sys.argv[1]
image = sitk.ReadImage( file )
size = image.GetSize();
black = sitk.Image( size, sitk.sitkUInt8 )
black.SetOrigin( image.GetOrigin() )
spacing = image.GetSpacing();
black.SetSpacing( spacing )
black.SetDirection( image.GetDirection() )
threshold = sitk.ThresholdImageFilter()
threshold.SetLower( 10 )
threshold.SetOutsideValue ( 100 )
white = threshold.Execute( black )
threshold.SetOutsideValue ( 50 )
grey = threshold.Execute( black )
checker = sitk.CheckerBoardImageFilter();
pattern = [ 0, 0, 0 ]
for i in [ 0, 1, 2 ] :
pattern[ i ] = int( size[ i ] * spacing[ i ] / gridSpacing )
pattern[ 0 ] = 1
print(pattern)
checker.SetCheckerPattern( pattern );
board = checker.Execute( grey, white );
sitk.WriteImage( board , "output.nii.gz" )
| 2.734375 | 3 |
marketlearn/portfolio/__init__.py | mrajancsr/QuantEquityManagement | 2 | 12799202 | from marketlearn.portfolio.asset import Asset # noqa
from marketlearn.portfolio.harry import Harry # noqa
| 1.039063 | 1 |
bloodytracker/database.py | the10ccm/bloodytracker | 0 | 12799203 | import os
import sqlite3
import random
import string
import time
import datetime
from datetime import timedelta
import operator
from tabulate import tabulate
import config
TS_GROUP_BY = dict(
timestamp=0b10000,
project=0b1000,
task=0b0100,
track=0b0010,
date=0b0001
)
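# The grouping mask is a bitwise OR of the flags above; for example
#     TS_GROUP_BY['date'] | TS_GROUP_BY['project']
# makes Database.get_group_by_clause() emit a clause grouping by
# DATE(started) and Tasks.project_id, i.e. one aggregated timesheet row
# per project per day.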
class Database:
def init_db(self, db_path):
self.conn = sqlite3.connect(
db_path,
detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES
)
self.conn.row_factory = sqlite3.Row
self.conn.text_factory = lambda x: x.decode('utf8')
#try:
#except sqlite3.OperationalError:
self.cursor = self.conn.cursor()
def close_db(self):
self.conn.close()
def create_db(self):
self.cursor.execute("PRAGMA foreign_keys = ON")
# Create Tables if do the not exist
# PROJECTS
self.cursor.execute(
'CREATE TABLE IF NOT EXISTS Projects('
' id INTEGER PRIMARY KEY, '
' customer_id INTEGER, '
' name VARCHAR UNIQUE COLLATE NOCASE, '
' description TEXT DEFAULT "", '
' created TIMESTAMP'
')')
# TASKS
self.cursor.execute(
'CREATE TABLE IF NOT EXISTS Tasks('
' id INTEGER PRIMARY KEY, '
' project_id INTEGER REFERENCES Projects(id) ON DELETE CASCADE, '
' name VARCHAR COLLATE NOCASE, '
' description TEXT DEFAULT ""'
')')
# TRACKS
self.cursor.execute(
'CREATE TABLE IF NOT EXISTS Tracks('
' id INTEGER PRIMARY KEY, '
' task_id INTEGER REFERENCES Tasks(id) ON DELETE CASCADE, '
' started TIMESTAMP, '
' finished TIMESTAMP, '
' is_billed INTEGER DEFAULT 1'
')')
# CUSTOMERS
self.cursor.execute(
'CREATE TABLE IF NOT EXISTS Customers('
'id INTEGER PRIMARY KEY, '
'name VARCHAR UNIQUE COLLATE NOCASE, '
'description TEXT, '
'created TIMESTAMP'
')')
self.conn.commit()
def __init__(self, db_name):
# create DB
self.init_db(db_name)
self.create_db()
def insert_test_task(self, project_id):
name = ''.join(random.choice(
string.ascii_uppercase + string.digits) for _ in range(3))
self.cursor.execute(
"insert into Tasks ('name', 'project_id') "
"values('%s', '%s')" % (name, project_id)
)
self.conn.commit()
return self.cursor.lastrowid
def fill(self):
"""Fill with the test tasks"""
self.cursor.execute('DELETE FROM Customers')
self.cursor.execute('DELETE FROM Projects')
self.cursor.execute('DELETE FROM Tasks')
self.cursor.execute('DELETE FROM Tracks')
# Add a Customer
self.cursor.execute(
"insert into Customers ('name', 'description') "
"VALUES ('Andrey', 'Customer Numer One')")
self.cursor.execute("SELECT * FROM Customers ORDER BY id LIMIT 1")
customers = self.cursor.fetchone()
#print('filled customers', customers)
# Add a Project
self.create_project('p1', 'Test Project #1')
self.cursor.execute("SELECT * FROM Projects ORDER BY id LIMIT 1")
project = self.cursor.fetchone()
#print('filled projects', project)
# Add the Task
last_task = self.insert_test_task(project_id=1)
# Add the Tracks
started = datetime.datetime.now() - timedelta(days=4)
self.create_track(last_task, started=started,
finished=started + timedelta(seconds=3601))
self.create_track(last_task, started=started+timedelta(seconds=13600),
finished=started+timedelta(seconds=14600))
self.create_track(last_task, started=started+timedelta(seconds=15600),
finished=started+timedelta(seconds=16600))
last_task = self.insert_test_task(project_id=1)
self.create_track(last_task, started=started+timedelta(seconds=17600),
finished=started+timedelta(seconds=18600))
self.create_track(last_task, started=started+timedelta(seconds=19600),
finished=started+timedelta(seconds=20600))
# Add a Project #2
self.create_project('p2', 'Test Project #1')
self.cursor.execute("SELECT * FROM Projects ORDER BY id LIMIT 1")
project = self.cursor.fetchone()
#print('filled projects', project)
# Add the Task
tasks = []
last_task = self.insert_test_task(project_id=2)
self.create_track(last_task, started=started+timedelta(seconds=21600),
finished=started+timedelta(seconds=22600))
self.create_track(last_task, started=started+timedelta(seconds=23600),
finished=started+timedelta(seconds=24600))
self.create_track(last_task, started=started+timedelta(seconds=25600),
finished=started+timedelta(seconds=26600))
started = datetime.datetime.now() - timedelta(days=3)
self.create_track(last_task, started=started,
finished=started + timedelta(seconds=3600))
started = datetime.datetime.now() - timedelta(days=2)
self.create_track(last_task, started=started,
finished=started + timedelta(seconds=3600))
started = datetime.datetime.now() - timedelta(days=1)
self.create_track(last_task, started=started,
finished=started + timedelta(seconds=3600))
started = datetime.datetime.now() - timedelta(seconds=3300)
self.create_track(last_task, started=started,
finished=started + timedelta(seconds=600))
last_track = self.create_track(last_task)
self.cursor.execute("SELECT * FROM Tracks ")
tracks = self.cursor.fetchall()
#print('filled tracks', tracks)
print(tabulate(tracks, ['Track id', 'Task id', 'started', 'finished', 'billed'],
tablefmt='simple'))
return
# CUSTOMERS
def get_customer(self, customer):
self.cursor.execute(
"SELECT id, name FROM Customers "
"WHERE name == '{name:s}'".format(name=customer)
)
customer = self.cursor.fetchone()
return customer
def get_customer_or_create(self, customer):
self.cursor.execute(
"SELECT id, name FROM Customers "
"WHERE name == '{name:s}'".format(name=customer)
)
        existing = self.cursor.fetchone()
        if existing:
            return existing
        # Not found: insert the customer, keeping the name argument intact
        self.cursor.execute(
            "INSERT INTO Customers ('name')"
            "VALUES ('{name:s}')"
            .format(name=customer)
        )
        self.conn.commit()
        return self.get_customer(customer)
# PROJECTS
def get_project_by_name(self, pname):
self.cursor.execute(
"SELECT "
" id as pid, name as pname, created as created, "
" description as description "
"FROM Projects "
"WHERE "
" Projects.name == ?", (pname.encode('utf8'),)
)
return self.cursor.fetchone()
def update_project(self, pid, name, description):
"""Updates a project"""
self.cursor.execute(
"UPDATE Projects "
"SET name=?, description=?"
"WHERE id=?", (name.encode('utf8'), description.encode('utf8'),
pid)
)
self.conn.commit()
def is_project_existent(self, pname, pid):
"""Checks if project already exists """
self.cursor.execute(
"SELECT "
" id as pid, name as name, created as created, "
" description as description "
"FROM Projects "
"WHERE "
" pid == '{pid}'"
" name == '{name}'".format(name=pname.encode('utf8'), pid=pid)
)
return self.cursor.fetchone()
def get_projects_with_activity_field(self, from_date='', to_date='', limit=0):
"""Get list of project including a field is a project is finished"""
where_clause = first_limit_clause = last_limit_clause = ''
if limit:
first_limit_clause = "SELECT * FROM ("
last_limit_clause = " DESC LIMIT %d) ORDER BY pid ASC" % limit
if from_date and to_date:
where_clause = " AND DATE(Projects.created) BETWEEN '{from_date}' " \
"AND '{to_date}' ".format(from_date=from_date,
to_date=to_date)
self.cursor.execute(
"{first_limit_clause}"
"SELECT "
" Projects.id as pid, Projects.name, Projects.created, "
" Projects.description, "
" SUM(CASE WHEN Tracks.finished == '' THEN 1 ELSE 0 end) AS active "
"FROM Projects, Tracks, Tasks "
"WHERE "
" Tasks.project_id == Projects.id AND "
" Tracks.task_id == Tasks.id {where_clause}"
"GROUP BY Projects.id "
"UNION SELECT "
" Projects.id as pid, Projects.name, Projects.created,"
" Projects.description, '' as active "
"FROM Projects "
"WHERE NOT EXISTS ("
" SELECT id FROM Tasks WHERE "
" Tasks.project_id == Projects.id "
") {where_clause}"
"ORDER BY Projects.id {last_limit_clause}".format(
where_clause=where_clause, first_limit_clause=first_limit_clause,
last_limit_clause=last_limit_clause)
)
return self.cursor.fetchall()
def create_project(self, pname, description=''):
"""Create a project"""
self.cursor.execute(
"INSERT INTO Projects ('name', 'description', created)"
"VALUES (?, ?, ?)", (
pname.encode('utf8'),
description.encode('utf8'),
str(datetime.datetime.now())
)
)
self.conn.commit()
return self.cursor.lastrowid
def get_project_or_create(self, pname):
self.cursor.execute(
"SELECT id, name FROM Projects "
"WHERE name == '{name:s}'".format(name=pname.encode('utf8'))
)
project = self.cursor.fetchone()
if project:
return project
        return self.create_project(pname)
def delete_project_by_name(self, pname):
self.cursor.execute(
"DELETE FROM Projects WHERE name == '{name}'"
"".format(name=pname.encode('utf8')))
self.conn.commit()
# TASKS
def get_tasks(self, limit=10, add_activity=False):
"""Lists of last tasks"""
activity_field = ''
if add_activity:
activity_field = ", SUM(CASE WHEN Tracks.finished == '' THEN 1 ELSE 0 END) "
self.cursor.execute(
"SELECT "
" Tasks.id, Tasks.name, Projects.id, Projects.name, "
" Tasks.description {activity_field}"
"FROM Tasks, Projects, Tracks "
"WHERE "
" Tasks.project_id == Projects.id AND "
" Tracks.task_id == Tasks.id "
"GROUP BY Tasks.id "
"ORDER BY Tasks.id DESC LIMIT {limit:d}".format(
limit=limit, activity_field=activity_field)
)
tasks = self.cursor.fetchall()
return tasks
def get_profiled_tasks(self, started='', finished='', limit=0):
"""The list of last tasks between dates including unfinished"""
where_clause = first_limit_clause = last_limit_clause = ''
if started and finished:
where_clause = str(
"WHERE DATE(Tracks.started) BETWEEN '{started}' AND '{finished}'"
"".format(started=started, finished=finished))
if limit:
first_limit_clause = "SELECT * FROM ("
last_limit_clause = " DESC LIMIT %d) ORDER BY tid ASC" % limit
self.cursor.execute(
"{first_limit_clause}"
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.id as pid, "
" Projects.name as pname, Tasks.description as description, "
" Tracks.started as started, Tracks.finished as finished "
"FROM Tasks, Projects, Tracks "
"WHERE "
" Tasks.project_id == Projects.id AND "
" Tracks.task_id == Tasks.id AND "
" Tracks.id IN ("
" SELECT MAX(Tracks.id) FROM Tracks "
" {where_clause} "
" GROUP BY Tracks.task_id "
" ) ORDER BY tid {last_limit_clause}"
"".format(
where_clause=where_clause,
first_limit_clause=first_limit_clause,
last_limit_clause=last_limit_clause)
)
tasks = self.cursor.fetchall()
return tasks
def get_task_by_alias(self, tname, pname):
"""Get task by name"""
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.id as pid, "
" Projects.name as pname, Tasks.description as description "
"FROM Tasks, Projects "
"WHERE "
" Tasks.project_id == pid AND "
" tname == '{task:s}' AND "
" pname == '{project:s}'"
"".format(task=tname.encode('utf8'), project=pname.encode('utf8'))
)
return self.cursor.fetchone()
def create_task(self, name, pid):
self.cursor.execute(
"INSERT INTO Tasks ('name', 'project_id') "
"VALUES "
" (?, ?)", (
name.encode('utf8'),
pid
)
)
self.conn.commit()
return self.cursor.lastrowid
def get_task_or_create(self, name, project_id):
"""Get a task or create one"""
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.id as pid, "
" Projects.name as pname, Tasks.description as description "
"FROM Tasks, Projects "
"WHERE "
" tname == '{task}' AND "
" Tasks.project_id == pid AND "
" pid == '{project!s}'"
"".format(task=name.encode('utf8'), project=project_id)
)
last = self.cursor.fetchone()
if last:
return last['tid']
return self.create_task(name, project_id)
def _get_active_tasks(self):
"""Get active tasks"""
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.name as pname, "
" Tracks.id as track_id, Tracks.started as started, "
" Tracks.finished as finished, "
" Tasks.description as description "
"FROM Tracks, Tasks, Projects "
"WHERE "
" Tracks.task_id == Tasks.id AND "
" Tasks.project_id == Projects.id AND "
" finished == ''")
return self.cursor.fetchall()
def get_active_task(self, started='', finished='', tname='', pname=''):
"""Get an active task"""
params = []
where_date_clause = where_project_clause = where_task_clause = ''
if tname:
tname = tname.encode('utf8')
where_task_clause = "tname == ? AND "
params.append(tname)
if pname:
pname = pname.encode('utf8')
where_project_clause = "pname == ? AND "
params.append(pname)
if started and finished:
where_date_clause = "AND DATE(Tracks.started) " \
" BETWEEN ? " \
" AND ? "
params.extend([started, finished])
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.name as pname, "
" Tracks.id as track_id, Tracks.started as started, "
" Tracks.finished as finished, "
" Tasks.description as description "
"FROM Tracks, Tasks, Projects "
"WHERE "
" {where_task_clause}"
" {where_project_clause}"
" Tracks.task_id == Tasks.id AND "
" Tasks.project_id == Projects.id AND "
" finished == '' "
" {where_date_clause}".format(
where_date_clause=where_date_clause,
where_project_clause=where_project_clause,
where_task_clause=where_task_clause
), params
)
return self.cursor.fetchone()
def update_task(self, tid, name, description=''):
"""Updates the task info"""
self.cursor.execute(
"UPDATE Tasks "
"SET name=?, description=?"
"WHERE id=?", (
name.encode('utf8'),
description.encode('utf8'),
tid
)
)
self.conn.commit()
def delete_task(self, tid):
""""""
self.cursor.execute(
"DELETE FROM Tasks WHERE id == '{tid}'".format(tid=tid))
self.conn.commit()
# TRACKS
def get_tracks_by_date(self, started='', finished='', also_unfinished=False):
"""Get tracks"""
where_clause = ''
between_clause = ''
params = []
if not also_unfinished:
where_clause = "AND NOT finished == '' "
if started and finished:
between_clause = "AND DATE(started) BETWEEN ? AND ?"
params.extend([started, finished])
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, "
" Projects.id as pid, Projects.name as pname, "
" Tracks.id as trid, Tracks.started as started, "
" Tracks.finished as finished, "
" Tracks.is_billed as is_billed "
"FROM Tracks, Tasks, Projects "
"WHERE "
" Tracks.task_id == tid AND "
" Tasks.project_id == pid"
" {where_clause} "
" {between_clause} "
"ORDER BY Tracks.id".format(started=started,
finished=finished,
where_clause=where_clause,
between_clause=between_clause),
params
)
return self.cursor.fetchall()
def get_track_by_id(self, tid):
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.name as pname, "
" Tracks.id as trid, Tracks.started as started, "
" Tracks.finished as finished, "
" Tracks.is_billed as is_billed "
"FROM Tracks, Tasks, Projects "
"WHERE "
" Tracks.task_id == tid AND "
" Tasks.project_id == Projects.id AND "
" trid == %d" % tid
)
return self.cursor.fetchone()
def create_track(self, task_id, started='', finished='', is_billed=True):
# started, finished - 9-item sequence, not float
if not started:
started = datetime.datetime.now()
self.cursor.execute(
"INSERT INTO Tracks "
" ('task_id', 'started', 'finished', 'is_billed') "
"VALUES (?, ?, ?, ?)", (task_id, started, finished, int(is_billed))
)
self.conn.commit()
return self.cursor.lastrowid
def finish_track(self, track_id, started=None):
finished = datetime.datetime.now()
if started and config.BT_TIMESHEET_ROUNDING and config.BT_ROUNDING_INCREMENT:
delta = finished - started
round_to = config.BT_ROUNDING_INCREMENT * 60
seconds = round_to - delta.seconds % round_to
finished = finished + datetime.timedelta(seconds=seconds)
self.cursor.execute(
"UPDATE Tracks SET finished=? WHERE id=?", (finished, track_id)
)
self.conn.commit()
return finished
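    # Worked example for the rounding in finish_track() above (assuming
    # BT_TIMESHEET_ROUNDING is enabled and BT_ROUNDING_INCREMENT == 15 minutes):
    # a track that ran 23m10s gives delta.seconds == 1390 and round_to == 900,
    # so 900 - 1390 % 900 == 410 extra seconds are added and the recorded
    # duration is rounded up to exactly 30 minutes.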
def update_track(self, track_id, started, finished, is_billed):
"""Updates the time was spend and is billed flag of the track record"""
self.cursor.execute(
"UPDATE Tracks "
"SET started=?, finished=?, is_billed=? "
"WHERE id=?", (started, finished, is_billed, track_id)
)
self.conn.commit()
def delete_tracks_by_date(self, started, finished, also_unfinished=False):
"""Deletes tracks by the date"""
        where_clause = ''
        if not also_unfinished:
            where_clause = "AND NOT finished == '' "
self.cursor.execute(
"DELETE "
" FROM Tracks "
"WHERE "
" DATE(started) BETWEEN ? AND ?"
" {where_clause}"
"".format(where_clause=where_clause),
(started, finished)
)
self.conn.commit()
# TIMESHEET
def get_group_by_clause(self, mask):
"""Makes a GROUP BY clause by bit mask"""
def set_group_by_clause(bits, value, group_by):
"""Add a field to group_by clause"""
if mask & bits:
if group_by:
group_by = "%s," % group_by
group_by = '{group_by} {value}'.format(group_by=group_by,
value=value)
return group_by
group_by = set_group_by_clause(TS_GROUP_BY['date'], 'DATE(started)', '')
group_by = set_group_by_clause(TS_GROUP_BY['project'], 'Tasks.project_id',
group_by)
group_by = set_group_by_clause(TS_GROUP_BY['task'], 'Tracks.task_id',
group_by)
group_by = set_group_by_clause(TS_GROUP_BY['track'], 'Tracks.id', group_by)
if group_by:
group_by = "GROUP BY %s " % group_by
return group_by
def get_timesheet_fields(self, mask, get_headers=False):
"""Makes a list of ordered fields"""
# Priority:
# datetime - 0
# date - 1
# task - 2
# project - 3
# spent - 4
# date, tname, pname, started, finished, spent
date_field = (0, 'DATE(started) as "date [date]"', 'Date')
task_field = (1, 'tname', 'Task')
project_field = (2, 'pname', 'Project')
started_field = (3, 'DATETIME(started) as "started [timestamp]"', 'From')
finished_field = (4, 'DATETIME(finished) as "finished [timestamp]"', 'To')
spent_field = (5, 'spent', 'Time Spent')
clause = set()
if mask & TS_GROUP_BY['date']:
clause.add(date_field)
if mask & TS_GROUP_BY['task']:
clause.update([task_field, project_field])
if mask & TS_GROUP_BY['project']:
clause.add(project_field)
if mask & TS_GROUP_BY['track']:
clause.update([task_field, project_field, started_field,
finished_field])
clause.add(spent_field)
to_get = 2 if get_headers else 1
return map(operator.itemgetter(to_get),
sorted(clause, key=operator.itemgetter(0)))
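    # Example (derived from the priorities above): a mask of
    # TS_GROUP_BY['date'] | TS_GROUP_BY['track'] yields the headers
    # ['Date', 'Task', 'Project', 'From', 'To', 'Time Spent'], in that
    # order, when get_headers is True.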
def get_timesheet_select_clause(self, mask):
"""Get prepared select's clause list of fields"""
fields = self.get_timesheet_fields(mask)
return ', '.join(fields)
def get_minimal_started_track(self, tname='', pname=''):
"""Get a minimal tracked date"""
params = []
where_project_clause = where_task_clause = ''
if tname:
tname = tname.encode('utf8')
where_task_clause = "tname == ? AND "
params.append(tname)
if pname:
pname = pname.encode('utf8')
where_project_clause = "pname == ? AND "
params.append(pname)
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.name as pname, "
" DATE(started) as 'started [date]'"
"FROM Tracks, Tasks, Projects "
"WHERE "
" {where_task_clause}"
" {where_project_clause}"
" Tracks.task_id == tid AND "
" Tasks.project_id == Projects.id"
"".format(where_task_clause=where_task_clause,
where_project_clause=where_project_clause), params)
return self.cursor.fetchone()
def get_timesheet(self, started, finished, group_by_mask, only_billed=True,
tname='', pname=''):
""" Gets the time was spent for a task/project"""
params = []
only_billed_clause = where_project_clause = where_task_clause = ''
if tname:
params.append(tname.encode('utf8'))
where_task_clause = "tname == ? AND "
if pname:
params.append(pname.encode('utf8'))
where_project_clause = "pname == ? AND "
if only_billed:
only_billed_clause = " AND Tracks.is_billed == 1 "
params.extend([started, finished])
group_by_clause = self.get_group_by_clause(group_by_mask)
query = str(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.name as pname, "
" SUM(STRFTIME('%s', finished)-STRFTIME('%s', started)) as spent,"
" Tracks.started as started, "
" Tracks.finished as finished "
"FROM Tracks, Tasks, Projects "
"WHERE "
" {where_task_clause}"
" {where_project_clause}"
" Tracks.task_id == tid AND "
" Tasks.project_id == Projects.id AND "
" ("
" DATE(started) BETWEEN ? AND ?"
" AND NOT Tracks.finished == ''"
" {only_billed_clause}"
" ) "
"{group_by_clause} "
"ORDER BY started, Tasks.id"
"".format(started=started, finished=finished,
where_task_clause=where_task_clause,
where_project_clause=where_project_clause,
group_by_clause=group_by_clause,
only_billed_clause=only_billed_clause)
)
#print(query)
if group_by_mask:
select_clause = self.get_timesheet_select_clause(group_by_mask)
query = "SELECT {clause} FROM ({query})".format(
query=query, clause=select_clause)
self.cursor.execute(query, params)
return self.cursor.fetchall()
| 2.875 | 3 |
idlib/identifiers.py | tgbugs/idlib | 2 | 12799204 | """Identifiers are the smallest possible unit of a stream. Their
fundamental property is that they come equipped with an equality
operation. Not all equality operations are as simple as string= or
numberical equality. These must employ a true identity function that
does not reduce the amount of data that is compared. That is,
identifiers are distinguished from other pieces of data in the sense
that the family of functions that implement 'sameness' requires direct
comparison of every byte of two streams with an allowance for
conversion to a canonical form which may include reordering and
deduplication of elements of the identifier that follow set equality
rather than string equality. For example, composite primary keys in a
database may be rearranged into a preferred order for further byte to
byte comparison between rows, but the characters in a word cannot be
sorted prior to comparison if we are interested in the equality of two
ordered strings of chars.
Note that under the defintion provided above the ONLY requirement for
an identifier is that it come equipped with an identity function. This
means that whole computer programs can be identifiers as long as the
comparison function is defined. Then there is a question of the robustness
of that identity function to a change in context, specifically defined
as the failures of the equiality of identifiers to correctly imply the
equality of what they dereference to.
There is a tradeoff between robustness of reference and usefulness for
human communication. And for better or for worse the IRBs and IACUCs of
the world tend to frown upon shoving subjects through hash functions.
"""
import idlib
class Identifier(idlib.Stream): # TODO decide if str should be base ...
""" Base class for all Identifiers """
# all identifiers mapped to idlib should have a string representation
# or be representable as strings in as unambiguous a way as possible
# this means that distinctions based on types e.g. MyId('1') and YourId('1')
# need stringify in such a way that they do not colide e.g. myid:1 yourid:1
# within the expected context of their expansion
# local:?:myid:1 local:?:yourid:1 could represent the space of local identifiers
# with unknown local conventions, maintaining a record of all local conventions
    # seems likely to be a giant pain, so local:?: ids would be ephemeral and would
# probably have to be marked with a source as a kind of best guess maximal domain
# for assuming similarity, though in some documents every instance of a local:?:id
# should probably be assumed to be different under expansion
# as a result of this, it is still not entirely clear whether
# str is quite the right option, but since normalization can
# occur before stringification, it is probably ok ...
# e.g. hello:world -> local:?:hello:world in cases where
# a local identifier is used without conventions
# same with local:?:1, local:?:2, local:?:3, local:?:4
_id_class = str
local_regex = None # an unqualified, likely non-unique regex for the system local identifier
canonical_regex = None # but really '.+'
#@staticmethod
#def normalize(identifier):
#raise NotImplementedError
#return identifier
def __new__(cls, *args, **kwargs):
return super().__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
#def exists(self):
# bad identifiers are not allowed to finish __init__
#raise NotImplementedError
def metadata(self):
raise NotImplementedError
def data(self):
raise NotImplementedError
def asLocal(self, conventions=None):
if conventions is None:
conventions = self._local_conventions
return conventions.asLocal(self)
| 3.328125 | 3 |
app/extract_utils.py | Maaslak-ORG/doccano | 0 | 12799205 | import json
import os
from rest_framework.renderers import JSONRenderer
from api.models import Project, DOCUMENT_CLASSIFICATION, SEQUENCE_LABELING
from api.serializers import LabelSerializer
from api.utils import JSONPainter
def extract_document_classification(label, labels):
return labels.get(pk=label["label"]).text
def extract_label_seq_labeling(label, labels):
return [
label["start_offset"],
label["end_offset"],
labels.get(pk=label["label"]).text,
]
def get_extract_label(project):
return {
DOCUMENT_CLASSIFICATION: extract_document_classification,
SEQUENCE_LABELING: extract_label_seq_labeling,
}[project.project_type]
def get_all_projects_json():
dump_dir = "projects_dump"
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
for project in Project.objects.all():
try:
project_dir = f"{dump_dir}/dump_{project.name.replace('/', '_')}"
if not os.path.exists(project_dir):
os.makedirs(project_dir)
print(f"Dumping {project.name}")
labels = project.labels.all()
label_serializer = LabelSerializer(labels, many=True)
documents = project.documents.all()
data = JSONPainter().paint(documents)
data = map(
lambda x: {
**x,
"labels": list(
map(
lambda y: get_extract_label(project)(y, labels),
x["annotations"],
)
),
},
data,
)
data = map(json.dumps, data)
data = map(lambda x: x + "\n", data)
with open(f"{project_dir}/labels.json", "wb") as f:
f.write(JSONRenderer().render(label_serializer.data))
with open(f"{project_dir}/data.jsonl", "w") as f:
f.writelines(data)
except Exception as ex:
print(f"Error {project.name} {ex}")
| 2.265625 | 2 |
Graduate_work/main/algorithms.py | mcxemic/Graduate_work | 0 | 12799206 | <reponame>mcxemic/Graduate_work<filename>Graduate_work/main/algorithms.py
import numpy as np
def calculate_task_table_from_productivity_factors(tasks_lists, productivity_factors):
# p - count of task. k - vector productivity factors
# transform two vector to matrix with task * productivity
output_table = []
productivity_factors.sort()
tasks_lists.sort()
tasks_lists.reverse()
# print(productivity_factors, tasks_lists)
for j in range(len(productivity_factors)):
row = []
for i in range(len(tasks_lists)):
row.append(tasks_lists[i] * productivity_factors[j])
output_table.append(row)
output_table = np.array(output_table)
output_table = output_table.T
return output_table
def calculate_second_table(table):
newtable = []
for i in range(table.shape[0]):
row = []
for j in range(table.shape[1]):
row.append(1 / table[i, j])
newtable.append(row)
newtable = np.array(newtable)
return newtable
def output_result_algorithm(result):
for i in enumerate(result):
pass
# print('Machine ', i[0] + 1, i[1])
def A1(count_of_machine, count_of_tasks, task_table_with_coefficient):
task_of_machine = []
list_of_used_time_of_every_machine = list(count_of_machine * [0])
# create dict for every machine in task
for _ in range(0, count_of_machine):
machine = {}
task_of_machine.append(machine)
# distribute tasks for every machine with magic algorithms from the Heaven
for j in range(count_of_tasks):
index = list_of_used_time_of_every_machine.index(min(list_of_used_time_of_every_machine))
list_of_used_time_of_every_machine[index] += np.asscalar(task_table_with_coefficient[j][index])
task_of_machine[index].update({j + 1: np.asscalar(task_table_with_coefficient[j][index])})
output_result_algorithm(task_of_machine)
return task_of_machine
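# Small worked example of the A1 greedy rule above (illustrative numbers only):
# tasks [4, 3] with productivity factors [1, 2] give the coefficient table
# [[4, 8], [3, 6]]; task 1 is assigned to the (idle) first machine at cost 4,
# then task 2 goes to the second machine, now the least loaded, at cost 6.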
def A2(count_of_machine, count_of_task, table, tasks_list, C_foreach_machine):
task_of_machine = []
list_of_used_time_of_every_machine = list(count_of_machine * [0])
#print("tasks" + tasks_list)
for _ in range(0, count_of_machine):
machine = {}
task_of_machine.append(machine)
for j in range(0, count_of_task):
index = C_foreach_machine.index(max(C_foreach_machine)) # index with max f
list_of_used_time_of_every_machine[index] += np.asscalar(table[j][index]) # fill C
C_foreach_machine[index] -= tasks_list[j]
task_of_machine[index].update({j + 1: np.asscalar(table[j][index])})
# output_result_algorithm(task_of_machine)
return task_of_machine
def optimization2(k, e, sigma, C):
print('\n----------------------------------------------------------------')
print('Second optimization')
T = []
for i in range(len(k)):
T.append((C - k[i] * e[i]))
opt = [0] * len(k)
x = [0] * len(k)
counter = 0
sigma2 = round(sigma, 0)
sigma2 = int(sigma2)
print(int(sigma2))
for i in range(sigma2):
for i in range(len(k)):
opt[i] = k[i] * (e[i] - x[i])
index = opt.index(max(opt))
x[index] += 1
T[index] += k[index]
counter += 1
print(counter)
print("X = ", x)
return x
def optimization1(sigma, e, k, C):
print('\n----------------------------------------------------------------')
print('First optimization')
T = []
for i in range(len(k)):
T.append((C - k[i] * e[i]))
FirstT = T.copy()
Tq = T.copy()
for i in range(len(k)):
Tq[i] += k[i]
x = [0] * len(k)
sigma2 = round(sigma, 0)
print(int(sigma2))
sigma2 = int(sigma2)
print(int(sigma2))
for i in range(sigma2):
index = Tq.index(min(Tq))
Tq[index] += k[index]
x[index] += 1
for i in range(len(k)):
T[i] += x[i] * k[i]
print("X = ", x)
return x, FirstT
def run_algorithms(productivity_factors, sets, task_id, C):
from .optimization_algorithms import get_finall_T, create
import time
schedules_first_alg = []
schedules_secoond_alg = []
for i in range(len(sets)):
task_table_with_coefficient = calculate_task_table_from_productivity_factors(sets[i],
productivity_factors[i])
schedules_first_alg.append(
A1(len(productivity_factors[i]), len(sets[i]), task_table_with_coefficient))
for i in range(len((sets))):
task_table_with_coefficient = calculate_task_table_from_productivity_factors(sets[i],
productivity_factors[i])
C_foreach_machine = list(map(lambda i: C / i, productivity_factors[i]))
schedules_secoond_alg.append(A2(len(productivity_factors[i]), len(sets[i]),
task_table_with_coefficient, sets[i], C_foreach_machine))
# Get data from DB
# Run algorithms
# Write to algorithm table
write_to_alorithms_table(task_id, schedules_first_alg, schedules_secoond_alg)
# create optimization
for i in range(len(sets)):
start1 = time.time()
final_T_first, keys1, ideal1 = get_finall_T(schedules_first_alg[i], productivity_factors[i])
optimizationed_schedule1,max_proj1,relative_projection1,iteration_count1 = create(keys1, ideal1, productivity_factors[i], final_T_first)
print("Iteration count 1 {}".format(iteration_count1))
stop1 = time.time()
write_to_optimization_table(task_id, optimizationed_schedule1, max_proj1, stop1 - start1,relative_projection1,iteration_count1)
start2 = time.time()
final_T_second, keys2, ideal2 = get_finall_T(schedules_secoond_alg[i], productivity_factors[i])
optimizationed_schedule2,max_proj2,relative_projection2,iteration_count2 = create(keys2, ideal2, productivity_factors[i], final_T_second)
print("Iteration count 2 {}".format(iteration_count2))
stop2 = time.time()
write_to_optimization_table(task_id,optimizationed_schedule2,max_proj2,stop2-start2,relative_projection2,iteration_count2)
def write_to_alorithms_table(task_id, schedule1, schedule2):
from ..models import Algorithm
from .. import db
import json
for i in range(len(schedule1)):
# print('schedule1 {0} schedule {1}'.format(schedule1, schedule2))
# print('1 len {0}, type {1} schedule {2}'.format(len(schedule1), type(schedule1[0]), schedule1[i][0]), i)
#print('2 len {0}, type {1} schedule {2}'.format(len(schedule2), type(schedule2[0]), schedule2[i][0]))
sched_JSON1 = json.dumps(schedule1[i])
sched_JSON2 = json.dumps(schedule2[i])
alg = Algorithm(task_id=task_id, initial_timetable_first_alg=sched_JSON1,
initial_timetable_second_alg=sched_JSON2)
db.session.add(alg)
db.session.commit()
def write_to_optimization_table(task_id,algorithm,projection,runtime,relative_projection,iteration_count):
from ..models import Task
import json
from .. import db
algo = json.dumps(algorithm)
tsk = Task.query.filter_by(id=task_id).first()
tsk.first_Optimization = algo
tsk.first_projection = projection
tsk.first_lead_time = runtime
tsk.first_relatively_projection = relative_projection
tsk.first_iteration_count = iteration_count
db.session.commit() | 3.15625 | 3 |
gestao/contrato/models/financeiro/ContratoDespesas.py | Smartboxweb98/gestao_empresarial | 3 | 12799207 | # -*- coding: utf-8 -*-
from django.db import models
from gestao.contrato.models.contrato.Contrato import Contrato
from gestao.financeiro.models.movimentacoes.Despesa import Despesa
from gestao.financeiro.models.pagamento.PagamentoDespesa import PagamentoDespesa
class ContratoDespesas(models.Model):
contrato = models.ForeignKey(Contrato, verbose_name="Contrato")
despesa = models.ForeignKey(Despesa, verbose_name="Despesa")
def __unicode__(self):
return u'%s: %s' % (self.contrato.titulo, self.despesa.valor_total)
def pagamento(self):
pagamento_despesa = PagamentoDespesa.objects.filter(despesa=self.despesa)
if pagamento_despesa:
return pagamento_despesa[0]
return None
class Meta:
app_label = 'contrato'
verbose_name = 'Despesa do Contrato'
verbose_name_plural = 'Despesas do Contrato'
| 2.1875 | 2 |
simplifiedpytrends/test_trendReq.py | Drakkar-Software/pytrends | 3 | 12799208 | from unittest import TestCase
from simplifiedpytrends.request import TrendReq
class TestTrendReq(TestCase):
def test__get_data(self):
"""Should use same values as in the documentation"""
pytrend = TrendReq()
self.assertEqual(pytrend.hl, 'en-US')
self.assertEqual(pytrend.tz, 360)
self.assertEqual(pytrend.geo, '')
self.assertTrue(pytrend.cookies['NID'])
def test_interest_over_time(self):
pytrend = TrendReq()
pytrend.build_payload(kw_list=['pizza', 'bagel'])
self.assertIsNotNone(pytrend.interest_over_time())
| 2.90625 | 3 |
app/posts/__init__.py | nchudleigh/yunite-blog | 10 | 12799209 | from __future__ import absolute_import, print_function
from flask import Blueprint
posts = Blueprint('posts', __name__)
from . import views
from . import models
| 1.328125 | 1 |
HRec/datasets/hdataset.py | geekinglcq/HRec | 49 | 12799210 | # -*- coding:utf-8 -*-
# ###########################
# File Name: hdataset.py
# Author: geekinglcq
# Mail: <EMAIL>
# Created Time: 2020-12-28 20:17:47
# ###########################
import pandas as pd
import os
import logging
from collections import defaultdict
from torch.utils.data import DataLoader, Dataset
from .enum_type import FeatureSource as FS
from .enum_type import item_type_dict
from .dataset import DataSet, SubSet
class HDataSet(DataSet):
"""
Dataset used for heterogenous items
"""
def __init__(self, config, restore_path=None):
self.config = config
self._init_setting()
if restore_path is None:
self._load_feats()
else:
# TODO
pass
self._preprocessing()
def _load_feats(self):
self.user_feat = self._load_meta_feats(self.config["user_feat_path"],
FS.USER, "user_id")
self.item_feat = self._load_item_feats(self.config["item_feat_path"],
FS.ITEM)
self.inter_feat = pd.read_csv(self.config["inter_feat_path"]).sample(
frac=1, random_state=28)
mask = None
if len(self.types) < 3:
for item_type, item_feat in self.item_feat.items():
new_mask = self.inter_feat[self.iid_field].isin(
item_feat[self.iid_field])
if mask is not None:
mask = mask | new_mask
else:
mask = new_mask
self.inter_feat = self.inter_feat[mask]
self.h_inter_feat = {}
self.user_num = len(self.user_feat)
self.item_num = sum([len(i) for i in self.item_feat.values()])
self.item_nums = {k: len(v) for k, v in self.item_feat.items()}
print(f'user num: {self.user_num}')
print(f'item num: {self.item_num}')
print(f'item nums: {self.item_nums}')
def _preprocessing(self):
self._normalize()
if len(self.types) < 3:
self._reID(self.iid_field)
self._reID(self.uid_field)
def _load_item_feats(self, paths, source):
item_feat = {}
for item_type, item_path in paths.items():
if item_type not in self.types:
continue
if os.path.isfile(item_path):
feat = pd.read_csv(item_path)
item_feat[item_type] = feat
else:
raise ValueError("Dataset file not fountd.")
return item_feat
def _init_setting(self):
self.logger = logging.getLogger()
self.name = self.config['name']
print(self.config)
self.uid_field = self.config["USER_ID_FIELD"]
self.iid_field = self.config["ITEM_ID_FIELD"]
self.label_field = self.config["LABEL_FIELD"]
self.itype_field = self.config["TYPE_FIELD"]
self.types = self.config["type"]
self.field2type = {}
self.field2source = {}
self.field2id_token = defaultdict(dict)
self.field2token_id = defaultdict(dict)
self.user_feat_fields = []
self.item_feat_fields = defaultdict(list)
for feat_name, feat_value in self.config['feat'].items():
source = feat_value['source']
self.field2type[feat_name] = feat_value['type']
self.field2source[feat_name] = feat_value['source']
if source == 'user' and feat_name != self.uid_field:
self.user_feat_fields.append(feat_name)
if source.startswith('item') and feat_name != self.iid_field:
item_type = source.split("_")[1]
if item_type in self.types:
self.item_feat_fields[item_type].append(feat_name)
def num(self, field):
if field == self.uid_field:
return self.user_num
if field == self.iid_field:
return self.item_num
if field not in self.field2type:
raise ValueError('field {} not in dataset'.format(field))
# if field not in self.field2token_id:
# raise ValueError('field {} is not token type'.format(field))
if len(self.field2token_id[field]) == 0:
if field in self.user_feat_fields:
return len(self.user_feat[field].unique())
else:
for item_type, item_feat_fields in self.item_feat_fields.items(
):
if field in item_feat_fields:
return len(self.item_feat[item_type][field].unique())
return len(self.field2token_id[field])
def _reID(self, field):
"""
Re-ID the token-type feature, save the id map in self.field2token_id
"""
self.logger.info(f'ReID field {field}.')
ftype = self.field2type.get(field)
assert ftype == 'token'
source = self.field2source.get(field)
if type(source) is str and source.startswith("item_"):
item_type = source.split("_")[1]
dataframe = self.item_feat[item_type]
elif source is FS.ITEM_ID or source == "item":
dataframe = pd.concat(list(self.item_feat.values()), join='inner')
elif source == 'user' or source is FS.USER_ID:
dataframe = self.user_feat
else:
dataframe = self.inter_feat
id_map = {v: k for k, v in enumerate(dataframe[field].unique())}
self.field2token_id[field].update(id_map)
dataframe[field] = dataframe[field].map(id_map)
if source in ['item', 'user', FS.ITEM_ID, FS.USER_ID]:
if field in self.inter_feat:
self.inter_feat[field] = self.inter_feat[field].map(id_map)
for item_type, item_feat in self.item_feat.items():
if field in item_feat:
item_feat[field] = item_feat[field].map(id_map)
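    # Added illustration (not part of the original class): _reID produces a
    # contiguous integer id space per token field. For a hypothetical raw
    # column ['a900', 'b17', 'a900'] the behaviour is roughly
    #   id_map == {'a900': 0, 'b17': 1}  ->  column becomes [0, 1, 0]
    # and for user/item id fields the same map is also applied to inter_feat
    # and item_feat so ids stay consistent across tables.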
def join(self, df):
"""
Join user/item features to interactions.
"""
if self.user_feat is not None and self.uid_field in df:
df = pd.merge(df,
self.user_feat,
on=self.uid_field,
how='left',
suffixes=('_inter', '_user'))
if self.item_feat is not None and self.iid_field in df:
for item_type, item_feat in self.item_feat.items():
df = pd.merge(df,
item_feat,
on=self.iid_field,
how='left',
suffixes=(f'_{item_type}', '_inter'))
type_c = [i for i in df.columns if i.startswith(self.itype_field)]
df[self.itype_field] = df[type_c].agg(sum, axis=1)
return df
def join_interaction(self):
self.inter_feat = self.join(self.inter_feat)
if 'sample' in self.config:
sample_ratio = self.config['sample']
sampled = []
for kind in self.types:
ratio = sample_ratio.get(kind, 1.0)
kind_id = item_type_dict[kind]
                # preserve the data for val & test
new_df = self.inter_feat[self.inter_feat['type'] ==
kind_id].sample(frac=ratio * 0.7 +
0.3,
random_state=16)
print(kind, kind_id, ratio, new_df.shape)
sampled.append(new_df)
self.inter_feat = pd.concat(sampled, ignore_index=True)
self.inter_feat = self.inter_feat.sample(frac=1.).reset_index(
drop=True)
def train_val_test_split(self,
ratios=[0.7, 0.2, 0.1],
group_by=None,
**kwargs):
assert len(ratios) == 3
if 'sample' in self.config:
train, val, test = self.split_by_ratio_sampled(
ratios, create_new_dataset=False)
else:
train, val, test = self.split_by_ratio(ratios,
group_by=group_by,
create_new_dataset=False)
user_fs = self.user_feat_fields
item_fs = self.item_feat_fields
type_field = self.itype_field
self.train_inter_subset = {}
self.val_inter_subset = {}
self.test_inter_subset = {}
for item_type in self.types:
item_type_id = item_type_dict[item_type]
self.train_inter_subset[item_type] = SubSet(
train[train[type_field] == item_type_id], self.uid_field,
self.iid_field, self.itype_field, self.label_field, user_fs,
item_fs[item_type])
self.val_inter_subset[item_type] = SubSet(
val[val[type_field] == item_type_id], self.uid_field,
self.iid_field, self.itype_field, self.label_field, user_fs,
item_fs[item_type])
self.test_inter_subset[item_type] = SubSet(
test[test[type_field] == item_type_id], self.uid_field,
self.iid_field, self.itype_field, self.label_field, user_fs,
item_fs[item_type])
self.all_inter_feat = self.inter_feat
self.logger.info(
"Replace interaction features with train interaction fatures.")
self.logger.info(
"Interaction features are stored in self.all_inter_feat")
self.inter_feat = train
def init_data_loader(self, batch_size=256, num_workers=1):
self.train_data_loader = {}
self.val_data_loader = {}
self.test_data_loader = {}
for item_type in self.types:
self.train_data_loader[item_type] = DataLoader(
self.train_inter_subset[item_type],
batch_size=batch_size,
# pin_memory=True,
num_workers=num_workers)
self.val_data_loader[item_type] = DataLoader(
self.val_inter_subset[item_type],
batch_size=batch_size,
num_workers=num_workers)
self.test_data_loader[item_type] = DataLoader(
self.test_inter_subset[item_type],
batch_size=batch_size,
num_workers=num_workers)
class HSubSet(Dataset):
def __init__(self, dataframes, uid_field, iid_field, label_field,
u_feat_fields, i_feat_fields):
self.types = dataframes.keys()
self.dfs = dataframes
self.uid = uid_field
self.iid = iid_field
self.label = label_field
def __len__(self):
        return min(len(df.index) for df in self.dfs.values())
| 2.171875 | 2 |
src/controllerarena/loggers/VisualLogger.py | VerifiableRobotics/controller-arena | 0 | 12799211 | <filename>src/controllerarena/loggers/VisualLogger.py<gh_stars>0
import socket
import matplotlib.pyplot as plt
import numpy as np
import json
idx = 0
lines = []
def decode(dct):
if "data" in dct:
return dct["data"]
elif "config" in dct:
return dct["config"]
elif "config" not in dct and "x" in dct and "y" in dct:
global idx, lines
idx += 1
plt.figure(idx)
if "xlabel" in dct:
plt.xlabel(dct["xlabel"])
if "ylabel" in dct:
plt.ylabel(dct["ylabel"])
l, = plt.plot([], [], 'r-')
lines.append(l)
return [dct["x"], dct["y"]]
else:
return "Invalid JSON"
def process(lines, datum, configs):
arr = json.loads(datum, object_hook=decode)
for idx, config in enumerate(configs):
plt.figure(idx+1)
xdata = arr[config[0]]
ydata = arr[config[1]]
l = lines[idx]
x = l.get_xdata()
y = l.get_ydata()
if len(x) > 0:
# Append new data
l.set_xdata(np.append(x, xdata))
l.set_ydata(np.append(y, ydata))
# Adjust axis limits
plt.xlim(0, np.amax(l.get_xdata())*1.05)
plt.ylim(0, np.amax(l.get_ydata())*1.05)
else:
# Add first coordinates
l.set_xdata([xdata])
l.set_ydata([ydata])
# Update plot
plt.draw()
# Socket address
HOST = '127.0.0.1'
PORT = 8080
# Open socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind socket to address
s.bind((HOST, PORT))
# Listen (1 connection in buffer)
s.listen(1)
# Accept connection
conn, addr = s.accept()
print "Connected by", addr
configs = conn.recv(1024)
configs = json.loads(configs, object_hook=decode)
plt.show(block=False)
conn.sendall('Ready')
while 1:
# Receive data
datum = conn.recv(1024)
if datum:
# If data is not terminating
try:
# Process and plot data
process(lines, datum, configs)
except:
# Handle invalid data without closing connection
print "Invalid data received"
else:
# If data is terminating
break
# Close connection
conn.close()
# Close socket
s.close()
# Keep showing plot
plt.show()
| 2.421875 | 2 |
json_eep.py | ivanliao/EazyEEP | 0 | 12799212 | <reponame>ivanliao/EazyEEP<filename>json_eep.py
#!/usr/bin/python
'''
Created on May 20, 2016
@author:
'''
import sys
import os
import json
import eeprom
#import logging as log
from argparse import ArgumentParser
class SysBoard(object):
'''
Main system definition
'''
def __init__(self, json_data):
'''
Constructor
'''
self.eeprom = eeprom.EepromBin("syseeprom.bin")
# Init EEPROM programmer
self.eepprog = eeprom.JsonEepromProg(self.eeprom,json_data['SysEeprom'])
def main(board, argv=None):
'''Command line options.'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
eepprog = board.eepprog
try:
# Setup argument parser
parser = ArgumentParser(prog=sys.argv[0])
subparsers = parser.add_subparsers(help='help for subcommand', dest='subcommand')
parser_show = subparsers.add_parser('show', help='Display the device info')
parser_dump = subparsers.add_parser('dump', help='Dump the binary content')
parser_json = subparsers.add_parser('json', help='Output JSON format')
parser_init = subparsers.add_parser('init', help='Initialize the device info')
parser_erase = subparsers.add_parser('erase', help='Erase the device info')
parser_update = subparsers.add_parser('update', help='Update the device info')
parser_update.add_argument('fields', type=str, metavar='<field>=<value>', nargs='+',
help='Update the specified field. ')
parser_list = subparsers.add_parser('field', help='List the available fields. ')
if len(sys.argv) == 1:
parser.print_help()
return 1
args = parser.parse_args()
if args.subcommand == 'show': # eepprog show
eepprog.eep_dev.reload()
for key in sorted(eepprog.fields.keys(), key = lambda name: eepprog.fields[name].offset):
print '%-16s: %s' % (eepprog.fields[key].descr, eepprog.get_field(key))
elif args.subcommand == 'dump': # eepprog dump
eepprog.eep_dev.reload()
print eepprog.eep_dev.dump()
elif args.subcommand == 'erase': # eepprog erase
if operation_confirm() == True:
eepprog.erase_all()
eepprog.eep_dev.save()
elif args.subcommand == 'init': # eepprog init
if operation_confirm() == True:
eepprog.init_default()
eepprog.eep_dev.save()
elif args.subcommand == 'json': # eepprog json
eepprog.eep_dev.reload()
print eepprog.toJSON()
elif args.subcommand == 'field': # eepprog field
print '\nAvailable fields are: ' +', '.join(eepprog.fields.keys())
elif args.subcommand == 'update': # eepprog update
# parse <field>=<value>
eepprog.eep_dev.reload()
fields = []
for f in args.fields:
pair = f.split('=')
if len(pair) != 2:
parser_update.print_help()
return 2
elif pair[0] not in eepprog.fields:
print 'Available fields are: ' +', '.join(eepprog.fields.keys())
return 2
else: fields.append(pair)
for f in fields:
                if eepprog.fields[f[0]] is not None:
eepprog.set_field(f[0],f[1])
eepprog.eep_dev.save()
else:
parser.print_help()
print ''
except Exception, e:
indent = len(parser.prog) * " "
sys.stderr.write(parser.prog + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help\n")
return 2
def operation_confirm():
s = raw_input('Are you sure to do this? (y/N): ')
if s.lower() == 'y':
return True
return False
# Entry point of the script
if __name__ == '__main__':
try:
f=open('eeprom.json') # EEPROM format
j_data = json.load(f)
f.close()
except Exception, e:
print "File eeprom.json is not found."
exit(1)
board = SysBoard(j_data)
sys.exit(main(board))
| 2.296875 | 2 |
MITx-6.00.1x/pset5/Problem_4_-_Decrypt_a_Story.py | FTiniNadhirah/Coursera-courses-answers | 73 | 12799213 | def decrypt_story():
"""
Using the methods you created in this problem set,
decrypt the story given by the function getStoryString().
Use the functions getStoryString and loadWords to get the
raw data you need.
returns: string - story in plain text
"""
story = CiphertextMessage(get_story_string())
return story.decrypt_message()
| 3.203125 | 3 |
commandlib/piped.py | crdoconnor/commandlib | 16 | 12799214 | <reponame>crdoconnor/commandlib<filename>commandlib/piped.py
from copy import deepcopy
from subprocess import PIPE, STDOUT, Popen
from commandlib.exceptions import CommandError, CommandExitError
from commandlib.utils import _check_directory
from os import chdir, getcwd
class PipedCommand(object):
def __init__(self, command):
self._command = command
self._from_string = None
self._from_handle = None
self._from_filename = None
self._stdout_to_filename = None
self._stdout_to_handle = None
self._stderr_to_handle = None
def from_string(self, string):
assert self._from_handle is None
assert self._from_filename is None
new_piped = deepcopy(self)
new_piped._from_string = string
return new_piped
def from_handle(self, handle):
assert self._from_string is None
assert self._from_filename is None
new_piped = deepcopy(self)
new_piped._from_handle = handle
return new_piped
def from_filename(self, filename):
assert self._from_string is None
assert self._from_handle is None
new_piped = deepcopy(self)
new_piped._from_filename = str(filename)
return new_piped
def stdout_to_filename(self, filename):
new_piped = deepcopy(self)
new_piped._stdout_to_filename = filename
return new_piped
def stdout_to_handle(self, handle):
new_piped = deepcopy(self)
new_piped._stdout_to_handle = handle
return new_piped
def stderr_to_handle(self, handle):
assert self._stderr_to_handle is None
new_piped = deepcopy(self)
new_piped._stderr_to_handle = handle
return new_piped
def run(self):
_check_directory(self._command.directory)
previous_directory = getcwd()
if (
self._from_handle is None
and self._from_string is None
and self._from_filename is None
):
stdin = None
else:
if self._from_string:
stdin = PIPE
if self._from_handle:
stdin = self._from_handle
if self._from_filename:
stdin = open(self._from_filename, "r")
if self._stdout_to_handle is None and self._stdout_to_filename is None:
stdout = None
else:
if self._stdout_to_handle:
stdout = self._stdout_to_handle
if self._stdout_to_filename:
stdout = open(self._stdout_to_filename, "w")
if self._stderr_to_handle is None:
stderr = PIPE
else:
if self._stderr_to_handle:
stderr = self._stderr_to_handle
if self._command.directory is not None:
chdir(self._command.directory)
try:
process = Popen(
self._command.arguments,
stdout=stdout,
stderr=stderr,
stdin=stdin,
shell=self._command._shell,
env=self._command.env,
)
if self._from_string:
process.stdin.write(self._from_string.encode("utf8"))
_, _ = process.communicate()
returncode = process.returncode
finally:
if self._from_filename:
stdin.close()
if self._stdout_to_filename:
stdout.close()
chdir(previous_directory)
if returncode != 0 and not self._command._ignore_errors:
raise CommandError(
'"{0}" failed (err code {1})'.format(self.__repr__(), returncode)
)
def output(self):
_check_directory(self._command.directory)
previous_directory = getcwd()
if self._command.directory is not None:
chdir(self._command.directory)
if self._from_handle is None and self._from_string is None:
stdin = None
else:
if self._from_string:
stdin = PIPE
if self._from_handle:
stdin = self._from_handle
process = Popen(
self._command.arguments,
stdout=PIPE,
stderr=STDOUT,
stdin=stdin,
shell=self._command._shell,
env=self._command.env,
)
if self._from_string:
process.stdin.write(self._from_string.encode("utf8"))
stdoutput, _ = process.communicate()
returncode = process.returncode
chdir(previous_directory)
if returncode != 0 and not self._command._ignore_errors:
raise CommandExitError(
self.__repr__(), returncode, stdoutput.decode("utf8").strip()
)
return stdoutput.decode("utf8")
def __str__(self):
return " ".join(self._command.arguments)
def __unicode__(self):
return " ".join(self._command.arguments)
def __repr__(self):
return self.__str__()
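# Hedged usage sketch (not part of the library source; 'grep_cmd' below is an
# assumed commandlib Command wrapping e.g. ["grep", "foo"]):
#
#   piped = PipedCommand(grep_cmd).from_string("foo\nbar\n").stdout_to_filename("hits.txt")
#   piped.run()
#
# Every from_* / stdout_* / stderr_* call returns a deep copy, so a configured
# pipeline can be reused without mutating the original PipedCommand.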
| 2.703125 | 3 |
thebrushstash/migrations/0006_staticpagecontent.py | zenofewords/thebrushstash | 0 | 12799215 | <reponame>zenofewords/thebrushstash
# Generated by Django 2.2.7 on 2019-12-02 23:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('thebrushstash', '0005_setting'),
]
operations = [
migrations.CreateModel(
name='StaticPageContent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1000)),
],
),
]
| 1.671875 | 2 |
awwaards_app/migrations/0004_auto_20200609_1512.py | hamisicodes/Awwaards | 0 | 12799216 | <reponame>hamisicodes/Awwaards
# Generated by Django 3.0.7 on 2020-06-09 12:12
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('awwaards_app', '0003_rate'),
]
operations = [
migrations.AddField(
model_name='rate',
name='content',
field=models.PositiveIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(10)]),
),
migrations.AddField(
model_name='rate',
name='usability',
field=models.PositiveIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(10)]),
),
]
| 1.757813 | 2 |
src/vps/config_seoul.py | deepguider/RoadGPS | 2 | 12799217 | <reponame>deepguider/RoadGPS
import os
root_dir = './data_vps/custom_dataset/dataset_seoul'
db_dir = os.path.join(root_dir, '.')
queries_dir = os.path.join(root_dir, '.')
if not os.path.exists(root_dir) or not os.path.exists(db_dir):
    raise FileNotFoundError("root_dir : {}, db_dir : {}".format(root_dir, db_dir))
struct_dir = os.path.join(root_dir, 'datasets') # For mat files in which list of image files are
#structFile = join(struct_dir, 'pitts30k_test.mat')
#structFile = os.path.join(struct_dir, 'dg_daejeon_test.mat')
structFile = os.path.join(struct_dir, 'dg_seoul_test.mat')
#x, y, coord, radius = 327934.67464998, 4153535.06119226, 'utm', 25
x, y, coord, radius = 37.511634, 127.061298, 'latlon', 25
| 2.1875 | 2 |
farmblr/farmblr/settings.py | Nemwel-Boniface/Farmblr | 3 | 12799218 | """
Django settings for farmblr project.
Generated by 'django-admin startproject' using Django 4.0.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path
from decouple import config, Csv
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# TODO: Make secret key secret
SECRET_KEY = 'django-insecure-xyjd9zz!%+e^k9emeu8--hvpp1zqv01e_85eis(dux3li8t2!$'
# SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# TODO: Uncomment the below 2 and delete defaults (for production)
# DEBUG = config('DEBUG', default=True, cast=bool)
#
# ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'web',
'blog',
'accounts',
'products'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'farmblr.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'farmblr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
BASE_DIR / "static",
]
# User uploaded files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Email configuration
# EMAIL_BACKEND = config('EMAIL_BACKEND')
# EMAIL_USE_TLS = config('EMAIL_USE_TLS', cast=bool)
# EMAIL_ACTIVE_FIELD = config('EMAIL_ACTIVE_FIELD')
# EMAIL_SERVER = EMAIL_HOST = config('EMAIL_HOST')
# EMAIL_PORT = config('EMAIL_PORT', cast=int)
# EMAIL_ADDRESS = EMAIL_HOST_USER = config('EMAIL_HOST_USER')
# EMAIL_FROM_ADDRESS = config('EMAIL_HOST_USER')
# EMAIL_PASSWORD = EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD') # os.environ['password_key'] suggested
# EMAIL_MAIL_SUBJECT = config('EMAIL_MAIL_SUBJECT')
# EMAIL_MAIL_HTML = config('EMAIL_MAIL_HTML')
# EMAIL_PAGE_TEMPLATE = config('EMAIL_PAGE_TEMPLATE')
# EMAIL_PAGE_DOMAIN = config('EMAIL_PAGE_DOMAIN')
# DEFAULT_FROM_EMAIL = config('EMAIL_FROM_ADDRESS')
| 1.84375 | 2 |
rubik_solver/Solver/CFOP/PLLSolver.py | kazi92/rubikSolver | 46 | 12799219 | <filename>rubik_solver/Solver/CFOP/PLLSolver.py<gh_stars>10-100
from rubik_solver.Move import Move
from .. import Solver
class PLLSolver(Solver):
STEPS = {
"810345672": ["X", "R'", "U", "R'", "D2", "R", "U'", "R'", "D2", "R2", "X'"],
"018345276": ["X'", "R", "U'", "R", "D2", "R'", "U", "R", "D2", "R2", "X"],
"012743658": ["R2", "U", "R", "U", "R'", "U'", "R'", "U'", "R'", "U", "R'"],
"012547638": ["R", "U'", "R", "U", "R", "U", "R", "U'", "R'", "U'", "R2"],
"072543618": ["M2", "U", "M2", "U2", "M2", "U", "M2"],
"018543672": ["R", "U", "R'", "U'", "R'", "F", "R2", "U'", "R'", "U'", "R", "U", "R'", "F'"],
"230145678": ["R'", "U", "L'", "U2", "R", "U'", "R'", "U2", "R", "L", "U'"],
"018347652": ["R", "U", "R'", "F'", "R", "U", "R'", "U'", "R'", "F", "R2", "U'", "R'", "U'"],
"210745638": ["L", "U2", "L'", "U2", "L", "F'", "L'", "U'", "L", "U", "L", "F", "L2", "U"],
"210347658": ["R'", "U2", "R", "U2", "R'", "F", "R", "U", "R'", "U'", "R'", "F'", "R2", "U'"],
"852341670": ["R'", "U", "R'", "Y", "U'", "R'", "F'", "R2", "U'", "R'", "U", "R'", "F", "R", "F"],
"650143278": ["R2", "Y", "D", "R'", "U", "R'", "U'", "R", "Y'", "D'", "R2", "Y'", "R'", "U", "R"],
"832745016": ["R'", "U'", "R", "Y", "R2", "Y", "D", "R'", "U", "R", "U'", "R", "Y'", "D'", "R2"],
"812743056": ["R2", "Y'", "D'", "R", "U'", "R", "U", "R'", "Y", "D", "R2", "Y", "R", "U'", "R'"],
"670145238": ["R", "U", "R'", "Y'", "R2", "Y'", "D'", "R", "U'", "R'", "U", "R'", "Y", "D", "R2"],
"012543876": ["R'", "U2", "R'", "Y", "U'", "R'", "F'", "R2", "U'", "R'", "U", "R'", "F", "R", "U'", "F"],
"032147658": ["M2", "U", "M2", "U", "M'", "U2", "M2", "U2", "M'", "U2"],
"832145670": ["F", "R", "U'", "R'", "U'", "R", "U", "R'", "F'", "R", "U", "R'", "U'", "R'", "F", "R", "F'"],
"872345610": ["L", "U'", "R", "U2", "L'", "U", "R'", "L", "U'", "R", "U2", "L'", "U", "R'", "U"],
"076345218": ["R'", "U", "L'", "U2", "R", "U'", "L", "R'", "U", "L'", "U2", "R", "U'", "L", "U'"],
"618345072": ["X'", "R", "U'", "R'", "D", "R", "U", "R'", "D'", "R", "U", "R'", "D", "R", "U'", "R'", "D'", "X"]
}
@staticmethod
def get_orientations(cube):
cubies = ['BLU', 'BU', 'BRU', 'LU', 'U', 'RU', 'FLU', 'FU', 'FRU']
orientation = []
for cubie in cubies:
o = PLLSolver.get_correct_cubie(cube, cubie)
orientation.append(str(cubies.index(o)))
return ''.join(orientation)
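    # Added note: the returned string appears to record, for each of the nine
    # top-layer positions BLU..FRU, the index of the position whose cubie
    # currently matches it, so "012345678" would correspond to an already
    # permuted last layer and the keys of STEPS are the cases each PLL
    # algorithm fixes.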
def move(self, s, solution):
self.cube.move(Move(s))
solution.append(s)
@staticmethod
def get_correct_cubie(cube, cubie):
colors = [cube.cubies[c].facings[c].color for c in cubie.replace('U', '')]
return cube.search_by_colors('Y', *colors)
def solution(self):
solution = []
while True:
for _ in range(4):
self.move('U', solution)
for _ in range(4):
self.move('Y', solution)
orientation = PLLSolver.get_orientations(self.cube)
if orientation in PLLSolver.STEPS:
for s in PLLSolver.STEPS[orientation]:
self.move(s, solution)
return solution
# Apply shortest and expect to be solvable after that
for s in PLLSolver.STEPS["072543618"]:
self.move(s, solution)
return []
| 2.078125 | 2 |
isic/scripts/make_smaller_image_folder.py | estherrolf/representation-matters | 1 | 12799220 | import os
import shutil
import pandas as pd
import torch
import PIL.Image as Image
import torchvision.transforms as transforms
import time
t = transforms.Compose([transforms.Resize((224,224))])
data_dir = '../../data'
image_dir = os.path.join(data_dir, 'isic/Images')
def main(csv_filename, include_sonic):
if include_sonic:
new_image_dir = image_dir.replace('Images','ImagesSmallerWithSonic')
p = pd.read_csv(os.path.join(data_dir,csv_filename))
else:
new_image_dir = image_dir.replace('Images','ImagesSmaller')
p = pd.read_csv(os.path.join(data_dir,csv_filename))
image_names = p['image_name'].values
if not os.path.exists(new_image_dir):
print('making ',new_image_dir)
os.mkdir(new_image_dir)
t1 = time.time()
print('resizing images')
for i,image_name in enumerate(image_names):
if i % 1000 == 0:
t2 = time.time()
print(i, t2-t1)
original = os.path.join(image_dir, image_name)
target = os.path.join(new_image_dir, image_name)
#shutil.copyfile(original, target)
#print(image_name)
img = Image.open(os.path.join(image_dir,image_name))
# tranform
img_t = t(img).convert("RGB")
img_t.save(os.path.join(new_image_dir,image_name),"JPEG")
if __name__ == '__main__':
main(csv_filename='isic/df_with_sonic_age_over_50_id.csv',include_sonic=False) | 2.640625 | 3 |
misc/migrations/versions/4c4c7189593e_.py | davewood/do-portal | 0 | 12799221 | <filename>misc/migrations/versions/4c4c7189593e_.py
"""empty message
Revision ID: 4c4c7189593e
Revises: 4b4e6d96c630
Create Date: 2017-04-04 12:37:27.512719
"""
# revision identifiers, used by Alembic.
revision = '4c4c7189593e'
down_revision = '4b4e6d96c630'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('organization_user_roles_name_german_key', 'organization_user_roles', type_='unique')
op.drop_constraint('organization_user_roles_name_key', 'organization_user_roles', type_='unique')
op.drop_column('organization_user_roles', 'deleted')
op.drop_column('organization_user_roles', 'name_german')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('organization_user_roles', sa.Column('name_german', sa.VARCHAR(length=255), autoincrement=False, nullable=False))
op.add_column('organization_user_roles', sa.Column('deleted', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.create_unique_constraint('organization_user_roles_name_key', 'organization_user_roles', ['name'])
op.create_unique_constraint('organization_user_roles_name_german_key', 'organization_user_roles', ['name_german'])
### end Alembic commands ###
| 1.539063 | 2 |
main.py | Abhinavka369/snake_game_with_python | 0 | 12799222 | from turtle import Screen
from snake import Snake
from food import Food
from scoreboard import Score
import time
screener = Screen()
screener.setup(width=600, height=600)
screener.bgcolor("black")
screener.title("SNAKE GAME")
screener.tracer(0)
snake = Snake()
food = Food()
scoreboard = Score()
screener.listen()
screener.onkey(snake.up, "Up")
screener.onkey(snake.down, "Down")
screener.onkey(snake.left, "Left")
screener.onkey(snake.right, "Right")
game_is_on = True
while game_is_on:
screener.update()
time.sleep(.1)
snake.move()
# Collision with food
if snake.head.distance(food) < 15:
food.refresh()
snake.extent()
scoreboard.increase_score()
# Detect collision with wall
if snake.head.xcor() > 280 or snake.head.xcor() < -280 or snake.head.ycor() > 280 or snake.head.ycor() < -280:
scoreboard.reset()
snake.reset()
# Detect collision with tail
for segment in snake.segments[1:]:
if segment == snake.head:
pass
elif snake.head.distance(segment) < 10:
scoreboard.reset()
snake.reset()
screener.exitonclick()
| 3.484375 | 3 |
sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_azure_machine_learning_workspaces_enums.py | rsdoherty/azure-sdk-for-python | 2,728 | 12799223 | <filename>sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_azure_machine_learning_workspaces_enums.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AllocationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Allocation state of the compute. Possible values are: steady - Indicates that the compute is
not resizing. There are no changes to the number of compute nodes in the compute in progress. A
compute enters this state when it is created and when no operations are being performed on the
compute to change the number of compute nodes. resizing - Indicates that the compute is
resizing; that is, compute nodes are being added to or removed from the compute.
"""
STEADY = "Steady"
RESIZING = "Resizing"
class ApplicationSharingPolicy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Policy for sharing applications on this compute instance among users of parent workspace. If
Personal, only the creator can access applications on this compute instance. When Shared, any
workspace user can access applications on this instance depending on his/her assigned role.
"""
PERSONAL = "Personal"
SHARED = "Shared"
class BillingCurrency(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Three lettered code specifying the currency of the VM price. Example: USD
"""
USD = "USD"
class ComputeInstanceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Current state of a ComputeInstance.
"""
CREATING = "Creating"
CREATE_FAILED = "CreateFailed"
DELETING = "Deleting"
RUNNING = "Running"
RESTARTING = "Restarting"
JOB_RUNNING = "JobRunning"
SETTING_UP = "SettingUp"
SETUP_FAILED = "SetupFailed"
STARTING = "Starting"
STOPPED = "Stopped"
STOPPING = "Stopping"
USER_SETTING_UP = "UserSettingUp"
USER_SETUP_FAILED = "UserSetupFailed"
UNKNOWN = "Unknown"
UNUSABLE = "Unusable"
class ComputeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of compute
"""
AKS = "AKS"
AML_COMPUTE = "AmlCompute"
COMPUTE_INSTANCE = "ComputeInstance"
DATA_FACTORY = "DataFactory"
VIRTUAL_MACHINE = "VirtualMachine"
HD_INSIGHT = "HDInsight"
DATABRICKS = "Databricks"
DATA_LAKE_ANALYTICS = "DataLakeAnalytics"
class EncryptionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates whether or not the encryption is enabled for the workspace.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class NodeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""State of the compute node. Values are idle, running, preparing, unusable, leaving and
preempted.
"""
IDLE = "idle"
RUNNING = "running"
PREPARING = "preparing"
UNUSABLE = "unusable"
LEAVING = "leaving"
PREEMPTED = "preempted"
class OperationName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Name of the last operation.
"""
CREATE = "Create"
START = "Start"
STOP = "Stop"
RESTART = "Restart"
REIMAGE = "Reimage"
DELETE = "Delete"
class OperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Operation status.
"""
IN_PROGRESS = "InProgress"
SUCCEEDED = "Succeeded"
CREATE_FAILED = "CreateFailed"
START_FAILED = "StartFailed"
STOP_FAILED = "StopFailed"
RESTART_FAILED = "RestartFailed"
REIMAGE_FAILED = "ReimageFailed"
DELETE_FAILED = "DeleteFailed"
class PrivateEndpointConnectionProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The current provisioning state.
"""
SUCCEEDED = "Succeeded"
CREATING = "Creating"
DELETING = "Deleting"
FAILED = "Failed"
class PrivateEndpointServiceConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The private endpoint connection status.
"""
PENDING = "Pending"
APPROVED = "Approved"
REJECTED = "Rejected"
DISCONNECTED = "Disconnected"
TIMEOUT = "Timeout"
class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The current deployment state of workspace resource. The provisioningState is to indicate states
for resource provisioning.
"""
UNKNOWN = "Unknown"
UPDATING = "Updating"
CREATING = "Creating"
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
class QuotaUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enum describing the unit of quota measurement.
"""
COUNT = "Count"
class ReasonCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The reason for the restriction.
"""
NOT_SPECIFIED = "NotSpecified"
NOT_AVAILABLE_FOR_REGION = "NotAvailableForRegion"
NOT_AVAILABLE_FOR_SUBSCRIPTION = "NotAvailableForSubscription"
class RemoteLoginPortPublicAccess(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh
port is closed on all nodes of the cluster. Enabled - Indicates that the public ssh port is
open on all nodes of the cluster. NotSpecified - Indicates that the public ssh port is closed
on all nodes of the cluster if VNet is defined, else is open all public nodes. It can be
default only during cluster creation time, after creation it will be either enabled or
disabled.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
NOT_SPECIFIED = "NotSpecified"
class ResourceIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The identity type.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned,UserAssigned"
NONE = "None"
class SshPublicAccess(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh
port is closed on this instance. Enabled - Indicates that the public ssh port is open and
accessible according to the VNet/subnet policy if applicable.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class SslConfigurationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Enable or disable ssl for scoring
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class Status(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Status of update workspace quota.
"""
UNDEFINED = "Undefined"
SUCCESS = "Success"
FAILURE = "Failure"
INVALID_QUOTA_BELOW_CLUSTER_MINIMUM = "InvalidQuotaBelowClusterMinimum"
INVALID_QUOTA_EXCEEDS_SUBSCRIPTION_LIMIT = "InvalidQuotaExceedsSubscriptionLimit"
INVALID_VM_FAMILY_NAME = "InvalidVMFamilyName"
OPERATION_NOT_SUPPORTED_FOR_SKU = "OperationNotSupportedForSku"
OPERATION_NOT_ENABLED_FOR_REGION = "OperationNotEnabledForRegion"
class UnderlyingResourceAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
DELETE = "Delete"
DETACH = "Detach"
class UnitOfMeasure(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The unit of time measurement for the specified VM price. Example: OneHour
"""
ONE_HOUR = "OneHour"
class UsageUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enum describing the unit of usage measurement.
"""
COUNT = "Count"
class VMPriceOSType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Operating system type used by the VM.
"""
LINUX = "Linux"
WINDOWS = "Windows"
class VmPriority(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Virtual Machine priority
"""
DEDICATED = "Dedicated"
LOW_PRIORITY = "LowPriority"
class VMTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of the VM.
"""
STANDARD = "Standard"
LOW_PRIORITY = "LowPriority"
SPOT = "Spot"
| 2.25 | 2 |
utils/cv_utils.py | RishavMishraRM/Rain_Emoji | 6 | 12799224 | import os
import cv2
import numpy as np
def get_emojis():
emojis_folder = 'emoji/'
emojis = []
for emoji in range(len(os.listdir(emojis_folder))):
print(emoji)
emojis.append(cv2.imread(emojis_folder + str(emoji) + '.png', -1))
return emojis[0:len(emojis) - 1]
def overlay(image, emoji, x, y, w, h):
emoji = cv2.resize(emoji, (w, h))
try:
image[y:y + h, x:x + w] = blend_transparent(image[y:y + h, x:x + w], emoji)
except:
pass
return image
def blend_transparent(face_img, overlay_t_img):
# Split out the transparency mask from the colour info
overlay_img = overlay_t_img[:, :, :3] # Grab the BRG planes
overlay_mask = overlay_t_img[:, :, 3:] # And the alpha plane
# Again calculate the inverse mask
background_mask = 255 - overlay_mask
    # Turn the masks into three channels so we can use them as weights
overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)
# Create a masked out face image, and masked out overlay
# We convert the images to floating point in range 0.0 - 1.0
face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))
# And finally just add them together, and rescale it back to an 8bit integer image
return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0))
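# Hedged usage sketch (file names and coordinates are assumptions, not part of
# this module):
#
#   frame = cv2.imread('frame.jpg')           # 3-channel BGR camera frame
#   emojis = get_emojis()                     # 4-channel RGBA emoji sprites
#   frame = overlay(frame, emojis[0], x=100, y=50, w=64, h=64)
#
# overlay() resizes the emoji and calls blend_transparent(), which uses the
# emoji's alpha plane as a per-pixel weight between the image crop and sprite.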
def rescale_frame(frame, percent=75):
width = int(frame.shape[1] * percent / 100)
height = int(frame.shape[0] * percent / 100)
dim = (width, height)
return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
| 3.421875 | 3 |
config.py | bvezilic/Neural-style-transfer | 3 | 12799225 | <filename>config.py
from pathlib import Path
ROOT_DIR = Path(__file__).parent.resolve()
IMAGE_DIR = ROOT_DIR / 'images'
| 1.828125 | 2 |
server.py | Ryan-Amaral/pi-cluster-vis | 0 | 12799226 | <gh_stars>0
import socket
import json
from _thread import start_new_thread
#from sense_hat import SenseHat
from optparse import OptionParser
import matplotlib.pyplot as plt
import time
parser = OptionParser()
parser.add_option('-i', '--ip', type='string', dest='ip', default='127.0.0.1')
parser.add_option('-p', '--port', type='int', dest='port', default=5005)
parser.add_option('-n', '--numNodes', type='int', dest='numNodes', default=15)
parser.add_option('-u', '--update', type='float', dest='update', default=0.2)
(options, args) = parser.parse_args()
#sense = SenseHat()
#sense.clear()
# temp = sense.get_temperature()
def clientReceiver():
# create server
s = socket.socket()
s.bind((options.ip, options.port))
s.listen(options.numNodes)
# keep main in here to accept connections
while True:
con, _ = s.accept()
start_new_thread(dataStream, (con,))
visDatas = {} # store all data to visualize
# continually stream in data in separate threads
def dataStream(con):
while True:
data = con.recv(1024).decode('utf-8')
if data == '':
break
mDict = json.loads(data)
uid = mDict['uid']
if uid not in visDatas:
visDatas[uid] = {'mem_use':[], 'cpu_use':[]}
visDatas[uid]['mem_use'].append(mDict['mem_use'])
visDatas[uid]['cpu_use'].append(mDict['cpu_use'])
print(mDict)
start_new_thread(clientReceiver, ())
plt.ion() # for live update plot
# plotting stuff
fig = plt.figure()
axRam = plt.subplot(2,1,1)
axCpu = plt.subplot(2,1,2)
# colors of lines
cols = ['C'+str(i%10) for i in range(options.numNodes)]
# styles of lines
lins = ['-']*10 + ['--']*10 + ['-.']*10 # manually update if need more
maxX = 20
while True:
axRam.cla()
axCpu.cla()
for i, uid in enumerate(list(visDatas.keys())):
l = len(visDatas[uid]['mem_use'])
axRam.plot(visDatas[uid]['mem_use'][max(0, l-maxX):l],
color=cols[i], linestyle=lins[i], label=uid)
axCpu.plot(visDatas[uid]['cpu_use'][max(0, l-maxX):l],
color=cols[i], linestyle=lins[i], label=uid)
if len(visDatas[uid]['mem_use']) > maxX:
visDatas[uid]['mem_use'] = visDatas[uid]['mem_use'][len(visDatas[uid]['mem_use'])-20:]
if len(visDatas[uid]['cpu_use']) > maxX:
visDatas[uid]['cpu_use'] = visDatas[uid]['cpu_use'][len(visDatas[uid]['cpu_use'])-20:]
axRam.set_title('RAM Usage of Nodes')
axRam.set_ylabel('RAM (GB)')
axRam.get_xaxis().set_visible(False)
axRam.legend(loc='upper left')
axRam.set_ylim(0,1.05)
axCpu.set_title('CPU Usage of Nodes (4 Cores)')
axCpu.set_ylabel('CPU %')
axCpu.get_xaxis().set_visible(False)
axCpu.legend(loc='upper left')
axCpu.set_ylim(0,105)
plt.draw()
fig.canvas.start_event_loop(options.update)
| 2.59375 | 3 |
FargoNorth.py | jonschull/Lyte | 1 | 12799227 | <filename>FargoNorth.py
import lyte
secret_number = 4
def shift(c, howMuch= 1):
return chr(ord(c) + howMuch)
def encoder(message):
message=list(message)
for i in range( len(message) ):
message[i] = shift( message[i], howMuch= secret_number )
return ''.join(message)
def decoder(message):
message=list(message)
for i in range( len(message) ):
message[i] = shift(message[i], howMuch= - secret_number )
return ''.join(message)
if __name__ == '__main__':
message = 'this is a test'
encoded_message = encoder(message)
lyte.say(f' {message} -->encoded--> {encoded_message} -->decoded--> {decoder(encoded_message)} ')
lyte.webMe()
| 2.625 | 3 |
ngraph/frontends/caffe2/tests/test_ops_unary.py | NervanaSystems/ngraph-python | 18 | 12799228 | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function
from caffe2.python import core, workspace
from ngraph.frontends.caffe2.c2_importer.importer import C2Importer
from ngraph.testing import ExecutorFactory
import numpy as np
import random as random
def run_all_close_compare_initiated_with_random_gauss(c2_op_name,
shape=None,
data=None,
expected=None):
workspace.ResetWorkspace()
if not shape:
shape = [2, 7]
if not data:
data = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]
net = core.Net("net")
net.GivenTensorFill([], "X", shape=shape, values=data, name="X")
getattr(net, c2_op_name)(["X"], ["Y"], name="Y")
# Execute via Caffe2
workspace.RunNetOnce(net)
# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)
# Get handle
f_ng = importer.get_op_handle("Y")
# Execute
with ExecutorFactory() as ex:
f_result = ex.executor(f_ng)()
c2_y = workspace.FetchBlob("Y")
# compare Caffe2 and ngraph results
assert(np.allclose(f_result, c2_y, atol=1e-4, rtol=0, equal_nan=False))
# compare expected results and ngraph results
if expected:
assert(np.allclose(f_result, expected, atol=1e-3, rtol=0, equal_nan=False))
def test_relu():
run_all_close_compare_initiated_with_random_gauss('Relu',
shape=[10, 10])
def test_softmax():
shape = [2, 7]
data = [
1., 2., 3., 4., 1., 2., 3.,
1., 2., 3., 4., 1., 2., 3.
]
expected = [
[0.024, 0.064, 0.175, 0.475, 0.024, 0.064, 0.175],
[0.024, 0.064, 0.175, 0.475, 0.024, 0.064, 0.175],
]
run_all_close_compare_initiated_with_random_gauss('Softmax',
shape=shape,
data=data,
expected=expected)
def test_negative():
run_all_close_compare_initiated_with_random_gauss('Negative')
def test_sigmoid():
run_all_close_compare_initiated_with_random_gauss('Sigmoid')
def test_tanh():
run_all_close_compare_initiated_with_random_gauss('Tanh')
def test_exp():
workspace.ResetWorkspace()
shape = [2, 7]
data = [
1., 2., 3., 4., 1., 2., 3.,
1., 2., 3., 4., 1., 2., 3.
]
expected = [
[2.71828, 7.3890, 20.08553, 54.59815, 2.71828, 7.3890, 20.08553],
[2.71828, 7.3890, 20.08553, 54.59815, 2.71828, 7.3890, 20.08553],
]
run_all_close_compare_initiated_with_random_gauss('Exp',
shape=shape,
data=data,
expected=expected)
def test_NCHW2NHWC():
workspace.ResetWorkspace()
# NCHW
shape = [2, 3, 4, 5]
data1 = [float(i) for i in range(np.prod(shape))]
net = core.Net("net")
X = net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
X.NCHW2NHWC([], ["Y"], name="Y")
# Execute via Caffe2
workspace.RunNetOnce(net)
# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)
# Get handle
f_ng = importer.get_op_handle("Y")
# Execute
with ExecutorFactory() as ex:
f_result = ex.executor(f_ng)()
# compare Caffe2 and ngraph results
assert(np.array_equal(f_result, workspace.FetchBlob("Y")))
def test_NHWC2NCHW():
workspace.ResetWorkspace()
# NHWC
shape = [2, 3, 4, 5]
data1 = [float(i) for i in range(np.prod(shape))]
net = core.Net("net")
X = net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
    X.NHWC2NCHW([], ["Y"], name="Y")
# Execute via Caffe2
workspace.RunNetOnce(net)
# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)
# Get handle
f_ng = importer.get_op_handle("Y")
# Execute
with ExecutorFactory() as ex:
f_result = ex.executor(f_ng)()
# compare Caffe2 and ngraph results
assert(np.array_equal(f_result, workspace.FetchBlob("Y")))
| 2.0625 | 2 |
analysis/froude.py | SalishSeaCast/2d-domain | 0 | 12799229 | # This is a module with functions that can be used to calculate the Froude
# number in a simple 2D system
# <NAME>, 2015
import numpy as np
import datetime
from salishsea_tools.nowcast import analyze
def find_mixed_depth_indices(n2, n2_thres=5e-6):
"""Finds the index of the mixed layer depth for each x-position.
The mixed layer depth is chosen based on the lowest near-surface vertical
grid cell where n2 >= n2_thres
    A reasonable value for n2_thres is 5e-6.
If n2_thres = 'None' then the index of the maximum n2 is returned.
n2 is the masked array of buoyancy frequencies with dimensions (depth, x)
returns a list of indices of mixed layer depth cell for each x-position
"""
if n2_thres == 'None':
dinds = np.argmax(n2, axis=0)
else:
dinds = []
for ii in np.arange(n2.shape[-1]):
inds = np.where(n2[:, ii] >= n2_thres)
# exlclude first vertical index less <=1 because the
# buoyancy frequency is hard to define there
if inds[0].size:
inds = filter(lambda x: x > 1, inds[0])
if inds:
dinds.append(min(inds))
else:
dinds.append(0) # if no mixed layer depth found, set to 0
else:
dinds.append(0) # if no mixed layer depth found, set it to 0
return dinds
def average_mixed_layer_depth(mixed_depths, xmin, xmax):
"""Averages the mixed layer depths over indices xmin and xmax
mixed_depths is a 1d array of mixed layer depths
returns the mean mixed layer depth in the defined region
"""
mean_md = np.mean(mixed_depths[xmin:xmax+1])
return mean_md
def mld_time_series(n2, deps, times, time_origin,
xmin=300, xmax=700, n2_thres=5e-6):
"""Calculates the mean mixed layer depth in a region defined by
xmin and xmax over time
n2 is the buoyancy frequency array with dimensions (time, depth, x)
deps is the model depth array
times is the model time_counter array
time_origin is the model's time_origin as a datetime
returns a list of mixed layer depths mlds and dates
"""
mlds = []
dates = []
for t in np.arange(n2.shape[0]):
dinds = find_mixed_depth_indices(n2[t, ...], n2_thres=n2_thres)
mld = average_mixed_layer_depth(deps[dinds], xmin, xmax,)
mlds.append(mld)
dates.append(time_origin + datetime.timedelta(seconds=times[t]))
return mlds, dates
def calculate_density(t, s):
"""Caluclates the density given temperature in deg C (t)
and salinity in psu (s).
returns the density as an array (rho)
"""
rho = (
999.842594 + 6.793952e-2 * t
- 9.095290e-3 * t*t + 1.001685e-4 * t*t*t
- 1.120083e-6 * t*t*t*t + 6.536332e-9 * t*t*t*t*t
+ 8.24493e-1 * s - 4.0899e-3 * t*s
+ 7.6438e-5 * t*t*s - 8.2467e-7 * t*t*t*s
+ 5.3875e-9 * t*t*t*t*s - 5.72466e-3 * s**1.5
+ 1.0227e-4 * t*s**1.5 - 1.6546e-6 * t*t*s**1.5
+ 4.8314e-4 * s*s
)
return rho
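# Added worked example (illustrative, not model output): for t = 10 deg C and
# s = 30 psu the polynomial above gives roughly 1023 kg/m^3, a typical
# near-surface seawater density; fresh water at the same temperature comes out
# near 999.7 kg/m^3.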
def calculate_internal_wave_speed(rho, deps, dinds):
"""Calculates the internal wave speed
c = sqrt(g*(rho2-rho1)/rho2*h1)
where g is acceleration due to gravity, rho2 is denisty of lower layer,
rho1 is density of upper layer and h1 is thickness of upper layer.
rho is the model density (shape is depth, x), deps is the array of depths
and dinds is a list of indices that define the mixed layer depth.
rho must be a masked array
returns c, an array of internal wave speeds at each x-index in rho
"""
# acceleration due to gravity (m/s^2)
g = 9.81
# calculate average density in upper and lower layers
rho_1 = np.zeros((rho.shape[-1]))
rho_2 = np.zeros((rho.shape[-1]))
for ind, d in enumerate(dinds):
rho_1[ind] = analyze.depth_average(rho[0:d+1, ind],
deps[0:d+1], depth_axis=0)
rho_2[ind] = analyze.depth_average(rho[d+1:, ind],
deps[d+1:], depth_axis=0)
# calculate mixed layer depth
h_1 = deps[dinds]
# calcuate wave speed
c = np.sqrt(g*(rho_2-rho_1)/rho_2*h_1)
return c
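# Added worked example (illustrative two-layer values): with an upper layer of
# thickness h_1 = 10 m and rho_1 = 1020 kg/m^3 over rho_2 = 1024 kg/m^3,
#   c = sqrt(9.81 * (1024 - 1020) / 1024 * 10) ~ 0.62 m/s,
# which is the internal wave speed the depth-averaged current is compared
# against in the Froude number below.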
def depth_averaged_current(u, deps):
"""Calculates the depth averaged current
u is the array with current speeds (shape is depth, x).
u must be a masked array
deps is the array of depths
returns u_avg, the depths averaged current (shape x)
"""
u_avg = analyze.depth_average(u, deps, depth_axis=0)
return u_avg
def calculate_froude_number(n2, rho, u, deps, depsU, n2_thres=5e-6):
"""Calculates the Froude number
n2, rho, u are buoyancy frequency, density and current arrays
(shape depth, x)
deps is the depth array
depsU is the depth array at U poinnts
returns: Fr, c, u_avg - the Froude number, wave speed, and depth averaged
velocity for each x-index
"""
# calculate mixed layers
dinds = find_mixed_depth_indices(n2, n2_thres=n2_thres)
# calculate internal wave speed
c = calculate_internal_wave_speed(rho, deps, dinds)
# calculate depth averaged currents
u_avg = depth_averaged_current(u, depsU)
    # Froude number
Fr = np.abs(u_avg)/c
return Fr, c, u_avg
def froude_time_series(n2, rho, u, deps, depsU, times, time_origin,
xmin=300, xmax=700, n2_thres=5e-6):
"""Calculates the Froude number time series
n2, rho, u are buoyancy frequency, density and current arrays
(shape time, depth, x)
deps is the model depth array
depsU is the model deps array at U points
times is the model time_counter array
    time_origin is the model's time_origin as a datetime
xmin,xmax define the averaging area
returns: Frs, cs, u_avgs, dates
the Froude number, internal wave speed, and depth averaged current
for each time associated with dates
"""
Frs = []
cs = []
u_avgs = []
dates = []
for t in np.arange(n2.shape[0]):
Fr, c, u_avg = calculate_froude_number(n2[t, ...], rho[t, ...],
u[t, ...], deps, depsU,
n2_thres=n2_thres)
Frs.append(np.mean(Fr[xmin:xmax+1]))
cs.append(np.mean(c[xmin:xmax+1]))
u_avgs.append(np.mean(u_avg[xmin:xmax+1]))
dates.append(time_origin + datetime.timedelta(seconds=times[t]))
return Frs, cs, u_avgs, dates
def calculate_buoyancy_frequency(temp, sal, e3, depth_axis=1):
""" Calculate the squared buoyancy frequency (n2) given temperature and
    salinity profiles. N2 is set to g*drho/dz/rho. Note that NEMO uses a definition based on an equation of state: g* (alpha dk[T] + beta dk[S] ) / e3w
temp and sal are the temperature and salinity arrays
e3 is an array of the vertical scale factors (grid spacing). Use e3w for
    consistency with NEMO.
depth_axis defines the axis which corresponds to depth in the temp/sal
arrays
returns n2, an array of square buoyancy frequency at each point in temp/sal.
"""
# acceleration due to gravity
g = 9.80665
# First calculate density.
rho = calculate_density(temp, sal)
# Density gradient
drho = np.zeros(rho.shape)
# roll depth axis in rho and drho to first axis
# assume e3 already has depth axis in first axis
drho_r = np.rollaxis(drho, depth_axis)
rho_r = np.rollaxis(rho, depth_axis)
for k in np.arange(1, drho.shape[depth_axis]-1):
drho_r[k, ...] = 1/e3[k, ...]*(rho_r[k+1, ...] - rho_r[k, ...])
# Unroll drho
drho = np.rollaxis(drho_r, 0, depth_axis+1)
rho = np.rollaxis(rho_r, 0, depth_axis+1)
# Define N2
    n2 = g*drho/rho  # no negative because depth increases with increasing k
return n2
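if __name__ == '__main__':
    # Minimal self-contained check added for illustration; the profile and
    # 1 m grid spacing are invented, not SalishSeaCast output. A two-layer
    # column should give a single sharp n2 peak at the interface.
    nz, nx = 20, 4
    temp = np.full((nz, nx), 8.0)      # uniform temperature, deg C
    sal = np.full((nz, nx), 30.0)      # upper-layer salinity, psu
    sal[10:, :] = 31.0                 # saltier (denser) lower layer
    e3 = np.ones((nz, nx))             # vertical scale factors, m
    n2_demo = calculate_buoyancy_frequency(temp, sal, e3, depth_axis=0)
    print('max n2 (s^-2):', n2_demo.max(), 'at k =', int(n2_demo.argmax(axis=0)[0]))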
| 2.6875 | 3 |
train/openem_train/ssd/ssd_training.py | bryan-flywire/openem | 10 | 12799230 | __copyright__ = "Copyright (C) 2018 CVision AI."
__license__ = "GPLv3"
# This file is part of OpenEM, released under GPLv3.
# OpenEM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenEM. If not, see <http://www.gnu.org/licenses/>.
"""SSD training utils.
"""
import tensorflow as tf
def _l1_smooth_loss(y_true, y_pred):
"""Compute L1-smooth loss.
# Arguments
y_true: Ground truth bounding boxes,
tensor of shape (?, num_boxes, 4).
y_pred: Predicted bounding boxes,
tensor of shape (?, num_boxes, 4).
# Returns
l1_loss: L1-smooth loss, tensor of shape (?, num_boxes).
# References
https://arxiv.org/abs/1504.08083
"""
abs_loss = tf.abs(y_true - y_pred)
sq_loss = 0.5 * (y_true - y_pred)**2
l1_loss = tf.where(tf.less(abs_loss, 1.0), sq_loss, abs_loss - 0.5)
return tf.reduce_sum(l1_loss, -1)
def _softmax_loss(y_true, y_pred):
"""Compute softmax loss.
# Arguments
y_true: Ground truth targets,
tensor of shape (?, num_boxes, num_classes).
y_pred: Predicted logits,
tensor of shape (?, num_boxes, num_classes).
# Returns
softmax_loss: Softmax loss, tensor of shape (?, num_boxes).
"""
y_pred = tf.maximum(tf.minimum(y_pred, 1 - 1e-15), 1e-15)
softmax_loss = -tf.reduce_sum(y_true * tf.log(y_pred),
axis=-1)
return softmax_loss
class MultiboxLoss:
"""Multibox loss with some helper functions.
# Arguments
num_classes: Number of classes including background.
alpha: Weight of L1-smooth loss.
neg_pos_ratio: Max ratio of negative to positive boxes in loss.
background_label_id: Id of background label.
negatives_for_hard: Number of negative boxes to consider
            if there are no positive boxes in the batch.
# References
https://arxiv.org/abs/1512.02325
"""
def __init__(self, num_classes, alpha=1.0, neg_pos_ratio=3.0,
background_label_id=0, negatives_for_hard=100.0,
pos_cost_multiplier=1.0):
self.pos_cost_multiplier = pos_cost_multiplier
self.num_classes = num_classes
self.alpha = alpha
self.neg_pos_ratio = neg_pos_ratio
if background_label_id != 0:
raise Exception('Only 0 as background label id is supported')
self.background_label_id = background_label_id
self.negatives_for_hard = negatives_for_hard
def compute_loss(self, y_true, y_pred):
"""Compute mutlibox loss.
# Arguments
y_true: Ground truth targets,
tensor of shape (?, num_boxes, 4 + num_classes + 8),
priors in ground truth are fictitious,
y_true[:, :, -8] has 1 if prior should be penalized
or in other words is assigned to some ground truth box,
y_true[:, :, -7:] are all 0.
y_pred: Predicted logits,
tensor of shape (?, num_boxes, 4 + num_classes + 8).
# Returns
loss: Loss for prediction, tensor of shape (?,).
"""
batch_size = tf.shape(y_true)[0]
num_boxes = tf.to_float(tf.shape(y_true)[1])
# loss for all priors
conf_loss = _softmax_loss(y_true[:, :, 4:-8],
y_pred[:, :, 4:-8])
loc_loss = _l1_smooth_loss(y_true[:, :, :4],
y_pred[:, :, :4])
# get positives loss
num_pos = tf.reduce_sum(y_true[:, :, -8], axis=-1)
pos_loc_loss = tf.reduce_sum(loc_loss * y_true[:, :, -8],
axis=1)
pos_conf_loss = tf.reduce_sum(conf_loss * y_true[:, :, -8],
axis=1)
# get negatives loss, we penalize only confidence here
num_neg = tf.minimum(self.neg_pos_ratio * num_pos,
num_boxes - num_pos)
pos_num_neg_mask = tf.greater(num_neg, 0)
has_min = tf.to_float(tf.reduce_any(pos_num_neg_mask))
num_neg = tf.concat(
axis=0,
values=[num_neg, [(1 - has_min) * self.negatives_for_hard]])
num_neg_batch = tf.reduce_min(tf.boolean_mask(num_neg,
tf.greater(num_neg, 0)))
num_neg_batch = tf.to_int32(num_neg_batch)
confs_start = 4 + self.background_label_id + 1
confs_end = confs_start + self.num_classes - 1
max_confs = tf.reduce_max(y_pred[:, :, confs_start:confs_end],
axis=2)
_, indices = tf.nn.top_k(max_confs * (1 - y_true[:, :, -8]),
k=num_neg_batch)
batch_idx = tf.expand_dims(tf.range(0, batch_size), 1)
batch_idx = tf.tile(batch_idx, (1, num_neg_batch))
full_indices = (tf.reshape(batch_idx, [-1]) * tf.to_int32(num_boxes) +
tf.reshape(indices, [-1]))
# full_indices = tf.concat(2, [tf.expand_dims(batch_idx, 2),
# tf.expand_dims(indices, 2)])
# neg_conf_loss = tf.gather_nd(conf_loss, full_indices)
neg_conf_loss = tf.gather(tf.reshape(conf_loss, [-1]),
full_indices)
neg_conf_loss = tf.reshape(neg_conf_loss,
[batch_size, num_neg_batch])
neg_conf_loss = tf.reduce_sum(neg_conf_loss, axis=1)
# loss is sum of positives and negatives
total_loss = pos_conf_loss * self.pos_cost_multiplier + neg_conf_loss
total_loss /= (num_pos + tf.to_float(num_neg_batch))
num_pos = tf.where(tf.not_equal(num_pos, 0), num_pos,
tf.ones_like(num_pos))
total_loss += (self.alpha * pos_loc_loss) / num_pos
return total_loss
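# Hedged usage sketch (not part of the original file): wiring the loss into a
# Keras SSD model. The class count and the `model` object are assumptions.
if __name__ == "__main__":
    multibox_loss = MultiboxLoss(num_classes=2, neg_pos_ratio=2.0)
    # An SSD network built elsewhere would then be compiled with:
    # model.compile(optimizer="adam", loss=multibox_loss.compute_loss)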
| 1.953125 | 2 |
activerest/connections.py | datashaman/activerest | 0 | 12799231 | import activerest.formats.json_format
import requests
from furl import furl
HTTP_FORMAT_HEADER_NAMES = {
'GET': 'Accept',
'PUT': 'Content-Type',
'POST': 'Content-Type',
'PATCH': 'Content-Type',
'DELETE': 'Accept',
'HEAD': 'Accept',
}
class Connection(object):
_site = None
_format = None
_auth_type = 'basic'
username = None
password = <PASSWORD>
_timeout = None
_open_timeout = None
_read_timeout = None
_default_header = None
proxies = None
requests = []
def __init__(self, site, format=activerest.formats.json_format):
self.site = site
self.format = format
@property
def site(self):
return self._site
@site.setter
def site(self, site):
if isinstance(self._site, furl):
self._site = site
else:
self._site = furl(site)
if self._site.username:
self.username = self._site.username
if self._site.password:
self.password = <PASSWORD>
@property
def auth_type(self):
return self._auth_type
@auth_type.setter
def auth_type(self, auth_type):
if auth_type in ['basic', 'digest']:
self._auth_type = auth_type
else:
raise ValueError("auth_type must be 'basic' or 'digest'")
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, timeout):
if isinstance(timeout, (float, int, tuple)):
self._timeout = timeout
else:
raise ValueError('timeout must be an instance of float, int or tuple')
@property
def open_timeout(self):
return self._open_timeout
@open_timeout.setter
def open_timeout(self, open_timeout):
if isinstance(open_timeout, (float, int)):
self._open_timeout = open_timeout
else:
raise ValueError('open_timeout must be an instance of float or int')
@property
def read_timeout(self):
return self._read_timeout
@read_timeout.setter
def read_timeout(self, read_timeout):
if isinstance(read_timeout, (float, int)):
self._read_timeout = read_timeout
else:
raise ValueError('read_timeout must be an instance of float or int')
def get(self, path, **kwargs):
return self._request('GET', path, **kwargs)
def delete(self, path, **kwargs):
return self._request('DELETE', path, **kwargs)
def patch(self, path, **kwargs):
return self._request('PATCH', path, **kwargs)
def put(self, path, **kwargs):
return self._request('PUT', path, **kwargs)
def post(self, path, **kwargs):
return self._request('POST', path, **kwargs)
def head(self, path, **kwargs):
return self._request('HEAD', path, **kwargs)
def _request(self, method, path, **kwargs):
kwargs['headers'] = self.build_request_headers(kwargs.get('headers', {}), method)
if self.username and self.password:
if self._auth_type == 'basic':
auth_class = requests.auth.HTTPBasicAuth
if self._auth_type == 'digest':
auth_class = requests.auth.HTTPDigestAuth
kwargs['auth'] = auth_class(self.username, self.password)
if self.proxies:
kwargs['proxies'] = self.proxies
open_timeout = read_timeout = None
if self._timeout is not None:
if isinstance(self._timeout, tuple):
(open_timeout, read_timeout) = self._timeout
else:
open_timeout = read_timeout = self._timeout
if self._open_timeout is not None:
open_timeout = self._open_timeout
if self._read_timeout is not None:
read_timeout = self._read_timeout
if open_timeout or read_timeout:
kwargs['timeout'] = (open_timeout, read_timeout)
url = furl().set(scheme=self._site.scheme,
host=self._site.host,
port=self._site.port,
path=path)
response = requests.request(method, url, **kwargs)
return response
@property
def default_header(self):
if self._default_header:
return self._default_header
self._default_header = {}
return self._default_header
@default_header.setter
def default_header(self, default_header):
self._default_header = default_header
def build_request_headers(self, headers, method):
result = {}
result.update(self.default_header)
result.update(self.http_format_header(method))
result.update(headers)
return result
def http_format_header(self, method):
return {
HTTP_FORMAT_HEADER_NAMES[method]: self.format.mime_type(),
}
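# Hedged usage sketch (not part of the original module): the host and
# credentials below are placeholders for a reachable REST endpoint.
if __name__ == "__main__":
    conn = Connection("https://user:secret@api.example.com")
    conn.timeout = 5
    # response = conn.get("/widgets.json")  # requires a live endpoint
    # print(response.status_code)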
| 2.359375 | 2 |
src/ExampleNets/pythonScripts/eabLatVolData.py | benjaminlarson/SCIRunGUIPrototype | 0 | 12799232 | latvolMod = addModule("CreateLatVol")
size = 10
latvolMod.XSize = size
latvolMod.YSize = size
latvolMod.ZSize = size
latvolMod.DataAtLocation = "Nodes"
report1 = addModule("ReportFieldInfo")
latvolMod.output[0] >> report1.input[0]
data = {}
addDataMod = addModule("CreateScalarFieldDataBasic")
#eval.Operator = 2
#eval.Scalar = i
report2 = addModule("ReportFieldInfo")
addDataMod.output[0] >> report2.input[0]
show = addModule("ShowField")
latvolMod.output[0] >> addDataMod.input[0]
addDataMod.output[0] >> show.input.Field
view = addModule("ViewScene")
show.output[0] >> view.input[0]
view.showUI()
executeAll()
removeModule(view.id)
view = addModule("ViewScene")
show.output[0] >> view.input[0]
view.showUI()
executeAll()
#executeAll()
#executeAll() | 1.84375 | 2 |
tests/conftest.py | showtatsu/python-bind9zone | 0 | 12799233 | <reponame>showtatsu/python-bind9zone
import pytest, os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from bind9zone.cli import Bind9ZoneCLI
ZONEDIR_SRC = 'tests/input'
ZONEDIR = 'tests/output'
def get_connection_fixture_params():
if os.getenv('TEST_POSTGRES'):
return ['sqlite:///tests/db.sqlite3',
'postgresql://postgres:postgres@db/database']
else:
return ['sqlite:///tests/db.sqlite3']
@pytest.fixture()
def zonedir_src():
return ZONEDIR_SRC
@pytest.fixture()
def zonedir():
return ZONEDIR
@pytest.fixture(scope='module', params=get_connection_fixture_params())
def connection(request):
""" pytest対象モジュールの引数に"connection"を指定すると、
このfixtureが実行され、データベースの初期化を行った上でconnection文字列を返します。
1つのモジュール内(pyファイル)から複数回使用された場合でも、データベースの初期化処理が
行われるのは各モジュールあたり最初の一回だけです。
"""
connection = request.param
con = ['--connection', connection]
Bind9ZoneCLI(['init', *con, '--drop']).run()
Bind9ZoneCLI(['bulkpush', *con,
'--dir', ZONEDIR_SRC,
'--zones', 'public/example.com,private/example.com'
]).run()
return connection
@pytest.fixture(scope='function')
def session_factory(connection):
""" pytest対象モジュールの引数に"session_factory"を指定すると、
このfixtureが実行され、データベースの初期化を行った上でSQLAlchemyのscoped_sessionを返します。
1つのモジュール内(pyファイル)から複数回使用された場合でも、データベースの初期化処理が行われるのは
各モジュールあたり最初の一回だけです。
"""
engine = create_engine(connection)
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)
return Session
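# Hedged usage sketch (belongs in a separate test module, not in conftest.py):
#
#     def test_select_one(session_factory):
#         session = session_factory()
#         assert session.execute("SELECT 1").scalar() == 1
#         session_factory.remove()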
| 2.046875 | 2 |
run_all_tests.py | ricdezen/random-algo-stuff | 0 | 12799234 | import os
from setuptools import find_packages
if __name__ == '__main__':
for package in find_packages():
if '.test' in package:
continue
os.system(f'cmd /c "python -m pytest -s {package}/test"')
| 1.632813 | 2 |
config_test/path_test.py | saustinp/3D-CG | 0 | 12799235 | import logging.config
import logging.handlers
logger = logging.getLogger()
logger.setLevel(logging.INFO)
smtp_handler = logging.handlers.SMTPHandler(mailhost=('outgoing.mit.edu', 465),
fromaddr='<EMAIL>',
toaddrs=['<EMAIL>'],
subject='Sample Log Mail',
credentials=('austinsp','<PASSWORD>'),
secure=None)
logger.addHandler(smtp_handler)
logger.info("logger configured") | 2.234375 | 2 |
naucse/freezer.py | OndSeb/naucse.python.cz | 0 | 12799236 | <reponame>OndSeb/naucse.python.cz<filename>naucse/freezer.py<gh_stars>0
import contextlib
from collections import deque
from flask import current_app
from flask_frozen import UrlForLogger, Freezer
def record_url(url):
"""Logs that `url` should be included in the resulting static site"""
urls_to_freeze = current_app.config.get('NAUCSE_ABSOLUTE_URLS_TO_FREEZE')
if urls_to_freeze is not None:
urls_to_freeze.append(url)
class AllLinksLogger(UrlForLogger):
"""Logs ``url_for`` calls, but yields urls from ``absolute_urls_to_freeze`` as well.
"""
def __init__(self, app, urls_to_freeze):
super().__init__(app)
self.naucse_urls_to_freeze = urls_to_freeze
def iter_calls(self):
"""Yield all logged urls and links parsed from content.
"""
# Unfortunately, ``yield from`` cannot be used as the queues are
# modified on the go.
while self.logged_calls or self.naucse_urls_to_freeze:
while self.logged_calls:
yield self.logged_calls.popleft()
# Prefer URLs from logged_calls - ideally, cache is populated
# from the base repository.
# That means we only yield from urls_to_freeze
# if there are no logged_calls.
if self.naucse_urls_to_freeze:
yield self.naucse_urls_to_freeze.popleft()
@contextlib.contextmanager
def temporary_url_for_logger(app):
"""Context manager which temporary adds a new UrlForLogger to the app.
The logger is yielded as the context object, so it can be used
to get logged calls.
"""
logger = UrlForLogger(app)
yield logger
    # reverses the following operation from :class:`UrlForLogger`
# self.app.url_default_functions.setdefault(None, []).insert(0, logger)
app.url_default_functions[None].pop(0)
class NaucseFreezer(Freezer):
def __init__(self, app):
super().__init__(app)
urls_to_freeze = deque()
with app.app_context():
app.config['NAUCSE_ABSOLUTE_URLS_TO_FREEZE'] = urls_to_freeze
# override the default url_for_logger with our modified version
self.url_for_logger = AllLinksLogger(app, urls_to_freeze)
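# Hedged usage sketch (not part of the original module): `app` stands in for
# the real naucse Flask application.
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)
    freezer = NaucseFreezer(app)
    with app.app_context():
        record_url('/courses/')  # queue an extra URL for freezing
    # freezer.freeze()           # would write out the static site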
| 2.4375 | 2 |
maxflow/push_relabel.py | JovanCe/mfp | 0 | 12799237 | <reponame>JovanCe/mfp<gh_stars>0
__author__ = '<NAME> <<EMAIL>>'
__date__ = '30 August 2015'
__copyright__ = 'Copyright (c) 2015 Seven Bridges Genomics'
from collections import defaultdict
class PushRelabel(object):
def __init__(self, flow_network):
self.flow_network = flow_network
self.height = {}
self.excess = {}
self._init_node_neighbour_lists()
self.current_neighbhours = {k: 0 for k in flow_network.node_set}
def _init_node_neighbour_lists(self):
all_neighbours = defaultdict(set)
for n1, n2 in self.flow_network.nodes:
all_neighbours[n1].add(n2)
all_neighbours[n2].add(n1)
self.all_neighbours = {k: list(v) for k, v in all_neighbours.items()}
def _push(self, n1, n2):
residual = self.flow_network.residual
cf = residual.get_arc_capacity(n1, n2)
if cf <= 0 or self.height[n1] != self.height[n2] + 1:
return False
delta_flow = min(self.excess[n1], cf)
try:
self.flow_network.increase_flow(n1, n2, delta_flow)
except KeyError:
self.flow_network.decrease_flow(n2, n1, delta_flow)
self.excess[n1] -= delta_flow
self.excess[n2] += delta_flow
return True
def _relabel(self, n):
residual = self.flow_network.residual
neighbours = residual.get_node_neighbours(n)
min_neighbour_height = float('inf')
for neighbour in neighbours:
n_height = self.height[neighbour]
if n_height < min_neighbour_height and residual.get_arc_capacity(n, neighbour) > 0:
min_neighbour_height = n_height
if self.height[n] > n_height:
return False
self.height[n] = 1 + min_neighbour_height
return True
def _init_preflow(self):
excess = {k: 0 for k in self.flow_network.node_set}
height = {k: 0 for k in self.flow_network.node_set}
self.flow_network.reset()
s = self.flow_network.source
height[s] = self.flow_network.total_nodes
for n in self.flow_network.get_node_neighbours(s):
c = self.flow_network.get_arc_capacity(s, n)
self.flow_network.set_flow(s, n, c)
excess[n] = c
excess[s] -= c
self.excess = excess
self.height = height
def _get_overflowing_node(self):
for n, f in self.excess.items():
if f > 0 and n != self.flow_network.source and n != self.flow_network.sink:
return n
def generic_push_relabel(self):
self._init_preflow()
node = self._get_overflowing_node()
while node is not None:
res = False
for neighbour in self.flow_network.residual.get_node_neighbours(node):
res = self._push(node, neighbour)
if res:
break
if not res:
self._relabel(node)
node = self._get_overflowing_node()
return self.flow_network.get_current_flows()
def _discharge(self, n):
i = self.current_neighbhours[n]
neighbour_list = self.all_neighbours[n]
while self.excess[n] > 0:
try:
neighbour = neighbour_list[i]
success = self._push(n, neighbour)
i += 1
except IndexError:
self._relabel(n)
i = 0
self.current_neighbhours[n] = i
def relabel_to_front(self):
self._init_preflow()
node_list = list(self.flow_network.node_set - {self.flow_network.source, self.flow_network.sink})
i = 0
while True:
try:
n = node_list[i]
old_height = self.height[n]
self._discharge(n)
if self.height[n] > old_height:
node_list.pop(i)
node_list.insert(0, n)
i = 0
i += 1
except IndexError:
break
return self.flow_network.get_current_flows()
def generic_push_relabel(flow_network):
return PushRelabel(flow_network).generic_push_relabel()
def relabel_to_front(flow_network):
return PushRelabel(flow_network).relabel_to_front()
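# Hedged usage sketch (not part of the original module): the flow-network
# construction is an assumption; the class that provides node_set, residual,
# get_arc_capacity, etc. lives elsewhere in this package.
# network = FlowNetwork(source='s', sink='t')
# network.add_arc('s', 'a', capacity=10)
# network.add_arc('a', 't', capacity=5)
# flows = relabel_to_front(network)  # or generic_push_relabel(network)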
| 2.234375 | 2 |
utils/error_operate.py | game-platform-awaresome/XSdkTools | 2 | 12799238 | <reponame>game-platform-awaresome/XSdkTools<gh_stars>1-10
#Embedded file name: D:/AnySDK_Package/Env/debug/../script\error_operate.py
# from taskManagerModule import taskManager
import thread
import threading
import file_operate
# import core
def error(code):
return
# idChannel = int(threading.currentThread().getName())
# taskManager.shareInstance().notify(idChannel, 100 + code)
# file_operate.printf('%s Failed at code %s!' % (idChannel, -100 - code))
| 1.914063 | 2 |
mega_analysis/crosstab/lobe_top_level_hierarchy_only.py | thenineteen/Semiology-Visualisation-Tool | 10 | 12799239 | <reponame>thenineteen/Semiology-Visualisation-Tool<filename>mega_analysis/crosstab/lobe_top_level_hierarchy_only.py
import numpy as np
import pandas as pd
from mega_analysis.crosstab.all_localisations import all_localisations
# list of all excel localisations
all_localisations = all_localisations()
# list of top level localisations we want to keep
def top_level_lobes(Bayesian=False):
Lobes = ['TL', 'FL', 'CING', 'PL', 'OL', 'INSULA',
'Hypothalamus', 'Sub-Callosal Cortex', 'Cerebellum', 'Perisylvian',
'FT', 'TO', 'TP', 'FTP', 'TPO Junction',
'PO', 'FP']
if Bayesian:
redistributed = ['FT', 'FTP', 'PO', 'Perisylvian', 'FP', 'Sub-Callosal Cortex', 'TO', 'TPO Junction', 'TP']
redistributed.append('Cerebellum')
Lobes = [i for i in Lobes if i not in redistributed]
return Lobes
major_localisations = top_level_lobes()
# list of localisations to drop
minor_locs = [
loc for loc in all_localisations if loc not in major_localisations]
def drop_minor_localisations(df):
df_temp = df.drop(columns=minor_locs, inplace=False, errors='ignore')
return df_temp
| 2.328125 | 2 |
tests/test_1.py | BroadAxeC3/deidre | 1 | 12799240 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import unittest
class ApiTests(unittest.TestCase):
pass
# @Mocker()
# def test_timeout_exception(self, m):
# # given
# m._adapter = Co2ApiTimeoutAdapter(m._adapter)
# m.register_uri(ANY, ANY, text=self.hanging_callback)
# client = ApiClient(adapter=Co2ApiTimeoutAdapter(), timeout=10)
#
# # when/then
# with self.assertRaises(ApiException):
# client.retrieve('GET', f'{BASE_URI}/foobar')
| 2.625 | 3 |
g2ana/plugins/ObsLog.py | naojsoft/g2ana | 0 | 12799241 | # This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""Observation Log plugin.
**Plugin Type: Global**
``ObsLog`` is a global plugin. Only one instance can be opened.
**Usage**
***Saving the log to a file***
Put in values for the Observation Log folder and filename. The format
of the file saved will depend on the file extension of the filename;
use the type selector combobox to pick the right extension:
* csv: comma-separated values file format
* xlsx: MS Excel file format
The file is rewritten every time a new entry is added to the log.
***Adding a memo to one or more log entries***
Write a memo in the memo box. Select one or more frames to add the memo
to and press the "Add Memo" button. Multiple selection follows the usual
rules about holding down CTRL and/or SHIFT keys.
***Displaying an image***
Double-click on a log entry.
"""
import os
from collections import OrderedDict
from ginga import GingaPlugin, AstroImage
from ginga.gw import Widgets
__all__ = ['ObsLog']
class ObsLog(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
super(ObsLog, self).__init__(fv)
self.chname = None
self.file_prefixes = []
# columns to be shown in the table
columns = [("Obs Mod", 'OBS-MOD'),
("Datatype", 'DATA-TYP'),
("FrameID", 'FRAMEID'),
("Object", 'OBJECT'),
("UT", 'UT'),
("PropId", 'PROP-ID'),
("Exp Time", 'EXPTIME'),
("Air Mass", 'AIRMASS'),
#("Pos Ang", 'INST-PA'),
#("Ins Rot", 'INSROT'),
#("Foc Val", 'FOC-VAL'),
#("Filter01", 'FILTER01'),
#("Filter02", 'FILTER02'),
#("Filter03", 'FILTER03'),
("RA", 'RA'),
("DEC", 'DEC'),
("EQUINOX", 'EQUINOX'),
("Memo", 'G_MEMO'),
]
prefs = self.fv.get_preferences()
self.settings = prefs.create_category('plugin_ObsLog')
self.settings.add_defaults(sortable=True,
color_alternate_rows=True,
#max_rows_for_col_resize=5000,
report_columns=columns,
cache_normalized_images=True)
self.settings.load(onError='silent')
self.rpt_dict = OrderedDict({})
self.rpt_columns = []
self.fv.add_callback('add-image', self.incoming_data_cb)
self.gui_up = False
def build_gui(self, container):
vbox = Widgets.VBox()
vbox.set_border_width(1)
vbox.set_spacing(1)
tv = Widgets.TreeView(sortable=self.settings.get('sortable'),
use_alt_row_color=self.settings.get('color_alternate_rows'),
selection='multiple')
self.w.rpt_tbl = tv
vbox.add_widget(tv, stretch=1)
tv.add_callback('activated', self.dblclick_cb)
tv.add_callback('selected', self.select_cb)
self.rpt_columns = self.settings.get('report_columns')
tv.setup_table(self.rpt_columns, 1, 'FRAMEID')
captions = (("Memo:", 'label', "memo", 'entry', 'Add Memo', 'button'),
)
w, b = Widgets.build_info(captions, orientation='vertical')
self.w.update(b)
vbox.add_widget(w, stretch=0)
b.memo.set_tooltip('Set memo for selected frames')
b.add_memo.add_callback('activated', self.add_memo_cb)
b.add_memo.set_enabled(False)
captions = (("Folder:", 'label', "obslog_dir", 'entry',
"Name:", 'label', "obslog_name", 'entryset',
"Type", 'combobox', "Load", 'button'),
)
w, b = Widgets.build_info(captions, orientation='vertical')
self.w.update(b)
vbox.add_widget(w, stretch=0)
obs_log = self.settings.get('obslog_name', None)
if obs_log is None:
obs_log = ''
b.obslog_name.set_text(obs_log)
b.obslog_name.set_tooltip('File name for observation log')
b.obslog_name.add_callback('activated', self.write_obslog_cb)
b.obslog_dir.set_text("/tmp")
b.obslog_dir.set_tooltip('Folder path for observation log')
b.obslog_dir.add_callback('activated', self.write_obslog_cb)
b.type.insert_alpha("csv")
b.type.insert_alpha("xlsx")
b.type.set_tooltip("Format for saving/loading ObsLog")
b.type.add_callback('activated', self.set_obslog_format_cb)
b.load.set_tooltip("Load a saved ObsLog")
b.load.add_callback('activated', self.load_obslog_cb)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btn.set_enabled(False)
btns.add_widget(btn)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(btns, stretch=0)
container.add_widget(vbox, stretch=1)
self.gui_up = True
def replace_kwds(self, header):
"""Subclass this method to do munge the data for special reports."""
d = dict()
d.update(header)
return d
def add_to_obslog(self, header, image):
frameid = header['FRAMEID']
# replace some kwds as needed in the table
d = self.replace_kwds(header.asdict())
# Hack to insure that we get the columns in the desired order
d = OrderedDict([(kwd, d.get(kwd, ''))
for col, kwd in self.rpt_columns])
self.rpt_dict[frameid] = d
self.update_obslog()
def stop(self):
self.gui_up = False
def process_image(self, chname, header, image):
"""Override this method to do something special with the data."""
pass
def incoming_data_cb(self, fv, chname, image, info):
if chname != self.chname:
return
imname = image.get('name', None)
if imname is None:
return
# only accepted list of frames
accepted = False
for prefix in self.file_prefixes:
if imname.startswith(prefix):
accepted = True
break
if not accepted:
return
header = image.get_header()
# add image to obslog
self.fv.gui_do(self.add_to_obslog, header, image)
try:
self.process_image(chname, header, image)
except Exception as e:
self.logger.error("Failed to process image: {}".format(e),
exc_info=True)
def update_obslog(self):
if not self.gui_up:
return
self.w.rpt_tbl.set_tree(self.rpt_dict)
obslog_name = self.w.obslog_name.get_text().strip()
if len(obslog_name) > 0:
obslog_path = os.path.join(self.w.obslog_dir.get_text().strip(),
obslog_name)
self.write_obslog(obslog_path)
def write_obslog(self, filepath):
if len(self.rpt_dict) == 0:
return
try:
import pandas as pd
except ImportError:
self.fv.show_error("Please install 'pandas' and "
"'openpyxl' to use this feature")
return
try:
self.logger.info("writing obslog: {}".format(filepath))
col_hdr = [colname for colname, key in self.rpt_columns]
rows = [list(d.values()) for d in self.rpt_dict.values()]
df = pd.DataFrame(rows, columns=col_hdr)
if filepath.endswith('.csv'):
df.to_csv(filepath, index=False, header=True)
else:
df.to_excel(filepath, index=False, header=True)
except Exception as e:
self.logger.error("Error writing obslog: {}".format(e),
exc_info=True)
def load_obslog(self, filepath):
try:
import pandas as pd
except ImportError:
self.fv.show_error("Please install 'pandas' and "
"'openpyxl' to use this feature")
return
try:
self.logger.info("loading obslog: {}".format(filepath))
col_hdr = [colname for colname, key in self.rpt_columns]
if filepath.endswith('.csv'):
df = pd.read_csv(filepath, header=0, #names=col_hdr,
index_col=None)
else:
df = pd.read_excel(filepath, header=0, #names=col_hdr,
index_col=None)
self.rpt_dict = OrderedDict({})
res = df.to_dict('index')
for row in res.values():
frameid = row['FrameID']
d = OrderedDict([(kwd, row.get(col, ''))
for col, kwd in self.rpt_columns])
self.rpt_dict[frameid] = d
self.w.rpt_tbl.set_tree(self.rpt_dict)
except Exception as e:
self.logger.error("Error loading obslog: {}".format(e),
exc_info=True)
def write_obslog_cb(self, w):
obslog_path = os.path.join(self.w.obslog_dir.get_text().strip(),
self.w.obslog_name.get_text().strip())
self.write_obslog(obslog_path)
def load_obslog_cb(self, w):
obslog_path = os.path.join(self.w.obslog_dir.get_text().strip(),
self.w.obslog_name.get_text().strip())
self.load_obslog(obslog_path)
def get_selected(self):
res_dict = self.w.rpt_tbl.get_selected()
return res_dict
def dblclick_cb(self, widget, d):
"""Switch to the image that was double-clicked in the obslog"""
frameid = list(d.keys())[0]
info = d[frameid]
self.view_image(frameid, info)
def view_image(self, imname, info):
chname = self.chname
channel = self.fv.get_current_channel()
if channel.name != chname:
channel = self.fv.get_channel(chname)
self.fv.change_channel(chname)
channel.switch_name(imname)
def select_cb(self, widget, d):
res = self.get_selected()
if len(res) == 0:
self.w.add_memo.set_enabled(False)
else:
self.w.add_memo.set_enabled(True)
def add_memo_cb(self, widget):
memo_txt = self.w.memo.get_text().strip()
res = self.get_selected()
if len(res) == 0:
self.fv.show_error("No frames selected for memo!")
return
for key in res.keys():
self.rpt_dict[key]['G_MEMO'] = memo_txt
self.w.rpt_tbl.set_tree(self.rpt_dict)
def set_obslog_format_cb(self, w, idx):
ext = w.get_text()
obslog_name = self.w.obslog_name.get_text().strip()
name, old_ext = os.path.splitext(obslog_name)
self.w.obslog_name.set_text(name + '.' + ext)
self.write_obslog_cb(None)
def close(self):
self.fv.stop_global_plugin(str(self))
return True
def __str__(self):
return 'obslog'
| 2.25 | 2 |
retrieval_gloss.py | iacercalixto/visualsem | 37 | 12799242 | import argparse
import torch
import sys
import os
import json
from collections import defaultdict
import h5py
from sentence_transformers import SentenceTransformer, util
import numpy
import tqdm
from itertools import zip_longest
from utils import grouper, load_sentences, load_bnids, load_visualsem_bnids
def retrieve_nodes_given_sentences(out_fname, batch_size, all_input_sentences, glosses_bnids, glosses_feats, topk):
"""
out_fname(str): Output file to write retrieved node ids to.
batch_size(int): Batch size for Sentence BERT.
all_input_sentences(list[str]): All input sentences loaded from `input_file`.
glosses_bnids(list[str]): All gloss BNids loaded from `args.glosses_bnids`. Aligned with `glosses_feats`.
glosses_feats(numpy.array): Numpy array with VisualSem gloss features computed with Sentence BERT.
topk(int): Number of nodes to retrieve for each input sentence.
"""
if os.path.isfile(out_fname):
raise Exception("File already exists: '%s'. Please remove it manually to avoid tampering."%out_fname)
n_examples = len(all_input_sentences)
print("Number of input examples to extract BNIDs for: ", n_examples)
model_name = "paraphrase-multilingual-mpnet-base-v2"
model = SentenceTransformer(model_name)
with open(out_fname, 'w', encoding='utf8') as fh_out:
ranks_predicted = []
for idxs_ in grouper(batch_size, range(n_examples)):
idxs = []
queries = []
for i in idxs_:
if not i is None:
idxs.append(i)
queries.append( all_input_sentences[i] )
queries_embs = model.encode(queries, convert_to_tensor=True)
if torch.cuda.is_available():
queries_embs = queries_embs.cuda()
scores = util.pytorch_cos_sim(queries_embs, glosses_feats)
scores = scores.cpu().numpy()
ranks = numpy.argsort(scores) # sort scores by cosine similarity (low to high)
ranks = ranks[:,::-1] # sort by cosine similarity (high to low)
for rank_idx in range(len(idxs[:ranks.shape[0]])):
bnids_predicted = []
for rank_predicted in range(topk*10):
bnid_pred = glosses_bnids[ ranks[rank_idx,rank_predicted] ]
bnid_pred_score = scores[rank_idx, ranks[rank_idx, rank_predicted]]
if not bnid_pred in bnids_predicted:
bnids_predicted.append((bnid_pred,bnid_pred_score))
if len(bnids_predicted)>=topk:
break
# write top-k predicted BNids
for iii, (bnid, score) in enumerate(bnids_predicted[:topk]):
fh_out.write(bnid+"\t"+"%.4f"%score)
if iii < topk-1:
fh_out.write("\t")
else: # iii == topk-1
fh_out.write("\n")
def encode_query(out_fname, batch_size, all_sentences):
"""
out_fname(str): Output file to write SBERT features for query.
batch_size(int): Batch size for Sentence BERT.
all_sentences(list[str]): Sentences to be used for retrieval.
"""
n_lines = len(all_sentences)
model_name = "paraphrase-multilingual-mpnet-base-v2"
model = SentenceTransformer(model_name)
shape_features = (n_lines, 768)
with h5py.File(out_fname, 'w') as fh_out:
fh_out.create_dataset("features", shape_features, dtype='float32', chunks=(1,768), maxshape=(None, 768), compression="gzip")
for from_idx in tqdm.trange(0,n_lines,batch_size):
to_idx = from_idx+batch_size if from_idx+batch_size <= n_lines else n_lines
batch_sentences = all_sentences[ from_idx: to_idx ]
emb_sentences = model.encode(batch_sentences, convert_to_tensor=True)
#test_queries(emb_sentences, all_sentences, model)
fh_out["features"][from_idx:to_idx] = emb_sentences.cpu().numpy()
if __name__=="__main__":
visualsem_path = os.path.dirname(os.path.realpath(__file__))
visualsem_nodes_path = "%s/dataset/nodes.v2.json"%visualsem_path
visualsem_images_path = "%s/dataset/images/"%visualsem_path
glosses_sentence_bert_path = "%s/dataset/gloss_files/glosses.en.txt.sentencebert.h5"%visualsem_path
glosses_bnids_path = "%s/dataset/gloss_files/glosses.en.txt.bnids"%visualsem_path
os.makedirs("%s/dataset/gloss_files/"%visualsem_path, exist_ok=True)
p = argparse.ArgumentParser()
p.add_argument('--input_files', type=str, nargs="+", default=["example_data/queries.txt"],
help="""Input file(s) to use for retrieval. Each line in each file should contain a detokenized sentence.""")
p.add_argument('--topk', type=int, default=1, help="Retrieve topk nodes for each input sentence.")
p.add_argument('--batch_size', type=int, default=1000)
p.add_argument('--visualsem_path', type=str, default=visualsem_path,
help="Path to directory containing VisualSem knowledge graph.")
p.add_argument('--visualsem_nodes_path', type=str, default=visualsem_nodes_path,
help="Path to file containing VisualSem nodes.")
p.add_argument('--visualsem_images_path', type=str, default=visualsem_images_path,
help="Path to directory containing VisualSem images.")
p.add_argument('--glosses_sentence_bert_path', type=str, default=glosses_sentence_bert_path,
help="""HDF5 file containing glosses index computed with Sentence BERT (computed with `extract_glosses_visualsem.py`).""")
p.add_argument('--glosses_bnids_path', type=str, default=glosses_bnids_path,
help="""Text file containing glosses BabelNet ids, one per line (computed with `extract_glosses_visualsem.py`).""")
p.add_argument('--input_valid', action='store_true',
help="""Perform retrieval for the glosses in the validation set. (See paper for reference)""")
p.add_argument('--input_test', action='store_true',
help="""Perform retrieval for the glosses in the test set. (See paper for reference)""")
args = p.parse_args()
# load all nodes in VisualSem
all_bnids = load_visualsem_bnids(args.visualsem_nodes_path, args.visualsem_images_path)
gloss_bnids = load_bnids( args.glosses_bnids_path )
gloss_bnids = numpy.array(gloss_bnids, dtype='object')
with h5py.File(args.glosses_sentence_bert_path, 'r') as fh_glosses:
glosses_feats = fh_glosses["features"][:]
glosses_feats = torch.tensor(glosses_feats)
if torch.cuda.is_available():
glosses_feats = glosses_feats.cuda()
# load train/valid/test gloss splits
glosses_splits = fh_glosses["split_idxs"][:]
train_idxs = (glosses_splits==0).nonzero()[0]
valid_idxs = (glosses_splits==1).nonzero()[0]
test_idxs = (glosses_splits==2).nonzero()[0]
# load gloss language splits
language_splits = fh_glosses["language_idxs"][:]
for input_file in args.input_files:
print("Processing input file: %s ..."%input_file)
sbert_out_fname = input_file+".sentencebert.h5"
if os.path.isfile( sbert_out_fname ):
raise Exception("File already exists: '%s'. Please remove it manually to avoid tampering."%sbert_out_fname)
input_sentences = load_sentences( input_file )
encode_query(sbert_out_fname, args.batch_size, input_sentences)
out_fname = input_file+".bnids"
retrieve_nodes_given_sentences(out_fname, args.batch_size, input_sentences, gloss_bnids, glosses_feats, args.topk)
# remove temporary SBERT index created for input file(s)
os.remove( sbert_out_fname )
print("Retrieved glosses: %s"%out_fname)
| 2.1875 | 2 |
hcat/detect.py | buswinka/hcat | 4 | 12799243 | import hcat.lib.functional
import hcat.lib.functional as functional
from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter
from hcat.lib.cell import Cell
from hcat.lib.cochlea import Cochlea
from hcat.backends.detection import FasterRCNN_from_url
from hcat.backends.detection import HairCellFasterRCNN
from hcat.lib.utils import warn
import torch
from torch import Tensor
from tqdm import tqdm
from itertools import product
import numpy as np
from hcat.lib.explore_lif import get_xml
import torchvision.ops
import skimage.io as io
import os.path
from typing import Optional, List, Dict
# DOCUMENTED
def _detect(f: str, curve_path: str = None, cell_detection_threshold: float = 0.86, dtype=None,
nms_threshold: float = 0.2, save_xml=False, save_fig=False, pixel_size=None, cell_diameter=None):
"""
2D hair cell detection algorithm.
Loads arbitrarily large 2d image and performs iterative faster rcnn detection on the entire image.
:param *str* f: path to image by which to analyze
:param *float* cell_detection_threshold: cells below threshold are rejected
:param *float* nms_threshold: iou rejection threshold for nms.
:return: *Cochlea* object containing data of analysis.
"""
print('Initializing hair cell detection algorithm...')
if f is None:
warn('ERROR: No File to Analyze... \nAborting.', color='red')
return None
if not pixel_size:
warn('WARNING: Pixel Size is not set. Defaults to 288.88 nm x/y. '
'Consider suplying value for optimal performance.', color='yellow')
with torch.no_grad():
# Load and preprocess Image
image_base = load(f, 'TileScan 1 Merged', verbose=True) # from hcat.lib.utils
image_base = image_base[[2, 3],...].max(-1) if image_base.ndim == 4 else image_base
shape = list(image_base.shape)
shape[0] = 1
dtype = image_base.dtype if dtype is None else dtype
scale: int = hcat.lib.utils.get_dtype_offset(dtype)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
temp = np.zeros(shape)
temp = np.concatenate((temp, image_base)) / scale * 255
c, x, y = image_base.shape
print(
f'DONE: shape: {image_base.shape}, min: {image_base.min()}, max: {image_base.max()}, dtype: {image_base.dtype}')
if image_base.max() < scale * 0.33:
warn(f'WARNING: Image max value less than 1/3 the scale factor for bit depth. Image Max: {image_base.max()},'
                 f' Scale Factor: {scale}, dtype: {dtype}. Readjusting scale to 1.5 times Image max.', color='yellow')
scale = image_base.max() * 1.5
image_base = torch.from_numpy(image_base.astype(np.uint16) / scale).to(device)
if pixel_size is not None:
image_base: Tensor = correct_pixel_size(image_base, pixel_size) #model expects pixel size of 288.88
print(f'Rescaled Image to match pixel size of 288.88nm with a new shape of: {image_base.shape}')
elif cell_diameter is not None:
image_base: Tensor = scale_to_hair_cell_diameter(image_base, cell_diameter)
print(f'Rescaled Image to match pixel size of 288.88nm with a new shape of: {image_base.shape}')
# normalize around zero
image_base.sub_(0.5).div_(0.5)
if device == 'cuda':
warn('CUDA: GPU successfully initialized!', color='green')
else:
warn('WARNING: GPU not present or CUDA is not correctly intialized for GPU accelerated computation. '
'Analysis may be slow.', color='yellow')
        # Initialize the model...
model = FasterRCNN_from_url(url='https://github.com/buswinka/hcat/blob/master/modelfiles/detection.trch?raw=true', device=device)
model.eval()
        # Initialize curvature detection
predict_curvature = hcat.lib.functional.PredictCurvature(erode=3)
        # Get the indices for evaluating cropped regions
c, x, y = image_base.shape
image_base = torch.cat((torch.zeros((1, x, y), device=device), image_base), dim=0)
x_ind: List[List[int]] = calculate_indexes(10, 235, x, x) # [[0, 255], [30, 285], ...]
y_ind: List[List[int]] = calculate_indexes(10, 235, y, y) # [[0, 255], [30, 285], ...]
total: int = len(x_ind) * len(y_ind)
        # Initialize other small things
cell_id = 1
cells = []
add_cell = cells.append # stupid but done for speed
for x, y in tqdm(product(x_ind, y_ind), total=total, desc='Detecting: '):
# Load and prepare image crop for ML model evaluation
image: Tensor = image_base[:, x[0]:x[1], y[0]:y[1]].unsqueeze(0)
# If the image has nothing in it we can skip for speed
if image.max() == -1:
continue
# Evaluate Deep Learning Model
out: Dict[str, Tensor] = model(image.float())[0]
scores: Tensor = out['scores'].cpu()
boxes: Tensor = out['boxes'].cpu()
labels: Tensor = out['labels'].cpu()
# The model output coords with respect to the crop of image_base. We have to adjust
# idk why the y and x are flipped. Breaks otherwise.
boxes[:, [0, 2]] += y[0]
boxes[:, [1, 3]] += x[0]
# center x, center y, width, height
centers: Tensor = torchvision.ops.box_convert(boxes, 'xyxy', 'cxcywh').cpu()
cx = centers[:, 0]
cy = centers[:, 1]
for i, score in enumerate(scores):
if score > cell_detection_threshold:
add_cell(Cell(id=cell_id,
loc=torch.tensor([0, cx[i], cy[i], 0]),
image=None,
mask=None,
cell_type='OHC' if labels[i] == 1 else 'IHC',
boxes=boxes[i, :],
scores=scores[i]))
cell_id += 1
# some cells may overlap. We remove cells after analysis is complete.
cells: List[Cell] = _cell_nms(cells, nms_threshold)
ohc = sum([int(c.type == 'OHC') for c in cells]) # number of ohc
ihc = sum([int(c.type == 'IHC') for c in cells]) # number of ihc
print(f'Total Cells: {len(cells)}\n OHC: {ohc}\n IHC: {ihc}' )
max_projection: Tensor = image_base[[1], ...].mul(0.5).add(0.5).unsqueeze(-1).cpu()
curvature, distance, apex = predict_curvature(max_projection, cells, curve_path)
if curvature is None:
warn('WARNING: All three methods to predict hair cell path have failed. Frequency Mapping functionality is '
'limited. Consider Manual Calculation.', color='yellow')
# curvature estimation really only works if there is a lot of tissue...
if distance is not None and distance.max() > 4000:
for c in cells: c.calculate_frequency(curvature[[0, 1], :], distance) # calculate cell's best frequency
cells = [c for c in cells if not c._distance_is_far_away] # remove a cell if its far away from curve
else:
curvature, distance, apex = None, None, None
warn('WARNING: Predicted Cochlear Distance is below 4000um. Not sufficient '
'information to determine cell frequency.', color='yellow')
xml = get_xml(f) if f.endswith('.lif') else None
filename = os.path.split(f)[-1]
# remove weird cell ID's
for i, c in enumerate(cells): c.id = i+1
# Store in compressible object for further use
c = Cochlea(mask=None,
filename=filename,
path=f,
analysis_type='detect',
leica_metadata=xml,
im_shape=image_base.shape,
cochlear_distance=distance,
curvature=curvature,
cells=cells,
apex=apex)
c.write_csv()
if save_xml: cochlea_to_xml(c)
if save_fig: c.make_detect_fig(image_base)
print('')
return c
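# Hedged usage sketch (not part of the original module): the file name and
# keyword values below are placeholders.
# cochlea = _detect('cochlea_scan.lif', cell_detection_threshold=0.86,
#                   pixel_size=288.88, save_xml=True, save_fig=True)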
def _cell_nms(cells: List[Cell], nms_threshold: float) -> List[Cell]:
"""
    Performs non-maximum suppression on the resulting cell predictions
:param cells: Iterable of cells
:param nms_threshold: cell iou threshold
:return: Iterable of cells
"""
# nms to get rid of cells
boxes = torch.zeros((len(cells), 4))
scores = torch.zeros(len(cells))
for i, c in enumerate(cells):
boxes[i, :] = c.boxes
scores[i] = c.scores
ind = torchvision.ops.nms(boxes, scores, nms_threshold)
# need to pop off list elements from an int64 tensor
ind_bool = torch.zeros(len(cells))
ind_bool[ind] = 1
for i, val in enumerate(ind_bool):
if val == 0:
cells[i] = None
return [c for c in cells if c]
| 2.3125 | 2 |
UServer/http_api_oauth/api/api_gateway.py | soybean217/lora-python | 0 | 12799244 | import json
from . import api, root
from .decorators import gateway_belong_to_user, require_basic_or_oauth
from userver.object.gateway import Gateway, Location
from utils.errors import KeyDuplicateError, PatchError
from .forms.form_gateway import AddGatewayForm, PatchGateway
from flask import request, Response
from .forms import get_formdata_from_json_or_form
@api.route(root + 'gateways', methods=['GET', 'POST'])
@require_basic_or_oauth
def gateways(user):
if request.method == 'GET':
gateways_list = []
gateways = Gateway.query.filter_by(user_id=user.id)
for gateway in gateways:
dict = gateway.obj_to_dict()
gateways_list.append(dict)
data = json.dumps(gateways_list)
return Response(status=200, response=data)
elif request.method == 'POST':
formdata = get_formdata_from_json_or_form(request)
add_gateway = AddGatewayForm(formdata)
if add_gateway.validate():
try:
gateway = import_gateway(user, add_gateway)
gateway.save()
new_gateway = Gateway.query.get(gateway.id)
return Response(status=201, response=json.dumps(new_gateway.obj_to_dict()))
except KeyDuplicateError as error:
errors = {'mac_addr': str(error)}
return Response(status=406, response=json.dumps({"errors": errors}))
except AssertionError as error:
return Response(status=406, response=json.dumps({"errors": {"other": str(error)}}))
else:
errors = {}
for key, value in add_gateway.errors.items():
errors[key] = value[0]
return Response(status=406, response=json.dumps({"errors": errors}))
@api.route(root + 'gateways/<gateway_id>/pull_info', methods=['GET'])
@require_basic_or_oauth
@gateway_belong_to_user
def gateway_pull_info(user, gateway):
"""
:param user:
:param gateway:
:return:
"""
gateway.get_pull_info()
@api.route(root + 'gateways/<gateway_id>', methods=['GET', 'DELETE', 'PATCH', 'POST'])
@require_basic_or_oauth
@gateway_belong_to_user
def gateway(user, gateway):
if request.method == 'GET':
return Response(status=200, response=json.dumps(gateway.obj_to_dict()))
elif request.method == 'PATCH':
try:
formdata = get_formdata_from_json_or_form(request)
PatchGateway.patch(gateway, formdata)
return json.dumps(gateway.obj_to_dict()), 200
except (AssertionError, PatchError, ValueError) as error:
return json.dumps({'errors': str(error)}), 406
elif request.method == 'DELETE':
gateway.delete()
return json.dumps({'success': True}), 200
elif request.method == 'POST':
formdata = get_formdata_from_json_or_form(request)
if formdata and formdata.get('cmd') is not None:
if formdata['cmd'] == 'restart':
gateway.send_restart_request()
return '', 204
else:
return 'Unknown cmd %s ' % formdata['cmd'], 406
else:
return '', 406
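# Hedged usage sketch (not part of the original module): host, token and ids
# are placeholders; `root` is the API prefix configured for this blueprint.
# curl -H "Authorization: Bearer <token>" https://<host>/<root>gateways
# curl -X PATCH -H "Content-Type: application/json" \
#      -d '{"name": "roof-gateway"}' https://<host>/<root>gateways/<gateway_id>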
def import_gateway(user, add_gateway):
mac_addr = add_gateway['mac_addr'].data
name = add_gateway['name'].data
platform = add_gateway['platform'].data
freq_plan = add_gateway['freq_plan'].data
model = add_gateway['model'].data
location = Location(add_gateway['longitude'].data, add_gateway['latitude'].data, add_gateway['altitude'].data)
return Gateway(user.id, mac_addr, name, platform, model, freq_plan=freq_plan, location=location) | 2.390625 | 2 |
station/websockets/mock/mock_current_step.py | GLO3013-E4/COViRondelle2021 | 0 | 12799245 | <gh_stars>0
import random
from enum import Enum
import rospy
from std_msgs.msg import String
class Step(Enum):
CycleNotStarted = 'CycleNotStarted'
CycleReadyInWaitingMode = 'CycleReadyInWaitingMode'
CycleStarted = 'CycleStarted'
ToResistanceStation = 'ToResistanceStation'
ReadResistance = 'ReadResistance'
ToControlPanel = 'ToControlPanel'
ReadControlPanel = 'ReadControlPanel'
ToFirstPuckAndGrabFirstPuck = 'ToFirstPuckAndGrabFirstPuck'
ToFirstCornerAndReleaseFirstPuck = 'ToFirstCornerAndReleaseFirstPuck'
ToSecondPuckAndGrabSecondPuck = 'ToSecondPuckAndGrabSecondPuck'
ToSecondCornerAndReleaseSecondPuck = 'ToSecondCornerAndReleaseSecondPuck'
ToThirdPuckAndGrabThirdPuck = 'ToThirdPuckAndGrabThirdPuck'
ToThirdCornerAndReleaseThirdPuck = 'ToThirdCornerAndReleaseThirdPuck'
ToSquareCenter = 'ToSquareCenter'
CycleEndedAndRedLedOn = 'CycleEndedAndRedLedOn'
def create_current_step():
return random.choice(list(Step)).name
def mock_current_step(pub, step=create_current_step()):
rospy.loginfo('Mocking current_step: {}'.format(step))
pub.publish(step)
if __name__ == '__main__':
rospy.init_node('mock_current_step', anonymous=True)
current_step_publisher = rospy.Publisher('current_step', String, queue_size=10)
mock_current_step(current_step_publisher)
| 2.46875 | 2 |
river/linear_model/__init__.py | mathco-wf/river | 4 | 12799246 | """Linear models."""
from .alma import ALMAClassifier
from .glm import LinearRegression, LogisticRegression, Perceptron
from .pa import PAClassifier, PARegressor
from .softmax import SoftmaxRegression
__all__ = [
"ALMAClassifier",
"LinearRegression",
"LogisticRegression",
"PAClassifier",
"PARegressor",
"Perceptron",
"SoftmaxRegression",
]
| 1.390625 | 1 |
Chapter 02/__manifest__.py | hitosony/odoo | 84 | 12799247 | <reponame>hitosony/odoo
{'name': '<NAME>',
'data': [
'security/ir.model.access.csv',
'security/todo_access_rules.xml',
'views/todo_menu.xml',
'views/todo_view.xml'],
'application': True}
| 0.839844 | 1 |
GUI.py | SlavPetkovic/Python-Crud-Application | 0 | 12799248 | <filename>GUI.py
# Import dependencies
from tkinter import *
from tkinter import ttk
import mysql.connector
import sqlalchemy
import json
import datetime as dt
import getpass
import mysql
with open("parameters/config.json") as config:
param = json.load(config)
# Establishing engine
engine = sqlalchemy.create_engine('mysql+mysqlconnector://{0}:{1}@{2}/{3}'.
format(param['Teletron'][0]['user'],
param['Teletron'][0]['password'],
param['Teletron'][0]['host'],
param['Teletron'][0]['database']), echo=False)
# Defining entry form
def entry():
# getting form data
temperature = Temperature.get()
pressure = Pressure.get()
recorddate = (dt.date.today()).strftime('%Y-%m-%d')
CreatedBy = getpass.getuser()
# applying empty validation
    if temperature == '' or pressure == '' or recorddate == '' or CreatedBy == '':
        message.set("fill the empty field!!!")
else:
# Create connection object to Epi
Epi_con = engine.connect()
# Preparing SQL query to INSERT a record into the database.
sql = """INSERT INTO crudexample (RecordDate, Temperature, Pressure,CreatedBy)
VALUES (%s, %s, %s, %s) """
data = (recorddate, temperature, pressure , CreatedBy)
        try:
            # executing the sql command
            Epi_con.execute(sql, data)
            # commit changes in database
            Epi_con.commit()
            message.set("Data Stored successfully")
        except Exception:
            message.set("Error: data could not be stored")
#def read():
#def update():
#def delete():
#def dbsetup():
# defining the data entry form function
def Entryform():
global entry_screen
entry_screen = Tk()
# Setting title of screen
entry_screen.title("Data Entry Form")
# setting height and width of screen
entry_screen.geometry("400x270")
# declaring variable
global message
global RecordDate
global Temperature
global Pressure
global CreatedBy
Temperature = IntVar()
Pressure = IntVar()
RecordDate = StringVar()
CreatedBy = StringVar()
message = StringVar()
# Creating layout of Data Entry Form
Label(entry_screen, width="300", text="Please enter details below", bg="blue", fg="white").pack()
# Temperature Label
Label(entry_screen, text= "Temperature * ").place(x=20, y=80)
# Temperature textbox
Entry(entry_screen, textvariable= Temperature).place(x=140, y=82)
# Pressure Label
Label(entry_screen, text = "Pressure * ").place(x=20, y=120)
# Pressure textbox
Entry(entry_screen, textvariable = Pressure).place(x=140, y=122)
# Label for displaying entry status[success/failed]
Label(entry_screen, text = "", textvariable=message).place(x=95, y=240)
# Submit Button
Button(entry_screen, text="Submit", width=10, height=1, bg="gray", command=entry).place(x=105, y=210)
Button(entry_screen, text="Update", width=10, height=1, bg="gray", command=entry).place(x=205, y=210)
Button(entry_screen, text="Delete", width=10, height=1, bg="gray", command=entry).place(x=305, y=210)
entry_screen.mainloop()
# calling function entry form
Entryform() | 3.015625 | 3 |
products/admin.py | zerobug110/Syfters_project | 0 | 12799249 | from django.contrib import admin
from products.views import portfolio
# Register your models here.
from . models import Product, New, About, LatestNew
class ProductAdmin(admin.ModelAdmin):
list_display = ('name','price','created_at')
    list_display_links = ('id', 'name')
list_filter = ('name','price','created_at')
search_fields = ('name','price')
ordering =('name','price','created_at')
class NewAdmin(admin.ModelAdmin):
list_display=('title','time')
list_filter=('title','time')
search_fields = ('title','time')
admin.site.register(Product, ProductAdmin)
admin.site.register(New, NewAdmin)
admin.site.register(LatestNew)
admin.site.register(About) | 1.820313 | 2 |
program/predictor/predictor_bilstm_crf.py | windsuzu/AICUP-Deidentification-of-Medical-Data | 1 | 12799250 | <filename>program/predictor/predictor_bilstm_crf.py<gh_stars>1-10
from program.models.model_bilstm_crf import BilstmCrfModel
from program.data_process.data_preprocessor import GeneralDataPreprocessor
import pandas as pd
import tensorflow as tf
import tensorflow_addons as tf_ad
from program.utils.tokenization import read_vocab
from dataclasses import dataclass
from program.utils.write_output_file import format_result
from program.abstracts.abstract_ner_predictor import NerPredictor
@dataclass
class BilstmCrfPredictor(NerPredictor):
def __post_init__(self):
vocab_file_path = self.model_data_path + "vocab_file.txt"
tag_file_path = self.model_data_path + "tag.txt"
self.voc2id, self.id2voc = read_vocab(vocab_file_path)
self.tag2id, self.id2tag = read_vocab(tag_file_path)
test_X_path = self.model_data_path + "test_X.pkl"
test_mapping_path = self.model_data_path + "test_mapping.pkl"
self.test_X, self.test_mapping = GeneralDataPreprocessor.loadTestArrays(
test_X_path, test_mapping_path
)
self.model = BilstmCrfModel(
hidden_num=self.hidden_nums,
vocab_size=len(self.voc2id),
label_size=len(self.tag2id),
embedding_size=self.embedding_size,
)
self.optimizer = tf.keras.optimizers.Adam(self.learning_rate)
def predict_sentence(self, sentence):
"""
predict single sentence.
Input:
Raw text string
"""
# dataset = encode sentence
# = [[1445 33 1878 826 1949 1510 112]]
dataset = tf.keras.preprocessing.sequence.pad_sequences(
[[self.voc2id.get(char, 0) for char in sentence]], padding="post"
)
# logits = (1, 7, 28) = (sentence, words, predict_distrib)
# text_lens = [7]
logits, text_lens = self.model.predict(dataset)
paths = []
for logit, text_len in zip(logits, text_lens):
viterbi_path, _ = tf_ad.text.viterbi_decode(
logit[:text_len], self.model.transition_params
)
paths.append(viterbi_path)
# path[0] = tag in sentence
# = [18, 19, 19, 1, 26, 27, 1]
# result = ['B-name', 'I-name', 'I-name', 'O', 'B-time', 'I-time', 'O']
result = [self.id2tag[id] for id in paths[0]]
# entities_result =
# [{'begin': 0, 'end': 3, 'words': '賈伯斯', 'type': 'name'},
# {'begin': 4, 'end': 6, 'words': '七號', 'type': 'time'}]
entities_result = format_result(list(sentence), result)
return entities_result
def predict(self):
# restore model
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
ckpt.restore(tf.train.latest_checkpoint(self.checkpoint_path))
article_id = 0
counter = 0
results = []
result = []
for testset in self.test_X:
prediction = self.predict_sentence(testset)
# predict_pos + counter
if prediction:
for pred in prediction:
pred["begin"] += counter
pred["end"] += counter
result.append(pred)
counter += len(testset)
if counter == self.test_mapping[article_id]:
results.append(result)
article_id += 1
counter = 0
result = []
self.results = results
def output(self):
"""
results:
[
[
{'begin': 170, 'end': 174, 'words': '1100', 'type': 'med_exam'},
{'begin': 245, 'end': 249, 'words': '1145', 'type': 'med_exam'},
...
]
]
"""
titles = {
"end": "end_position",
"begin": "start_position",
"words": "entity_text",
"type": "entity_type",
}
df = pd.DataFrame()
for i, result in enumerate(self.results):
results = pd.DataFrame(result).rename(columns=titles)
results = results[
["start_position", "end_position", "entity_text", "entity_type"]
]
article_ids = pd.Series([i] * len(result), name="article_id")
df = df.append(pd.concat([article_ids, results], axis=1), ignore_index=True)
df.to_csv(self.output_path + "output.tsv", sep="\t", index=False)
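# Hedged usage sketch (not part of the original module): the constructor
# arguments mirror the fields referenced above and in the NerPredictor base
# class; the values are placeholders.
# predictor = BilstmCrfPredictor(model_data_path="data/", checkpoint_path="ckpt/",
#                                output_path="output/", hidden_nums=512,
#                                embedding_size=300, learning_rate=1e-3)
# predictor.predict()
# predictor.output()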
| 2.59375 | 3 |